title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
[POC] implement test_arithmetic.py
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index a323e2487e356..71b2774a92612 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -13,7 +13,7 @@ import pandas.util.testing as tm import pandas as pd -from pandas._libs.tslibs import Timestamp, Timedelta +from pandas._libs.tslibs import Timestamp from pandas.tests.indexes.common import Base @@ -26,42 +26,6 @@ def full_like(array, value): return ret -class TestIndexArithmeticWithTimedeltaScalar(object): - - @pytest.mark.parametrize('index', [ - Int64Index(range(1, 11)), - UInt64Index(range(1, 11)), - Float64Index(range(1, 11)), - RangeIndex(1, 11)]) - @pytest.mark.parametrize('scalar_td', [Timedelta(days=1), - Timedelta(days=1).to_timedelta64(), - Timedelta(days=1).to_pytimedelta()]) - def test_index_mul_timedelta(self, scalar_td, index): - # GH#19333 - expected = pd.timedelta_range('1 days', '10 days') - - result = index * scalar_td - tm.assert_index_equal(result, expected) - commute = scalar_td * index - tm.assert_index_equal(commute, expected) - - @pytest.mark.parametrize('index', [Int64Index(range(1, 3)), - UInt64Index(range(1, 3)), - Float64Index(range(1, 3)), - RangeIndex(1, 3)]) - @pytest.mark.parametrize('scalar_td', [Timedelta(days=1), - Timedelta(days=1).to_timedelta64(), - Timedelta(days=1).to_pytimedelta()]) - def test_index_rdiv_timedelta(self, scalar_td, index): - expected = pd.TimedeltaIndex(['1 Day', '12 Hours']) - - result = scalar_td / index - tm.assert_index_equal(result, expected) - - with pytest.raises(TypeError): - index / scalar_td - - class Numeric(Base): def test_can_hold_identifiers(self): diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index bf2308cd8c097..2571498ca802c 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -891,22 +891,3 @@ def test_td64series_mul_timedeltalike_invalid(self, scalar_td): td1 * 
scalar_td with tm.assert_raises_regex(TypeError, pattern): scalar_td * td1 - - -class TestTimedeltaSeriesInvalidArithmeticOps(object): - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_td64series_pow_invalid(self, scalar_td): - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - # check that we are getting a TypeError - # with 'operate' (from core/ops.py) for the ops that are not - # defined - pattern = 'operate|unsupported|cannot|not supported' - with tm.assert_raises_regex(TypeError, pattern): - scalar_td ** td1 - with tm.assert_raises_regex(TypeError, pattern): - td1 ** scalar_td diff --git a/pandas/tests/test_arithmetic.py b/pandas/tests/test_arithmetic.py new file mode 100644 index 0000000000000..f15b629f15ae3 --- /dev/null +++ b/pandas/tests/test_arithmetic.py @@ -0,0 +1,119 @@ +# -*- coding: utf-8 -*- +# Arithmetc tests for DataFrame/Series/Index/Array classes that should +# behave identically. 
+from datetime import timedelta + +import pytest +import numpy as np + +import pandas as pd +import pandas.util.testing as tm + +from pandas import Timedelta + + +# ------------------------------------------------------------------ +# Numeric dtypes Arithmetic with Timedelta Scalar + +class TestNumericArraylikeArithmeticWithTimedeltaScalar(object): + + @pytest.mark.parametrize('box', [ + pd.Index, + pd.Series, + pytest.param(pd.DataFrame, + marks=pytest.mark.xfail(reason="block.eval incorrect", + strict=True)) + ]) + @pytest.mark.parametrize('index', [ + pd.Int64Index(range(1, 11)), + pd.UInt64Index(range(1, 11)), + pd.Float64Index(range(1, 11)), + pd.RangeIndex(1, 11)], + ids=lambda x: type(x).__name__) + @pytest.mark.parametrize('scalar_td', [ + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta()], + ids=lambda x: type(x).__name__) + def test_index_mul_timedelta(self, scalar_td, index, box): + # GH#19333 + + if (box is pd.Series and + type(scalar_td) is timedelta and index.dtype == 'f8'): + raise pytest.xfail(reason="Cannot multiply timedelta by float") + + expected = pd.timedelta_range('1 days', '10 days') + + index = tm.box_expected(index, box) + expected = tm.box_expected(expected, box) + + result = index * scalar_td + tm.assert_equal(result, expected) + + commute = scalar_td * index + tm.assert_equal(commute, expected) + + @pytest.mark.parametrize('box', [pd.Index, pd.Series, pd.DataFrame]) + @pytest.mark.parametrize('index', [ + pd.Int64Index(range(1, 3)), + pd.UInt64Index(range(1, 3)), + pd.Float64Index(range(1, 3)), + pd.RangeIndex(1, 3)], + ids=lambda x: type(x).__name__) + @pytest.mark.parametrize('scalar_td', [ + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta()], + ids=lambda x: type(x).__name__) + def test_index_rdiv_timedelta(self, scalar_td, index, box): + + if box is pd.Series and type(scalar_td) is timedelta: + raise pytest.xfail(reason="TODO: Figure out why this 
case fails") + if box is pd.DataFrame and isinstance(scalar_td, timedelta): + raise pytest.xfail(reason="TODO: Figure out why this case fails") + + expected = pd.TimedeltaIndex(['1 Day', '12 Hours']) + + index = tm.box_expected(index, box) + expected = tm.box_expected(expected, box) + + result = scalar_td / index + tm.assert_equal(result, expected) + + with pytest.raises(TypeError): + index / scalar_td + + +# ------------------------------------------------------------------ +# Timedelta64[ns] dtype Arithmetic Operations + + +class TestTimedeltaArraylikeInvalidArithmeticOps(object): + + @pytest.mark.parametrize('box', [ + pd.Index, + pd.Series, + pytest.param(pd.DataFrame, + marks=pytest.mark.xfail(reason="raises ValueError " + "instead of TypeError", + strict=True)) + ]) + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_td64series_pow_invalid(self, scalar_td, box): + td1 = pd.Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 = tm.box_expected(td1, box) + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = 'operate|unsupported|cannot|not supported' + with tm.assert_raises_regex(TypeError, pattern): + scalar_td ** td1 + + with tm.assert_raises_regex(TypeError, pattern): + td1 ** scalar_td diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 9697c991122dd..6dffbcb0b4f01 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1478,6 +1478,50 @@ def assert_panel_equal(left, right, assert item in left, msg +def assert_equal(left, right, **kwargs): + """ + Wrapper for tm.assert_*_equal to dispatch to the appropriate test function. 
+ + Parameters + ---------- + left : Index, Series, or DataFrame + right : Index, Series, or DataFrame + **kwargs + """ + if isinstance(left, pd.Index): + assert_index_equal(left, right, **kwargs) + elif isinstance(left, pd.Series): + assert_series_equal(left, right, **kwargs) + elif isinstance(left, pd.DataFrame): + assert_frame_equal(left, right, **kwargs) + else: + raise NotImplementedError(type(left)) + + +def box_expected(expected, box_cls): + """ + Helper function to wrap the expected output of a test in a given box_class. + + Parameters + ---------- + expected : np.ndarray, Index, Series + box_cls : {Index, Series, DataFrame} + + Returns + ------- + subclass of box_cls + """ + if box_cls is pd.Index: + expected = pd.Index(expected) + elif box_cls is pd.Series: + expected = pd.Series(expected) + elif box_cls is pd.DataFrame: + expected = pd.Series(expected).to_frame() + else: + raise NotImplementedError(box_cls) + return expected + + # ----------------------------------------------------------------------------- # Sparse
There are a ton of scattered arithmetic tests for Index/Series/DataFrame that _should_ be testing the same things, but in fact are haphazard. Fixing this given the current structure would entail an enormous about of code duplication. This PR if a proof of concept for gathering all those tests in one test_arithmetic.py file, parametrizing them, and ensuring that the relevant behavior is identical across arraylike classes. As Datetime/Timedelta/Period EA come online, the case for de-duplication will be even stronger. In this form it is really easy to track (via xfails) what behavior needs fixing.
https://api.github.com/repos/pandas-dev/pandas/pulls/22033
2018-07-23T21:14:22Z
2018-07-29T16:04:58Z
2018-07-29T16:04:58Z
2018-07-29T17:12:00Z
fix Error Indexing DafaFrame with a 0-d array
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 43e380abd8bb5..3193d30b31314 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -501,6 +501,7 @@ Indexing - Fixed ``DataFrame[np.nan]`` when columns are non-unique (:issue:`21428`) - Bug when indexing :class:`DatetimeIndex` with nanosecond resolution dates and timezones (:issue:`11679`) - Bug where indexing with a Numpy array containing negative values would mutate the indexer (:issue:`21867`) +- Bug where indexing with a 0-dimensional array would error with an unhelpful stack trace (:issue: `21946`) Missing ^^^^^^^ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index cf4b4fe6bc084..b7bef329714e4 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4178,6 +4178,8 @@ def _validate_indexer(self, form, key, kind): pass elif is_integer(key): pass + elif np.array(key).ndim == 0: + self._invalid_indexer(form, key) elif kind in ['iloc', 'getitem']: self._invalid_indexer(form, key) return key diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e0b6048b2ad64..1cfac6a22f902 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2692,8 +2692,9 @@ def is_nested_tuple(tup, labels): def is_list_like_indexer(key): # allow a list_like, but exclude NamedTuples which can be indexers - return is_list_like(key) and not (isinstance(key, tuple) and - type(key) is not tuple) + return (is_list_like(key) + and not (isinstance(key, tuple) and type(key) is not tuple) + and not np.array(key).ndim == 0) def is_label_like(key): diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 9c992770fc64c..9ddf22c0bde5f 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -890,6 +890,13 @@ def test_no_reference_cycle(self): del df assert wr() is None + def test_zero_index_iloc_raises(self): + df = pd.DataFrame([[1, 
2], [3, 4]], columns=['a', 'b']) + ar = np.array(0) + msg = 'Cannot index by location index with a non-integer key' + with assert_raises_regex(TypeError, msg): + df.iloc[ar] + class TestSeriesNoneCoercion(object): EXPECTED_RESULTS = [
closes #21946 - [x ] closes #xxxx - [x ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/22032
2018-07-23T21:00:11Z
2018-07-24T16:38:07Z
null
2018-07-24T16:38:07Z
REF/API: Stricter extension checking.
diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 905073645fcb3..4a0bf67f47bae 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -9,7 +9,8 @@ from pandas.core.dtypes.dtypes import ( registry, CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, IntervalDtype, - IntervalDtypeType, ExtensionDtype) + IntervalDtypeType, PandasExtensionDtype, ExtensionDtype, + _pandas_registry) from pandas.core.dtypes.generic import ( ABCCategorical, ABCPeriodIndex, ABCDatetimeIndex, ABCSeries, ABCSparseArray, ABCSparseSeries, ABCCategoricalIndex, ABCIndexClass, @@ -1709,17 +1710,9 @@ def is_extension_array_dtype(arr_or_dtype): Third-party libraries may implement arrays or types satisfying this interface as well. """ - from pandas.core.arrays import ExtensionArray - - if isinstance(arr_or_dtype, (ABCIndexClass, ABCSeries)): - arr_or_dtype = arr_or_dtype._values - - try: - arr_or_dtype = pandas_dtype(arr_or_dtype) - except TypeError: - pass - - return isinstance(arr_or_dtype, (ExtensionDtype, ExtensionArray)) + dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) + return (isinstance(dtype, ExtensionDtype) or + registry.find(dtype) is not None) def is_complex_dtype(arr_or_dtype): @@ -1999,12 +1992,12 @@ def pandas_dtype(dtype): return dtype # registered extension types - result = registry.find(dtype) + result = _pandas_registry.find(dtype) or registry.find(dtype) if result is not None: return result # un-registered extension types - elif isinstance(dtype, ExtensionDtype): + elif isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)): return dtype # try a numpy dtype diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index cf771a127a696..f53ccc86fc4ff 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -22,9 +22,9 @@ class Registry(object): -------- registry.register(MyExtensionDtype) """ - dtypes = [] + def __init__(self): 
+ self.dtypes = [] - @classmethod def register(self, dtype): """ Parameters @@ -50,7 +50,7 @@ def find(self, dtype): dtype_type = dtype if not isinstance(dtype, type): dtype_type = type(dtype) - if issubclass(dtype_type, (PandasExtensionDtype, ExtensionDtype)): + if issubclass(dtype_type, ExtensionDtype): return dtype return None @@ -65,6 +65,9 @@ def find(self, dtype): registry = Registry() +# TODO(Extension): remove the second registry once all internal extension +# dtypes are real extension dtypes. +_pandas_registry = Registry() class PandasExtensionDtype(_DtypeOpsMixin): @@ -822,7 +825,7 @@ def is_dtype(cls, dtype): # register the dtypes in search order -registry.register(DatetimeTZDtype) -registry.register(PeriodDtype) registry.register(IntervalDtype) registry.register(CategoricalDtype) +_pandas_registry.register(DatetimeTZDtype) +_pandas_registry.register(PeriodDtype) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 02ac7fc7d5ed7..55c841ba1fc46 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -9,7 +9,7 @@ from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, PeriodDtype, - IntervalDtype, CategoricalDtype, registry) + IntervalDtype, CategoricalDtype, registry, _pandas_registry) from pandas.core.dtypes.common import ( is_categorical_dtype, is_categorical, is_datetime64tz_dtype, is_datetimetz, @@ -775,21 +775,31 @@ def test_update_dtype_errors(self, bad_dtype): @pytest.mark.parametrize( 'dtype', - [DatetimeTZDtype, CategoricalDtype, - PeriodDtype, IntervalDtype]) + [CategoricalDtype, IntervalDtype]) def test_registry(dtype): assert dtype in registry.dtypes +@pytest.mark.parametrize('dtype', [DatetimeTZDtype, PeriodDtype]) +def test_pandas_registry(dtype): + assert dtype not in registry.dtypes + assert dtype in _pandas_registry.dtypes + + @pytest.mark.parametrize( 'dtype, expected', [('int64', None), ('interval', IntervalDtype()), ('interval[int64]', IntervalDtype()), 
('interval[datetime64[ns]]', IntervalDtype('datetime64[ns]')), - ('category', CategoricalDtype()), - ('period[D]', PeriodDtype('D')), - ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern'))]) + ('category', CategoricalDtype())]) def test_registry_find(dtype, expected): - assert registry.find(dtype) == expected + + +@pytest.mark.parametrize( + 'dtype, expected', + [('period[D]', PeriodDtype('D')), + ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern'))]) +def test_pandas_registry_find(dtype, expected): + assert _pandas_registry.find(dtype) == expected
Removes is_extension_array_dtype's handling of both arrays and dtypes. Now it handles just arrays, and we provide `is_extension_dtype` for checking whether a dtype is an extension dtype. It's the caller's responsibility to know whether they have an array or dtype. Closes #22021
https://api.github.com/repos/pandas-dev/pandas/pulls/22031
2018-07-23T19:41:56Z
2018-07-31T13:22:48Z
2018-07-31T13:22:47Z
2018-07-31T13:22:52Z
[BLD] [CLN] Close assorted issues - bare exceptions, unused func
diff --git a/pandas/_libs/skiplist.pxd b/pandas/_libs/skiplist.pxd index 82a0862112199..78f206962bcfc 100644 --- a/pandas/_libs/skiplist.pxd +++ b/pandas/_libs/skiplist.pxd @@ -3,8 +3,6 @@ from cython cimport Py_ssize_t -from numpy cimport double_t - cdef extern from "src/skiplist.h": ctypedef struct node_t: @@ -33,7 +31,7 @@ cdef extern from "src/skiplist.h": # Node itself not intended to be exposed. cdef class Node: cdef public: - double_t value + double value list next list width diff --git a/pandas/_libs/skiplist.pyx b/pandas/_libs/skiplist.pyx index 5ede31b24118d..23836ef7f4de9 100644 --- a/pandas/_libs/skiplist.pyx +++ b/pandas/_libs/skiplist.pyx @@ -9,9 +9,6 @@ from libc.math cimport log import numpy as np -cimport numpy as cnp -from numpy cimport double_t -cnp.import_array() # MSVC does not have log2! @@ -26,11 +23,11 @@ from random import random cdef class Node: # cdef public: - # double_t value + # double value # list next # list width - def __init__(self, double_t value, list next, list width): + def __init__(self, double value, list next, list width): self.value = value self.next = next self.width = width diff --git a/pandas/_libs/src/compat_helper.h b/pandas/_libs/src/compat_helper.h index bdff61d7d4150..116cd91070a60 100644 --- a/pandas/_libs/src/compat_helper.h +++ b/pandas/_libs/src/compat_helper.h @@ -11,7 +11,7 @@ The full license is in the LICENSE file, distributed with this software. 
#define PANDAS__LIBS_SRC_COMPAT_HELPER_H_ #include "Python.h" -#include "numpy_helper.h" +#include "helper.h" /* PySlice_GetIndicesEx changes signature in PY3 diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 63ab120833ba1..4dc4fcb00d84d 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -131,7 +131,7 @@ def _validate_timedelta_unit(arg): """ provide validation / translation for timedelta short units """ try: return _unit_map[arg] - except: + except (KeyError, TypeError): if arg is None: return 'ns' raise ValueError("invalid timedelta unit {arg} provided" diff --git a/pandas/io/s3.py b/pandas/io/s3.py index bd2286c5c8569..7d1360934fd53 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -3,7 +3,7 @@ try: import s3fs from botocore.exceptions import NoCredentialsError -except: +except ImportError: raise ImportError("The s3fs library is required to handle s3 files") if compat.PY3: diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 9567c08781856..136299a4b81be 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -27,7 +27,7 @@ import scipy _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >= LooseVersion('0.19.0')) -except: +except ImportError: _is_scipy_ge_0190 = False diff --git a/pandas/tests/io/generate_legacy_storage_files.py b/pandas/tests/io/generate_legacy_storage_files.py index eb40e5521f7f1..aa020ba4c0623 100755 --- a/pandas/tests/io/generate_legacy_storage_files.py +++ b/pandas/tests/io/generate_legacy_storage_files.py @@ -303,7 +303,7 @@ def write_legacy_pickles(output_dir): # make sure we are < 0.13 compat (in py3) try: from pandas.compat import zip, cPickle as pickle # noqa - except: + except ImportError: import pickle version = pandas.__version__ diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 45cbbd43cd6a8..c71e26ae56e8e 100644 --- a/pandas/tests/io/test_pickle.py +++ 
b/pandas/tests/io/test_pickle.py @@ -218,7 +218,7 @@ def c_unpickler(path): with open(path, 'rb') as fh: fh.seek(0) return c_pickle.load(fh) - except: + except ImportError: c_pickler = None c_unpickler = None diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index f8f742c5980ac..4b0edfce89174 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -468,7 +468,7 @@ def _transaction_test(self): with self.pandasSQL.run_transaction() as trans: trans.execute(ins_sql) raise Exception('error') - except: + except Exception: # ignore raised exception pass res = self.pandasSQL.read_query('SELECT * FROM test_trans') diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 2bc44cb1c683f..ab3fdd8cbf84f 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -27,7 +27,7 @@ import scipy _is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >= LooseVersion('0.19.0')) -except: +except ImportError: _is_scipy_ge_0190 = False diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 7d5753d03f4fc..82cd44113cb25 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -1,7 +1,6 @@ from pandas.compat import callable, signature, PY2 from pandas._libs.properties import cache_readonly # noqa import inspect -import types import warnings from textwrap import dedent, wrap from functools import wraps, update_wrapper, WRAPPER_ASSIGNMENTS @@ -339,48 +338,3 @@ def make_signature(func): if spec.keywords: args.append('**' + spec.keywords) return args, spec.args - - -class docstring_wrapper(object): - """ - Decorator to wrap a function and provide - a dynamically evaluated doc-string. 
- - Parameters - ---------- - func : callable - creator : callable - return the doc-string - default : str, optional - return this doc-string on error - """ - _attrs = ['__module__', '__name__', - '__qualname__', '__annotations__'] - - def __init__(self, func, creator, default=None): - self.func = func - self.creator = creator - self.default = default - update_wrapper( - self, func, [attr for attr in self._attrs - if hasattr(func, attr)]) - - def __get__(self, instance, cls=None): - - # we are called with a class - if instance is None: - return self - - # we want to return the actual passed instance - return types.MethodType(self, instance) - - def __call__(self, *args, **kwargs): - return self.func(*args, **kwargs) - - @property - def __doc__(self): - try: - return self.creator() - except Exception as exc: - msg = self.default or str(exc) - return msg diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py index 01198fc541e0c..5600834f3b615 100644 --- a/pandas/util/_print_versions.py +++ b/pandas/util/_print_versions.py @@ -114,7 +114,7 @@ def show_versions(as_json=False): if (as_json): try: import json - except: + except ImportError: import simplejson as json j = dict(system=dict(sys_info), dependencies=dict(deps_blob)) diff --git a/setup.py b/setup.py index d265733738425..f058c8a6e3c99 100755 --- a/setup.py +++ b/setup.py @@ -438,9 +438,12 @@ def get_tag(self): # enable coverage by building cython files by setting the environment variable -# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) +# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext +# with `--with-cython-coverage`enabled linetrace = os.environ.get('PANDAS_CYTHON_COVERAGE', False) -CYTHON_TRACE = str(int(bool(linetrace))) +if '--with-cython-coverage' in sys.argv: + linetrace = True + sys.argv.remove('--with-cython-coverage') # Note: if not using `cythonize`, coverage can be enabled by # pinning `ext.cython_directives = directives` to each ext in extensions.
<s>- Removing numpy cimport from cython modules where feasible fixes the npy_deprecated1.7 ... warning.</s> <b>update</b> Nope! - Catch specific exceptions in a handful of places. - Removes unused function #19676 - Implement command-line option to enable cython coverage #21991 closes #19676 closes #21991 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/22030
2018-07-23T19:17:16Z
2018-07-26T12:54:08Z
2018-07-26T12:54:08Z
2018-07-26T16:22:22Z
Separate out internals.concat, internals.managers
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 68698f45d5623..55f2e06a1a976 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -19,6 +19,9 @@ cdef extern from "compat_helper.h": Py_ssize_t *slicelength) except -1 +from algos import ensure_int64 + + cdef class BlockPlacement: # __slots__ = '_as_slice', '_as_array', '_len' cdef slice _as_slice @@ -436,3 +439,26 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True): i += 1 yield blkno, result + + +def get_blkno_placements(blknos, blk_count, group=True): + """ + + Parameters + ---------- + blknos : array of int64 + blk_count : int + group : bool + + Returns + ------- + iterator + yield (BlockPlacement, blkno) + + """ + + blknos = ensure_int64(blknos) + + # FIXME: blk_count is unused, but it may avoid the use of dicts in cython + for blkno, indexer in get_blkno_indexers(blknos, group): + yield blkno, BlockPlacement(indexer) diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index a4cd301806569..22caa577c2891 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -1,2549 +1,14 @@ # -*- coding: utf-8 -*- -import copy -import itertools -import operator -from collections import defaultdict -from functools import partial - -import numpy as np - -from pandas._libs import internals as libinternals - -from pandas.core.base import PandasObject - -from pandas.core.dtypes.dtypes import ( - ExtensionDtype, - PandasExtensionDtype) -from pandas.core.dtypes.common import ( - _NS_DTYPE, - ensure_int64, - is_timedelta64_dtype, - is_datetime64_dtype, is_datetimetz, - is_categorical_dtype, - is_datetimelike_v_numeric, - is_float_dtype, is_numeric_dtype, - is_numeric_v_string_like, is_extension_type, - is_extension_array_dtype, - is_scalar, - _get_dtype) -from pandas.core.dtypes.cast import ( - maybe_promote, - infer_dtype_from_scalar, - find_common_type) -from pandas.core.dtypes.missing import isna -import 
pandas.core.dtypes.concat as _concat -from pandas.core.dtypes.generic import ABCSeries, ABCExtensionArray - -import pandas.core.algorithms as algos - -from pandas.core.index import Index, MultiIndex, ensure_index -from pandas.core.indexing import maybe_convert_indices -from pandas.io.formats.printing import pprint_thing - -from pandas.core.sparse.array import _maybe_to_sparse -from pandas._libs import lib, tslibs -from pandas._libs.internals import BlockPlacement - -from pandas.util._decorators import cache_readonly -from pandas.util._validators import validate_bool_kwarg -from pandas.compat import range, map, zip, u - -from .blocks import ( - Block, - _extend_blocks, _merge_blocks, _safe_reshape, - make_block, get_block_type) from .blocks import ( # noqa:F401 _block2d_to_blocknd, _factor_indexer, _block_shape, # io.pytables + _safe_reshape, # io.packers + make_block, # io.pytables, io.packers FloatBlock, IntBlock, ComplexBlock, BoolBlock, ObjectBlock, TimeDeltaBlock, DatetimeBlock, DatetimeTZBlock, - CategoricalBlock, ExtensionBlock, SparseBlock, ScalarBlock) - -# TODO: flexible with index=None and/or items=None - - -class BlockManager(PandasObject): - """ - Core internal data structure to implement DataFrame, Series, Panel, etc. - - Manage a bunch of labeled 2D mixed-type ndarrays. 
Essentially it's a - lightweight blocked set of labeled data to be manipulated by the DataFrame - public API class - - Attributes - ---------- - shape - ndim - axes - values - items - - Methods - ------- - set_axis(axis, new_labels) - copy(deep=True) - - get_dtype_counts - get_ftype_counts - get_dtypes - get_ftypes - - apply(func, axes, block_filter_fn) - - get_bool_data - get_numeric_data - - get_slice(slice_like, axis) - get(label) - iget(loc) - get_scalar(label_tup) - - take(indexer, axis) - reindex_axis(new_labels, axis) - reindex_indexer(new_labels, indexer, axis) - - delete(label) - insert(loc, label, value) - set(label, value) - - Parameters - ---------- - - - Notes - ----- - This is *not* a public API class - """ - __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', - '_is_consolidated', '_blknos', '_blklocs'] - - def __init__(self, blocks, axes, do_integrity_check=True): - self.axes = [ensure_index(ax) for ax in axes] - self.blocks = tuple(blocks) - - for block in blocks: - if block.is_sparse: - if len(block.mgr_locs) != 1: - raise AssertionError("Sparse block refers to multiple " - "items") - else: - if self.ndim != block.ndim: - raise AssertionError( - 'Number of Block dimensions ({block}) must equal ' - 'number of axes ({self})'.format(block=block.ndim, - self=self.ndim)) - - if do_integrity_check: - self._verify_integrity() - - self._consolidate_check() - - self._rebuild_blknos_and_blklocs() - - def make_empty(self, axes=None): - """ return an empty BlockManager with the items axis of len 0 """ - if axes is None: - axes = [ensure_index([])] + [ensure_index(a) - for a in self.axes[1:]] - - # preserve dtype if possible - if self.ndim == 1: - blocks = np.array([], dtype=self.array_dtype) - else: - blocks = [] - return self.__class__(blocks, axes) - - def __nonzero__(self): - return True - - # Python3 compat - __bool__ = __nonzero__ - - @property - def shape(self): - return tuple(len(ax) for ax in self.axes) - - @property - def 
ndim(self): - return len(self.axes) - - def set_axis(self, axis, new_labels): - new_labels = ensure_index(new_labels) - old_len = len(self.axes[axis]) - new_len = len(new_labels) - - if new_len != old_len: - raise ValueError( - 'Length mismatch: Expected axis has {old} elements, new ' - 'values have {new} elements'.format(old=old_len, new=new_len)) - - self.axes[axis] = new_labels - - def rename_axis(self, mapper, axis, copy=True, level=None): - """ - Rename one of axes. - - Parameters - ---------- - mapper : unary callable - axis : int - copy : boolean, default True - level : int, default None - - """ - obj = self.copy(deep=copy) - obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) - return obj - - def add_prefix(self, prefix): - f = partial('{prefix}{}'.format, prefix=prefix) - return self.rename_axis(f, axis=0) - - def add_suffix(self, suffix): - f = partial('{}{suffix}'.format, suffix=suffix) - return self.rename_axis(f, axis=0) - - @property - def _is_single_block(self): - if self.ndim == 1: - return True - - if len(self.blocks) != 1: - return False - - blk = self.blocks[0] - return (blk.mgr_locs.is_slice_like and - blk.mgr_locs.as_slice == slice(0, len(self), 1)) - - def _rebuild_blknos_and_blklocs(self): - """ - Update mgr._blknos / mgr._blklocs. 
- """ - new_blknos = np.empty(self.shape[0], dtype=np.int64) - new_blklocs = np.empty(self.shape[0], dtype=np.int64) - new_blknos.fill(-1) - new_blklocs.fill(-1) - - for blkno, blk in enumerate(self.blocks): - rl = blk.mgr_locs - new_blknos[rl.indexer] = blkno - new_blklocs[rl.indexer] = np.arange(len(rl)) - - if (new_blknos == -1).any(): - raise AssertionError("Gaps in blk ref_locs") - - self._blknos = new_blknos - self._blklocs = new_blklocs - - # make items read only for now - def _get_items(self): - return self.axes[0] - - items = property(fget=_get_items) - - def _get_counts(self, f): - """ return a dict of the counts of the function in BlockManager """ - self._consolidate_inplace() - counts = dict() - for b in self.blocks: - v = f(b) - counts[v] = counts.get(v, 0) + b.shape[0] - return counts - - def get_dtype_counts(self): - return self._get_counts(lambda b: b.dtype.name) - - def get_ftype_counts(self): - return self._get_counts(lambda b: b.ftype) - - def get_dtypes(self): - dtypes = np.array([blk.dtype for blk in self.blocks]) - return algos.take_1d(dtypes, self._blknos, allow_fill=False) - - def get_ftypes(self): - ftypes = np.array([blk.ftype for blk in self.blocks]) - return algos.take_1d(ftypes, self._blknos, allow_fill=False) - - def __getstate__(self): - block_values = [b.values for b in self.blocks] - block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] - axes_array = [ax for ax in self.axes] - - extra_state = { - '0.14.1': { - 'axes': axes_array, - 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) - for b in self.blocks] - } - } - - # First three elements of the state are to maintain forward - # compatibility with 0.13.1. 
- return axes_array, block_values, block_items, extra_state - - def __setstate__(self, state): - def unpickle_block(values, mgr_locs): - # numpy < 1.7 pickle compat - if values.dtype == 'M8[us]': - values = values.astype('M8[ns]') - return make_block(values, placement=mgr_locs) - - if (isinstance(state, tuple) and len(state) >= 4 and - '0.14.1' in state[3]): - state = state[3]['0.14.1'] - self.axes = [ensure_index(ax) for ax in state['axes']] - self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) - for b in state['blocks']) - else: - # discard anything after 3rd, support beta pickling format for a - # little while longer - ax_arrays, bvalues, bitems = state[:3] - - self.axes = [ensure_index(ax) for ax in ax_arrays] - - if len(bitems) == 1 and self.axes[0].equals(bitems[0]): - # This is a workaround for pre-0.14.1 pickles that didn't - # support unpickling multi-block frames/panels with non-unique - # columns/items, because given a manager with items ["a", "b", - # "a"] there's no way of knowing which block's "a" is where. - # - # Single-block case can be supported under the assumption that - # block items corresponded to manager items 1-to-1. 
- all_mgr_locs = [slice(0, len(bitems[0]))] - else: - all_mgr_locs = [self.axes[0].get_indexer(blk_items) - for blk_items in bitems] - - self.blocks = tuple( - unpickle_block(values, mgr_locs) - for values, mgr_locs in zip(bvalues, all_mgr_locs)) - - self._post_setstate() - - def _post_setstate(self): - self._is_consolidated = False - self._known_consolidated = False - self._rebuild_blknos_and_blklocs() - - def __len__(self): - return len(self.items) - - def __unicode__(self): - output = pprint_thing(self.__class__.__name__) - for i, ax in enumerate(self.axes): - if i == 0: - output += u('\nItems: {ax}'.format(ax=ax)) - else: - output += u('\nAxis {i}: {ax}'.format(i=i, ax=ax)) - - for block in self.blocks: - output += u('\n{block}'.format(block=pprint_thing(block))) - return output - - def _verify_integrity(self): - mgr_shape = self.shape - tot_items = sum(len(x.mgr_locs) for x in self.blocks) - for block in self.blocks: - if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: - construction_error(tot_items, block.shape[1:], self.axes) - if len(self.items) != tot_items: - raise AssertionError('Number of manager items must equal union of ' - 'block items\n# manager items: {0}, # ' - 'tot_items: {1}'.format( - len(self.items), tot_items)) - - def apply(self, f, axes=None, filter=None, do_integrity_check=False, - consolidate=True, **kwargs): - """ - iterate over the blocks, collect and create a new block manager - - Parameters - ---------- - f : the callable or function name to operate on at the block level - axes : optional (if not supplied, use self.axes) - filter : list, if supplied, only call the block if the filter is in - the block - do_integrity_check : boolean, default False. Do the block manager - integrity check - consolidate: boolean, default True. 
Join together blocks having same - dtype - - Returns - ------- - Block Manager (new object) - - """ - - result_blocks = [] - - # filter kwarg is used in replace-* family of methods - if filter is not None: - filter_locs = set(self.items.get_indexer_for(filter)) - if len(filter_locs) == len(self.items): - # All items are included, as if there were no filtering - filter = None - else: - kwargs['filter'] = filter_locs - - if consolidate: - self._consolidate_inplace() - - if f == 'where': - align_copy = True - if kwargs.get('align', True): - align_keys = ['other', 'cond'] - else: - align_keys = ['cond'] - elif f == 'putmask': - align_copy = False - if kwargs.get('align', True): - align_keys = ['new', 'mask'] - else: - align_keys = ['mask'] - elif f == 'eval': - align_copy = False - align_keys = ['other'] - elif f == 'fillna': - # fillna internally does putmask, maybe it's better to do this - # at mgr, not block level? - align_copy = False - align_keys = ['value'] - else: - align_keys = [] - - # TODO(EA): may interfere with ExtensionBlock.setitem for blocks - # with a .values attribute. 
- aligned_args = dict((k, kwargs[k]) - for k in align_keys - if hasattr(kwargs[k], 'values') and - not isinstance(kwargs[k], ABCExtensionArray)) - - for b in self.blocks: - if filter is not None: - if not b.mgr_locs.isin(filter_locs).any(): - result_blocks.append(b) - continue - - if aligned_args: - b_items = self.items[b.mgr_locs.indexer] - - for k, obj in aligned_args.items(): - axis = getattr(obj, '_info_axis_number', 0) - kwargs[k] = obj.reindex(b_items, axis=axis, - copy=align_copy) - - kwargs['mgr'] = self - applied = getattr(b, f)(**kwargs) - result_blocks = _extend_blocks(applied, result_blocks) - - if len(result_blocks) == 0: - return self.make_empty(axes or self.axes) - bm = self.__class__(result_blocks, axes or self.axes, - do_integrity_check=do_integrity_check) - bm._consolidate_inplace() - return bm - - def reduction(self, f, axis=0, consolidate=True, transposed=False, - **kwargs): - """ - iterate over the blocks, collect and create a new block manager. - This routine is intended for reduction type operations and - will do inference on the generated blocks. - - Parameters - ---------- - f: the callable or function name to operate on at the block level - axis: reduction axis, default 0 - consolidate: boolean, default True. 
Join together blocks having same - dtype - transposed: boolean, default False - we are holding transposed data - - Returns - ------- - Block Manager (new object) - - """ - - if consolidate: - self._consolidate_inplace() - - axes, blocks = [], [] - for b in self.blocks: - kwargs['mgr'] = self - axe, block = getattr(b, f)(axis=axis, **kwargs) - - axes.append(axe) - blocks.append(block) - - # note that some DatetimeTZ, Categorical are always ndim==1 - ndim = {b.ndim for b in blocks} - - if 2 in ndim: - - new_axes = list(self.axes) - - # multiple blocks that are reduced - if len(blocks) > 1: - new_axes[1] = axes[0] - - # reset the placement to the original - for b, sb in zip(blocks, self.blocks): - b.mgr_locs = sb.mgr_locs - - else: - new_axes[axis] = Index(np.concatenate( - [ax.values for ax in axes])) - - if transposed: - new_axes = new_axes[::-1] - blocks = [b.make_block(b.values.T, - placement=np.arange(b.shape[1]) - ) for b in blocks] - - return self.__class__(blocks, new_axes) - - # 0 ndim - if 0 in ndim and 1 not in ndim: - values = np.array([b.values for b in blocks]) - if len(values) == 1: - return values.item() - blocks = [make_block(values, ndim=1)] - axes = Index([ax[0] for ax in axes]) - - # single block - values = _concat._concat_compat([b.values for b in blocks]) - - # compute the orderings of our original data - if len(self.blocks) > 1: - - indexer = np.empty(len(self.axes[0]), dtype=np.intp) - i = 0 - for b in self.blocks: - for j in b.mgr_locs: - indexer[j] = i - i = i + 1 - - values = values.take(indexer) - - return SingleBlockManager( - [make_block(values, - ndim=1, - placement=np.arange(len(values)))], - axes[0]) - - def isna(self, func, **kwargs): - return self.apply('apply', func=func, **kwargs) - - def where(self, **kwargs): - return self.apply('where', **kwargs) - - def eval(self, **kwargs): - return self.apply('eval', **kwargs) - - def quantile(self, **kwargs): - return self.reduction('quantile', **kwargs) - - def setitem(self, **kwargs): - 
return self.apply('setitem', **kwargs) - - def putmask(self, **kwargs): - return self.apply('putmask', **kwargs) - - def diff(self, **kwargs): - return self.apply('diff', **kwargs) - - def interpolate(self, **kwargs): - return self.apply('interpolate', **kwargs) - - def shift(self, **kwargs): - return self.apply('shift', **kwargs) - - def fillna(self, **kwargs): - return self.apply('fillna', **kwargs) - - def downcast(self, **kwargs): - return self.apply('downcast', **kwargs) - - def astype(self, dtype, **kwargs): - return self.apply('astype', dtype=dtype, **kwargs) - - def convert(self, **kwargs): - return self.apply('convert', **kwargs) - - def replace(self, **kwargs): - return self.apply('replace', **kwargs) - - def replace_list(self, src_list, dest_list, inplace=False, regex=False, - mgr=None): - """ do a list replace """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - - if mgr is None: - mgr = self - - # figure out our mask a-priori to avoid repeated replacements - values = self.as_array() - - def comp(s): - if isna(s): - return isna(values) - return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq) - - masks = [comp(s) for i, s in enumerate(src_list)] - - result_blocks = [] - src_len = len(src_list) - 1 - for blk in self.blocks: - - # its possible to get multiple result blocks here - # replace ALWAYS will return a list - rb = [blk if inplace else blk.copy()] - for i, (s, d) in enumerate(zip(src_list, dest_list)): - new_rb = [] - for b in rb: - if b.dtype == np.object_: - convert = i == src_len - result = b.replace(s, d, inplace=inplace, regex=regex, - mgr=mgr, convert=convert) - new_rb = _extend_blocks(result, new_rb) - else: - # get our mask for this element, sized to this - # particular block - m = masks[i][b.mgr_locs.indexer] - if m.any(): - b = b.coerce_to_target_dtype(d) - new_rb.extend(b.putmask(m, d, inplace=True)) - else: - new_rb.append(b) - rb = new_rb - result_blocks.extend(rb) - - bm = self.__class__(result_blocks, self.axes) - 
bm._consolidate_inplace() - return bm - - def reshape_nd(self, axes, **kwargs): - """ a 2d-nd reshape operation on a BlockManager """ - return self.apply('reshape_nd', axes=axes, **kwargs) - - def is_consolidated(self): - """ - Return True if more than one block with the same dtype - """ - if not self._known_consolidated: - self._consolidate_check() - return self._is_consolidated - - def _consolidate_check(self): - ftypes = [blk.ftype for blk in self.blocks] - self._is_consolidated = len(ftypes) == len(set(ftypes)) - self._known_consolidated = True - - @property - def is_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return len(self.blocks) > 1 - - @property - def is_numeric_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return all(block.is_numeric for block in self.blocks) - - @property - def is_datelike_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return any(block.is_datelike for block in self.blocks) - - @property - def any_extension_types(self): - """Whether any of the blocks in this manager are extension blocks""" - return any(block.is_extension for block in self.blocks) - - @property - def is_view(self): - """ return a boolean if we are a single block and are a view """ - if len(self.blocks) == 1: - return self.blocks[0].is_view - - # It is technically possible to figure out which blocks are views - # e.g. [ b.values.base is not None for b in self.blocks ] - # but then we have the case of possibly some blocks being a view - # and some blocks not. setting in theory is possible on the non-view - # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit - # complicated - - return False - - def get_bool_data(self, copy=False): - """ - Parameters - ---------- - copy : boolean, default False - Whether to copy the blocks - """ - self._consolidate_inplace() - return self.combine([b for b in self.blocks if b.is_bool], copy) - - def get_numeric_data(self, copy=False): - """ - Parameters - ---------- - copy : boolean, default False - Whether to copy the blocks - """ - self._consolidate_inplace() - return self.combine([b for b in self.blocks if b.is_numeric], copy) - - def combine(self, blocks, copy=True): - """ return a new manager with the blocks """ - if len(blocks) == 0: - return self.make_empty() - - # FIXME: optimization potential - indexer = np.sort(np.concatenate([b.mgr_locs.as_array - for b in blocks])) - inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) - - new_blocks = [] - for b in blocks: - b = b.copy(deep=copy) - b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array, - axis=0, allow_fill=False) - new_blocks.append(b) - - axes = list(self.axes) - axes[0] = self.items.take(indexer) - - return self.__class__(new_blocks, axes, do_integrity_check=False) - - def get_slice(self, slobj, axis=0): - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - if axis == 0: - new_blocks = self._slice_take_blocks_ax0(slobj) - else: - slicer = [slice(None)] * (axis + 1) - slicer[axis] = slobj - slicer = tuple(slicer) - new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] - - new_axes = list(self.axes) - new_axes[axis] = new_axes[axis][slobj] - - bm = self.__class__(new_blocks, new_axes, do_integrity_check=False) - bm._consolidate_inplace() - return bm - - def __contains__(self, item): - return item in self.items - - @property - def nblocks(self): - return len(self.blocks) - - def copy(self, deep=True, mgr=None): - """ - Make deep or shallow copy of BlockManager - - Parameters - ---------- - deep : boolean o rstring, default True - If False, return 
shallow copy (do not copy data) - If 'all', copy data and a deep copy of the index - - Returns - ------- - copy : BlockManager - """ - - # this preserves the notion of view copying of axes - if deep: - if deep == 'all': - copy = lambda ax: ax.copy(deep=True) - else: - copy = lambda ax: ax.view() - new_axes = [copy(ax) for ax in self.axes] - else: - new_axes = list(self.axes) - return self.apply('copy', axes=new_axes, deep=deep, - do_integrity_check=False) - - def as_array(self, transpose=False, items=None): - """Convert the blockmanager data into an numpy array. - - Parameters - ---------- - transpose : boolean, default False - If True, transpose the return array - items : list of strings or None - Names of block items that will be included in the returned - array. ``None`` means that all block items will be used - - Returns - ------- - arr : ndarray - """ - if len(self.blocks) == 0: - arr = np.empty(self.shape, dtype=float) - return arr.transpose() if transpose else arr - - if items is not None: - mgr = self.reindex_axis(items, axis=0) - else: - mgr = self - - if self._is_single_block or not self.is_mixed_type: - arr = mgr.blocks[0].get_values() - else: - arr = mgr._interleave() - - return arr.transpose() if transpose else arr - - def _interleave(self): - """ - Return ndarray from blocks with specified item order - Items must be contained in the blocks - """ - dtype = _interleaved_dtype(self.blocks) - - result = np.empty(self.shape, dtype=dtype) - - if result.shape[0] == 0: - # Workaround for numpy 1.7 bug: - # - # >>> a = np.empty((0,10)) - # >>> a[slice(0,0)] - # array([], shape=(0, 10), dtype=float64) - # >>> a[[]] - # Traceback (most recent call last): - # File "<stdin>", line 1, in <module> - # IndexError: index 0 is out of bounds for axis 0 with size 0 - return result - - itemmask = np.zeros(self.shape[0]) - - for blk in self.blocks: - rl = blk.mgr_locs - result[rl.indexer] = blk.get_values(dtype) - itemmask[rl.indexer] = 1 - - if not itemmask.all(): - raise 
AssertionError('Some items were not contained in blocks') - - return result - - def to_dict(self, copy=True): - """ - Return a dict of str(dtype) -> BlockManager - - Parameters - ---------- - copy : boolean, default True - - Returns - ------- - values : a dict of dtype -> BlockManager - - Notes - ----- - This consolidates based on str(dtype) - """ - self._consolidate_inplace() - - bd = {} - for b in self.blocks: - bd.setdefault(str(b.dtype), []).append(b) - - return {dtype: self.combine(blocks, copy=copy) - for dtype, blocks in bd.items()} - - def xs(self, key, axis=1, copy=True, takeable=False): - if axis < 1: - raise AssertionError( - 'Can only take xs across axis >= 1, got {ax}'.format(ax=axis)) - - # take by position - if takeable: - loc = key - else: - loc = self.axes[axis].get_loc(key) - - slicer = [slice(None, None) for _ in range(self.ndim)] - slicer[axis] = loc - slicer = tuple(slicer) - - new_axes = list(self.axes) - - # could be an array indexer! - if isinstance(loc, (slice, np.ndarray)): - new_axes[axis] = new_axes[axis][loc] - else: - new_axes.pop(axis) - - new_blocks = [] - if len(self.blocks) > 1: - # we must copy here as we are mixed type - for blk in self.blocks: - newb = make_block(values=blk.values[slicer], - klass=blk.__class__, - placement=blk.mgr_locs) - new_blocks.append(newb) - elif len(self.blocks) == 1: - block = self.blocks[0] - vals = block.values[slicer] - if copy: - vals = vals.copy() - new_blocks = [make_block(values=vals, - placement=block.mgr_locs, - klass=block.__class__)] - - return self.__class__(new_blocks, new_axes) - - def fast_xs(self, loc): - """ - get a cross sectional for a given location in the - items ; handle dups - - return the result, is *could* be a view in the case of a - single block - """ - if len(self.blocks) == 1: - return self.blocks[0].iget((slice(None), loc)) - - items = self.items - - # non-unique (GH4726) - if not items.is_unique: - result = self._interleave() - if self.ndim == 2: - result = result.T - 
return result[loc] - - # unique - dtype = _interleaved_dtype(self.blocks) - n = len(items) - result = np.empty(n, dtype=dtype) - for blk in self.blocks: - # Such assignment may incorrectly coerce NaT to None - # result[blk.mgr_locs] = blk._slice((slice(None), loc)) - for i, rl in enumerate(blk.mgr_locs): - result[rl] = blk._try_coerce_result(blk.iget((i, loc))) - - return result - - def consolidate(self): - """ - Join together blocks having same dtype - - Returns - ------- - y : BlockManager - """ - if self.is_consolidated(): - return self - - bm = self.__class__(self.blocks, self.axes) - bm._is_consolidated = False - bm._consolidate_inplace() - return bm - - def _consolidate_inplace(self): - if not self.is_consolidated(): - self.blocks = tuple(_consolidate(self.blocks)) - self._is_consolidated = True - self._known_consolidated = True - self._rebuild_blknos_and_blklocs() - - def get(self, item, fastpath=True): - """ - Return values for selected item (ndarray or BlockManager). - """ - if self.items.is_unique: - - if not isna(item): - loc = self.items.get_loc(item) - else: - indexer = np.arange(len(self.items))[isna(self.items)] - - # allow a single nan location indexer - if not is_scalar(indexer): - if len(indexer) == 1: - loc = indexer.item() - else: - raise ValueError("cannot label index with a null key") - - return self.iget(loc, fastpath=fastpath) - else: - - if isna(item): - raise TypeError("cannot label index with a null key") - - indexer = self.items.get_indexer_for([item]) - return self.reindex_indexer(new_axis=self.items[indexer], - indexer=indexer, axis=0, - allow_dups=True) - - def iget(self, i, fastpath=True): - """ - Return the data as a SingleBlockManager if fastpath=True and possible - - Otherwise return as a ndarray - """ - block = self.blocks[self._blknos[i]] - values = block.iget(self._blklocs[i]) - if not fastpath or not block._box_to_block_values or values.ndim != 1: - return values - - # fastpath shortcut for select a single-dim from a 2-dim BM 
- return SingleBlockManager( - [block.make_block_same_class(values, - placement=slice(0, len(values)), - ndim=1)], - self.axes[1]) - - def get_scalar(self, tup): - """ - Retrieve single item - """ - full_loc = [ax.get_loc(x) for ax, x in zip(self.axes, tup)] - blk = self.blocks[self._blknos[full_loc[0]]] - values = blk.values - - # FIXME: this may return non-upcasted types? - if values.ndim == 1: - return values[full_loc[1]] - - full_loc[0] = self._blklocs[full_loc[0]] - return values[tuple(full_loc)] - - def delete(self, item): - """ - Delete selected item (items if non-unique) in-place. - """ - indexer = self.items.get_loc(item) - - is_deleted = np.zeros(self.shape[0], dtype=np.bool_) - is_deleted[indexer] = True - ref_loc_offset = -is_deleted.cumsum() - - is_blk_deleted = [False] * len(self.blocks) - - if isinstance(indexer, int): - affected_start = indexer - else: - affected_start = is_deleted.nonzero()[0][0] - - for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): - blk = self.blocks[blkno] - bml = blk.mgr_locs - blk_del = is_deleted[bml.indexer].nonzero()[0] - - if len(blk_del) == len(bml): - is_blk_deleted[blkno] = True - continue - elif len(blk_del) != 0: - blk.delete(blk_del) - bml = blk.mgr_locs - - blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) - - # FIXME: use Index.delete as soon as it uses fastpath=True - self.axes[0] = self.items[~is_deleted] - self.blocks = tuple(b for blkno, b in enumerate(self.blocks) - if not is_blk_deleted[blkno]) - self._shape = None - self._rebuild_blknos_and_blklocs() - - def set(self, item, value, check=False): - """ - Set new item in-place. Does not consolidate. 
Adds new Block if not - contained in the current set of items - if check, then validate that we are not setting the same data in-place - """ - # FIXME: refactor, clearly separate broadcasting & zip-like assignment - # can prob also fix the various if tests for sparse/categorical - - # TODO(EA): Remove an is_extension_ when all extension types satisfy - # the interface - value_is_extension_type = (is_extension_type(value) or - is_extension_array_dtype(value)) - - # categorical/spares/datetimetz - if value_is_extension_type: - - def value_getitem(placement): - return value - else: - if value.ndim == self.ndim - 1: - value = _safe_reshape(value, (1,) + value.shape) - - def value_getitem(placement): - return value - else: - - def value_getitem(placement): - return value[placement.indexer] - - if value.shape[1:] != self.shape[1:]: - raise AssertionError('Shape of new values must be compatible ' - 'with manager shape') - - try: - loc = self.items.get_loc(item) - except KeyError: - # This item wasn't present, just insert at end - self.insert(len(self.items), item, value) - return - - if isinstance(loc, int): - loc = [loc] - - blknos = self._blknos[loc] - blklocs = self._blklocs[loc].copy() - - unfit_mgr_locs = [] - unfit_val_locs = [] - removed_blknos = [] - for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks), - group=True): - blk = self.blocks[blkno] - blk_locs = blklocs[val_locs.indexer] - if blk.should_store(value): - blk.set(blk_locs, value_getitem(val_locs), check=check) - else: - unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) - unfit_val_locs.append(val_locs) - - # If all block items are unfit, schedule the block for removal. 
- if len(val_locs) == len(blk.mgr_locs): - removed_blknos.append(blkno) - else: - self._blklocs[blk.mgr_locs.indexer] = -1 - blk.delete(blk_locs) - self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) - - if len(removed_blknos): - # Remove blocks & update blknos accordingly - is_deleted = np.zeros(self.nblocks, dtype=np.bool_) - is_deleted[removed_blknos] = True - - new_blknos = np.empty(self.nblocks, dtype=np.int64) - new_blknos.fill(-1) - new_blknos[~is_deleted] = np.arange(self.nblocks - - len(removed_blknos)) - self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, - allow_fill=False) - self.blocks = tuple(blk for i, blk in enumerate(self.blocks) - if i not in set(removed_blknos)) - - if unfit_val_locs: - unfit_mgr_locs = np.concatenate(unfit_mgr_locs) - unfit_count = len(unfit_mgr_locs) - - new_blocks = [] - if value_is_extension_type: - # This code (ab-)uses the fact that sparse blocks contain only - # one item. - new_blocks.extend( - make_block(values=value.copy(), ndim=self.ndim, - placement=slice(mgr_loc, mgr_loc + 1)) - for mgr_loc in unfit_mgr_locs) - - self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + - len(self.blocks)) - self._blklocs[unfit_mgr_locs] = 0 - - else: - # unfit_val_locs contains BlockPlacement objects - unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) - - new_blocks.append( - make_block(values=value_getitem(unfit_val_items), - ndim=self.ndim, placement=unfit_mgr_locs)) - - self._blknos[unfit_mgr_locs] = len(self.blocks) - self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) - - self.blocks += tuple(new_blocks) - - # Newly created block's dtype may already be present. - self._known_consolidated = False - - def insert(self, loc, item, value, allow_duplicates=False): - """ - Insert item at selected position. 
- - Parameters - ---------- - loc : int - item : hashable - value : array_like - allow_duplicates: bool - If False, trying to insert non-unique item will raise - - """ - if not allow_duplicates and item in self.items: - # Should this be a different kind of error?? - raise ValueError('cannot insert {}, already exists'.format(item)) - - if not isinstance(loc, int): - raise TypeError("loc must be int") - - # insert to the axis; this could possibly raise a TypeError - new_axis = self.items.insert(loc, item) - - block = make_block(values=value, ndim=self.ndim, - placement=slice(loc, loc + 1)) - - for blkno, count in _fast_count_smallints(self._blknos[loc:]): - blk = self.blocks[blkno] - if count == len(blk.mgr_locs): - blk.mgr_locs = blk.mgr_locs.add(1) - else: - new_mgr_locs = blk.mgr_locs.as_array.copy() - new_mgr_locs[new_mgr_locs >= loc] += 1 - blk.mgr_locs = new_mgr_locs - - if loc == self._blklocs.shape[0]: - # np.append is a lot faster (at least in numpy 1.7.1), let's use it - # if we can. - self._blklocs = np.append(self._blklocs, 0) - self._blknos = np.append(self._blknos, len(self.blocks)) - else: - self._blklocs = np.insert(self._blklocs, loc, 0) - self._blknos = np.insert(self._blknos, loc, len(self.blocks)) - - self.axes[0] = new_axis - self.blocks += (block,) - self._shape = None - - self._known_consolidated = False - - if len(self.blocks) > 100: - self._consolidate_inplace() - - def reindex_axis(self, new_index, axis, method=None, limit=None, - fill_value=None, copy=True): - """ - Conform block manager to new index. 
- """ - new_index = ensure_index(new_index) - new_index, indexer = self.axes[axis].reindex(new_index, method=method, - limit=limit) - - return self.reindex_indexer(new_index, indexer, axis=axis, - fill_value=fill_value, copy=copy) - - def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, - allow_dups=False, copy=True): - """ - Parameters - ---------- - new_axis : Index - indexer : ndarray of int64 or None - axis : int - fill_value : object - allow_dups : bool - - pandas-indexer with -1's only. - """ - if indexer is None: - if new_axis is self.axes[axis] and not copy: - return self - - result = self.copy(deep=copy) - result.axes = list(self.axes) - result.axes[axis] = new_axis - return result - - self._consolidate_inplace() - - # some axes don't allow reindexing with dups - if not allow_dups: - self.axes[axis]._can_reindex(indexer) - - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - if axis == 0: - new_blocks = self._slice_take_blocks_ax0(indexer, - fill_tuple=(fill_value,)) - else: - new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=( - fill_value if fill_value is not None else blk.fill_value,)) - for blk in self.blocks] - - new_axes = list(self.axes) - new_axes[axis] = new_axis - return self.__class__(new_blocks, new_axes) - - def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): - """ - Slice/take blocks along axis=0. 
- - Overloaded for SingleBlock - - Returns - ------- - new_blocks : list of Block - - """ - - allow_fill = fill_tuple is not None - - sl_type, slobj, sllen = _preprocess_slice_or_indexer( - slice_or_indexer, self.shape[0], allow_fill=allow_fill) - - if self._is_single_block: - blk = self.blocks[0] - - if sl_type in ('slice', 'mask'): - return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] - elif not allow_fill or self.ndim == 1: - if allow_fill and fill_tuple[0] is None: - _, fill_value = maybe_promote(blk.dtype) - fill_tuple = (fill_value, ) - - return [blk.take_nd(slobj, axis=0, - new_mgr_locs=slice(0, sllen), - fill_tuple=fill_tuple)] - - if sl_type in ('slice', 'mask'): - blknos = self._blknos[slobj] - blklocs = self._blklocs[slobj] - else: - blknos = algos.take_1d(self._blknos, slobj, fill_value=-1, - allow_fill=allow_fill) - blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1, - allow_fill=allow_fill) - - # When filling blknos, make sure blknos is updated before appending to - # blocks list, that way new blkno is exactly len(blocks). - # - # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, - # pytables serialization will break otherwise. - blocks = [] - for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks), - group=True): - if blkno == -1: - # If we've got here, fill_tuple was not None. - fill_value = fill_tuple[0] - - blocks.append(self._make_na_block(placement=mgr_locs, - fill_value=fill_value)) - else: - blk = self.blocks[blkno] - - # Otherwise, slicing along items axis is necessary. - if not blk._can_consolidate: - # A non-consolidatable block, it's easy, because there's - # only one item and each mgr loc is a copy of that single - # item. 
- for mgr_loc in mgr_locs: - newblk = blk.copy(deep=True) - newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) - blocks.append(newblk) - - else: - blocks.append(blk.take_nd(blklocs[mgr_locs.indexer], - axis=0, new_mgr_locs=mgr_locs, - fill_tuple=None)) - - return blocks - - def _make_na_block(self, placement, fill_value=None): - # TODO: infer dtypes other than float64 from fill_value - - if fill_value is None: - fill_value = np.nan - block_shape = list(self.shape) - block_shape[0] = len(placement) - - dtype, fill_value = infer_dtype_from_scalar(fill_value) - block_values = np.empty(block_shape, dtype=dtype) - block_values.fill(fill_value) - return make_block(block_values, placement=placement) - - def take(self, indexer, axis=1, verify=True, convert=True): - """ - Take items along any axis. - """ - self._consolidate_inplace() - indexer = (np.arange(indexer.start, indexer.stop, indexer.step, - dtype='int64') - if isinstance(indexer, slice) - else np.asanyarray(indexer, dtype='int64')) - - n = self.shape[axis] - if convert: - indexer = maybe_convert_indices(indexer, n) - - if verify: - if ((indexer == -1) | (indexer >= n)).any(): - raise Exception('Indices must be nonzero and less than ' - 'the axis length') - - new_labels = self.axes[axis].take(indexer) - return self.reindex_indexer(new_axis=new_labels, indexer=indexer, - axis=axis, allow_dups=True) - - def merge(self, other, lsuffix='', rsuffix=''): - if not self._is_indexed_like(other): - raise AssertionError('Must have same axes to merge managers') - - l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, - right=other.items, rsuffix=rsuffix) - new_items = _concat_indexes([l, r]) - - new_blocks = [blk.copy(deep=False) for blk in self.blocks] - - offset = self.shape[0] - for blk in other.blocks: - blk = blk.copy(deep=False) - blk.mgr_locs = blk.mgr_locs.add(offset) - new_blocks.append(blk) - - new_axes = list(self.axes) - new_axes[0] = new_items - - return self.__class__(_consolidate(new_blocks), new_axes) 
- - def _is_indexed_like(self, other): - """ - Check all axes except items - """ - if self.ndim != other.ndim: - raise AssertionError( - 'Number of dimensions must agree got {ndim} and ' - '{oth_ndim}'.format(ndim=self.ndim, oth_ndim=other.ndim)) - for ax, oax in zip(self.axes[1:], other.axes[1:]): - if not ax.equals(oax): - return False - return True - - def equals(self, other): - self_axes, other_axes = self.axes, other.axes - if len(self_axes) != len(other_axes): - return False - if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): - return False - self._consolidate_inplace() - other._consolidate_inplace() - if len(self.blocks) != len(other.blocks): - return False - - # canonicalize block order, using a tuple combining the type - # name and then mgr_locs because there might be unconsolidated - # blocks (say, Categorical) which can only be distinguished by - # the iteration order - def canonicalize(block): - return (block.dtype.name, block.mgr_locs.as_array.tolist()) - - self_blocks = sorted(self.blocks, key=canonicalize) - other_blocks = sorted(other.blocks, key=canonicalize) - return all(block.equals(oblock) - for block, oblock in zip(self_blocks, other_blocks)) - - def unstack(self, unstacker_func): - """Return a blockmanager with all blocks unstacked. - - Parameters - ---------- - unstacker_func : callable - A (partially-applied) ``pd.core.reshape._Unstacker`` class. 
- - Returns - ------- - unstacked : BlockManager - """ - dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) - new_columns = dummy.get_new_columns() - new_index = dummy.get_new_index() - new_blocks = [] - columns_mask = [] - - for blk in self.blocks: - blocks, mask = blk._unstack( - partial(unstacker_func, - value_columns=self.items[blk.mgr_locs.indexer]), - new_columns) - - new_blocks.extend(blocks) - columns_mask.extend(mask) - - new_columns = new_columns[columns_mask] - - bm = BlockManager(new_blocks, [new_columns, new_index]) - return bm - - -class SingleBlockManager(BlockManager): - """ manage a single block with """ - - ndim = 1 - _is_consolidated = True - _known_consolidated = True - __slots__ = () - - def __init__(self, block, axis, do_integrity_check=False, fastpath=False): - - if isinstance(axis, list): - if len(axis) != 1: - raise ValueError("cannot create SingleBlockManager with more " - "than 1 axis") - axis = axis[0] - - # passed from constructor, single block, single axis - if fastpath: - self.axes = [axis] - if isinstance(block, list): - - # empty block - if len(block) == 0: - block = [np.array([])] - elif len(block) != 1: - raise ValueError('Cannot create SingleBlockManager with ' - 'more than 1 block') - block = block[0] - else: - self.axes = [ensure_index(axis)] - - # create the block here - if isinstance(block, list): - - # provide consolidation to the interleaved_dtype - if len(block) > 1: - dtype = _interleaved_dtype(block) - block = [b.astype(dtype) for b in block] - block = _consolidate(block) - - if len(block) != 1: - raise ValueError('Cannot create SingleBlockManager with ' - 'more than 1 block') - block = block[0] - - if not isinstance(block, Block): - block = make_block(block, placement=slice(0, len(axis)), ndim=1) - - self.blocks = [block] - - def _post_setstate(self): - pass - - @property - def _block(self): - return self.blocks[0] - - @property - def _values(self): - return self._block.values - - @property - def 
_blknos(self): - """ compat with BlockManager """ - return None - - @property - def _blklocs(self): - """ compat with BlockManager """ - return None - - def get_slice(self, slobj, axis=0): - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - return self.__class__(self._block._slice(slobj), - self.index[slobj], fastpath=True) - - @property - def index(self): - return self.axes[0] - - def convert(self, **kwargs): - """ convert the whole block as one """ - kwargs['by_item'] = False - return self.apply('convert', **kwargs) - - @property - def dtype(self): - return self._block.dtype - - @property - def array_dtype(self): - return self._block.array_dtype - - @property - def ftype(self): - return self._block.ftype - - def get_dtype_counts(self): - return {self.dtype.name: 1} - - def get_ftype_counts(self): - return {self.ftype: 1} - - def get_dtypes(self): - return np.array([self._block.dtype]) - - def get_ftypes(self): - return np.array([self._block.ftype]) - - def external_values(self): - return self._block.external_values() - - def internal_values(self): - return self._block.internal_values() - - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self._block.formatting_values() - - def get_values(self): - """ return a dense type view """ - return np.array(self._block.to_dense(), copy=False) - - @property - def asobject(self): - """ - return a object dtype array. datetime/timedelta like values are boxed - to Timestamp/Timedelta instances. - """ - return self._block.get_values(dtype=object) - - @property - def _can_hold_na(self): - return self._block._can_hold_na - - def is_consolidated(self): - return True - - def _consolidate_check(self): - pass - - def _consolidate_inplace(self): - pass - - def delete(self, item): - """ - Delete single item from SingleBlockManager. - - Ensures that self.blocks doesn't become empty. 
- """ - loc = self.items.get_loc(item) - self._block.delete(loc) - self.axes[0] = self.axes[0].delete(loc) - - def fast_xs(self, loc): - """ - fast path for getting a cross-section - return a view of the data - """ - return self._block.values[loc] - - def concat(self, to_concat, new_axis): - """ - Concatenate a list of SingleBlockManagers into a single - SingleBlockManager. - - Used for pd.concat of Series objects with axis=0. - - Parameters - ---------- - to_concat : list of SingleBlockManagers - new_axis : Index of the result - - Returns - ------- - SingleBlockManager - - """ - non_empties = [x for x in to_concat if len(x) > 0] - - # check if all series are of the same block type: - if len(non_empties) > 0: - blocks = [obj.blocks[0] for obj in non_empties] - - if all(type(b) is type(blocks[0]) for b in blocks[1:]): # noqa - new_block = blocks[0].concat_same_type(blocks) - else: - values = [x.values for x in blocks] - values = _concat._concat_compat(values) - new_block = make_block( - values, placement=slice(0, len(values), 1)) - else: - values = [x._block.values for x in to_concat] - values = _concat._concat_compat(values) - new_block = make_block( - values, placement=slice(0, len(values), 1)) - - mgr = SingleBlockManager(new_block, new_axis) - return mgr - - -def construction_error(tot_items, block_shape, axes, e=None): - """ raise a helpful message about our construction """ - passed = tuple(map(int, [tot_items] + list(block_shape))) - implied = tuple(map(int, [len(ax) for ax in axes])) - if passed == implied and e is not None: - raise e - if block_shape[0] == 0: - raise ValueError("Empty data passed with indices specified.") - raise ValueError("Shape of passed values is {0}, indices imply {1}".format( - passed, implied)) - - -def create_block_manager_from_blocks(blocks, axes): - try: - if len(blocks) == 1 and not isinstance(blocks[0], Block): - # if blocks[0] is of length 0, return empty blocks - if not len(blocks[0]): - blocks = [] - else: - # It's OK if a 
single block is passed as values, its placement - # is basically "all items", but if there're many, don't bother - # converting, it's an error anyway. - blocks = [make_block(values=blocks[0], - placement=slice(0, len(axes[0])))] - - mgr = BlockManager(blocks, axes) - mgr._consolidate_inplace() - return mgr - - except (ValueError) as e: - blocks = [getattr(b, 'values', b) for b in blocks] - tot_items = sum(b.shape[0] for b in blocks) - construction_error(tot_items, blocks[0].shape[1:], axes, e) - - -def create_block_manager_from_arrays(arrays, names, axes): - - try: - blocks = form_blocks(arrays, names, axes) - mgr = BlockManager(blocks, axes) - mgr._consolidate_inplace() - return mgr - except ValueError as e: - construction_error(len(arrays), arrays[0].shape, axes, e) - - -def form_blocks(arrays, names, axes): - # put "leftover" items in float bucket, where else? - # generalize? - items_dict = defaultdict(list) - extra_locs = [] - - names_idx = ensure_index(names) - if names_idx.equals(axes[0]): - names_indexer = np.arange(len(names_idx)) - else: - assert names_idx.intersection(axes[0]).is_unique - names_indexer = names_idx.get_indexer_for(axes[0]) - - for i, name_idx in enumerate(names_indexer): - if name_idx == -1: - extra_locs.append(i) - continue - - k = names[name_idx] - v = arrays[name_idx] - - block_type = get_block_type(v) - items_dict[block_type.__name__].append((i, k, v)) - - blocks = [] - if len(items_dict['FloatBlock']): - float_blocks = _multi_blockify(items_dict['FloatBlock']) - blocks.extend(float_blocks) - - if len(items_dict['ComplexBlock']): - complex_blocks = _multi_blockify(items_dict['ComplexBlock']) - blocks.extend(complex_blocks) - - if len(items_dict['TimeDeltaBlock']): - timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) - blocks.extend(timedelta_blocks) - - if len(items_dict['IntBlock']): - int_blocks = _multi_blockify(items_dict['IntBlock']) - blocks.extend(int_blocks) - - if len(items_dict['DatetimeBlock']): - 
datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], - _NS_DTYPE) - blocks.extend(datetime_blocks) - - if len(items_dict['DatetimeTZBlock']): - dttz_blocks = [make_block(array, - klass=DatetimeTZBlock, - placement=[i]) - for i, _, array in items_dict['DatetimeTZBlock']] - blocks.extend(dttz_blocks) - - if len(items_dict['BoolBlock']): - bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) - blocks.extend(bool_blocks) - - if len(items_dict['ObjectBlock']) > 0: - object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) - blocks.extend(object_blocks) - - if len(items_dict['SparseBlock']) > 0: - sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) - blocks.extend(sparse_blocks) - - if len(items_dict['CategoricalBlock']) > 0: - cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i]) - for i, _, array in items_dict['CategoricalBlock']] - blocks.extend(cat_blocks) - - if len(items_dict['ExtensionBlock']): - - external_blocks = [ - make_block(array, klass=ExtensionBlock, placement=[i]) - for i, _, array in items_dict['ExtensionBlock'] - ] - - blocks.extend(external_blocks) - - if len(extra_locs): - shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) - - # empty items -> dtype object - block_values = np.empty(shape, dtype=object) - block_values.fill(np.nan) - - na_block = make_block(block_values, placement=extra_locs) - blocks.append(na_block) - - return blocks - - -def _simple_blockify(tuples, dtype): - """ return a single array of a block that has a single dtype; if dtype is - not None, coerce to this dtype - """ - values, placement = _stack_arrays(tuples, dtype) - - # CHECK DTYPE? 
- if dtype is not None and values.dtype != dtype: # pragma: no cover - values = values.astype(dtype) - - block = make_block(values, placement=placement) - return [block] - - -def _multi_blockify(tuples, dtype=None): - """ return an array of blocks that potentially have different dtypes """ - - # group by dtype - grouper = itertools.groupby(tuples, lambda x: x[2].dtype) - - new_blocks = [] - for dtype, tup_block in grouper: - - values, placement = _stack_arrays(list(tup_block), dtype) - - block = make_block(values, placement=placement) - new_blocks.append(block) - - return new_blocks - - -def _sparse_blockify(tuples, dtype=None): - """ return an array of blocks that potentially have different dtypes (and - are sparse) - """ - - new_blocks = [] - for i, names, array in tuples: - array = _maybe_to_sparse(array) - block = make_block(array, klass=SparseBlock, placement=[i]) - new_blocks.append(block) - - return new_blocks - - -def _stack_arrays(tuples, dtype): - - # fml - def _asarray_compat(x): - if isinstance(x, ABCSeries): - return x._values - else: - return np.asarray(x) - - def _shape_compat(x): - if isinstance(x, ABCSeries): - return len(x), - else: - return x.shape - - placement, names, arrays = zip(*tuples) - - first = arrays[0] - shape = (len(arrays),) + _shape_compat(first) - - stacked = np.empty(shape, dtype=dtype) - for i, arr in enumerate(arrays): - stacked[i] = _asarray_compat(arr) - - return stacked, placement - - -def _interleaved_dtype(blocks): - if not len(blocks): - return None - - dtype = find_common_type([b.dtype for b in blocks]) - - # only numpy compat - if isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)): - dtype = np.object - - return dtype - - -def _consolidate(blocks): - """ - Merge blocks having same dtype, exclude non-consolidating blocks - """ - - # sort by _can_consolidate, dtype - gkey = lambda x: x._consolidate_key - grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) - - new_blocks = [] - for (_can_consolidate, dtype), 
group_blocks in grouper: - merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, - _can_consolidate=_can_consolidate) - new_blocks = _extend_blocks(merged_blocks, new_blocks) - return new_blocks - - -def _maybe_compare(a, b, op): - - is_a_array = isinstance(a, np.ndarray) - is_b_array = isinstance(b, np.ndarray) - - # numpy deprecation warning to have i8 vs integer comparisons - if is_datetimelike_v_numeric(a, b): - result = False - - # numpy deprecation warning if comparing numeric vs string-like - elif is_numeric_v_string_like(a, b): - result = False - - else: - result = op(a, b) - - if is_scalar(result) and (is_a_array or is_b_array): - type_names = [type(a).__name__, type(b).__name__] - - if is_a_array: - type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) - - if is_b_array: - type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) - - raise TypeError( - "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], - b=type_names[1])) - return result - - -def _concat_indexes(indexes): - return indexes[0].append(indexes[1:]) - - -def _get_blkno_placements(blknos, blk_count, group=True): - """ - - Parameters - ---------- - blknos : array of int64 - blk_count : int - group : bool - - Returns - ------- - iterator - yield (BlockPlacement, blkno) - - """ - - blknos = ensure_int64(blknos) - - # FIXME: blk_count is unused, but it may avoid the use of dicts in cython - for blkno, indexer in libinternals.get_blkno_indexers(blknos, group): - yield blkno, BlockPlacement(indexer) - - -def items_overlap_with_suffix(left, lsuffix, right, rsuffix): - """ - If two indices overlap, add suffixes to overlapping entries. - - If corresponding suffix is empty, the entry is simply converted to string. 
- - """ - to_rename = left.intersection(right) - if len(to_rename) == 0: - return left, right - else: - if not lsuffix and not rsuffix: - raise ValueError('columns overlap but no suffix specified: ' - '{rename}'.format(rename=to_rename)) - - def lrenamer(x): - if x in to_rename: - return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) - return x - - def rrenamer(x): - if x in to_rename: - return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) - return x - - return (_transform_index(left, lrenamer), - _transform_index(right, rrenamer)) - - -def _transform_index(index, func, level=None): - """ - Apply function to all values found in index. - - This includes transforming multiindex entries separately. - Only apply function to one level of the MultiIndex if level is specified. - - """ - if isinstance(index, MultiIndex): - if level is not None: - items = [tuple(func(y) if i == level else y - for i, y in enumerate(x)) for x in index] - else: - items = [tuple(func(y) for y in x) for x in index] - return MultiIndex.from_tuples(items, names=index.names) - else: - items = [func(x) for x in index] - return Index(items, name=index.name, tupleize_cols=False) - - -def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): - """ - Concatenate block managers into one. 
- - Parameters - ---------- - mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples - axes : list of Index - concat_axis : int - copy : bool - - """ - concat_plan = combine_concat_plans( - [get_mgr_concatenation_plan(mgr, indexers) - for mgr, indexers in mgrs_indexers], concat_axis) - - blocks = [] - - for placement, join_units in concat_plan: - - if len(join_units) == 1 and not join_units[0].indexers: - b = join_units[0].block - values = b.values - if copy: - values = values.copy() - elif not copy: - values = values.view() - b = b.make_block_same_class(values, placement=placement) - elif is_uniform_join_units(join_units): - b = join_units[0].block.concat_same_type( - [ju.block for ju in join_units], placement=placement) - else: - b = make_block( - concatenate_join_units(join_units, concat_axis, copy=copy), - placement=placement) - blocks.append(b) - - return BlockManager(blocks, axes) - - -def is_uniform_join_units(join_units): - """ - Check if the join units consist of blocks of uniform type that can - be concatenated using Block.concat_same_type instead of the generic - concatenate_join_units (which uses `_concat._concat_compat`). - - """ - return ( - # all blocks need to have the same type - all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa - # no blocks that would get missing values (can lead to type upcasts) - # unless we're an extension dtype. - all(not ju.is_na or ju.block.is_extension for ju in join_units) and - # no blocks with indexers (as then the dimensions do not fit) - all(not ju.indexers for ju in join_units) and - # disregard Panels - all(ju.block.ndim <= 2 for ju in join_units) and - # only use this path when there is something to concatenate - len(join_units) > 1) - - -def is_uniform_reindex(join_units): - return ( - # TODO: should this be ju.block._can_hold_na? 
- all(ju.block and ju.block.is_extension for ju in join_units) and - len(set(ju.block.dtype.name for ju in join_units)) == 1 - ) - - -def get_empty_dtype_and_na(join_units): - """ - Return dtype and N/A values to use when concatenating specified units. - - Returned N/A value may be None which means there was no casting involved. - - Returns - ------- - dtype - na - """ - - if len(join_units) == 1: - blk = join_units[0].block - if blk is None: - return np.float64, np.nan - - if is_uniform_reindex(join_units): - # XXX: integrate property - empty_dtype = join_units[0].block.dtype - upcasted_na = join_units[0].block.fill_value - return empty_dtype, upcasted_na - - has_none_blocks = False - dtypes = [None] * len(join_units) - for i, unit in enumerate(join_units): - if unit.block is None: - has_none_blocks = True - else: - dtypes[i] = unit.dtype - - upcast_classes = defaultdict(list) - null_upcast_classes = defaultdict(list) - for dtype, unit in zip(dtypes, join_units): - if dtype is None: - continue - - if is_categorical_dtype(dtype): - upcast_cls = 'category' - elif is_datetimetz(dtype): - upcast_cls = 'datetimetz' - elif issubclass(dtype.type, np.bool_): - upcast_cls = 'bool' - elif issubclass(dtype.type, np.object_): - upcast_cls = 'object' - elif is_datetime64_dtype(dtype): - upcast_cls = 'datetime' - elif is_timedelta64_dtype(dtype): - upcast_cls = 'timedelta' - elif is_float_dtype(dtype) or is_numeric_dtype(dtype): - upcast_cls = dtype.name - else: - upcast_cls = 'float' - - # Null blocks should not influence upcast class selection, unless there - # are only null blocks, when same upcasting rules must be applied to - # null upcast classes. 
- if unit.is_na: - null_upcast_classes[upcast_cls].append(dtype) - else: - upcast_classes[upcast_cls].append(dtype) - - if not upcast_classes: - upcast_classes = null_upcast_classes - - # create the result - if 'object' in upcast_classes: - return np.dtype(np.object_), np.nan - elif 'bool' in upcast_classes: - if has_none_blocks: - return np.dtype(np.object_), np.nan - else: - return np.dtype(np.bool_), None - elif 'category' in upcast_classes: - return np.dtype(np.object_), np.nan - elif 'datetimetz' in upcast_classes: - dtype = upcast_classes['datetimetz'] - return dtype[0], tslibs.iNaT - elif 'datetime' in upcast_classes: - return np.dtype('M8[ns]'), tslibs.iNaT - elif 'timedelta' in upcast_classes: - return np.dtype('m8[ns]'), tslibs.iNaT - else: # pragma - g = np.find_common_type(upcast_classes, []) - if is_float_dtype(g): - return g, g.type(np.nan) - elif is_numeric_dtype(g): - if has_none_blocks: - return np.float64, np.nan - else: - return g, None - - msg = "invalid dtype determination in get_concat_dtype" - raise AssertionError(msg) - - -def concatenate_join_units(join_units, concat_axis, copy): - """ - Concatenate values from several join units along selected axis. - """ - if concat_axis == 0 and len(join_units) > 1: - # Concatenating join units along ax0 is handled in _merge_blocks. - raise AssertionError("Concatenating join units along axis0") - - empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units) - - to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, - upcasted_na=upcasted_na) - for ju in join_units] - - if len(to_concat) == 1: - # Only one block, nothing to concatenate. 
- concat_values = to_concat[0] - if copy: - if isinstance(concat_values, np.ndarray): - # non-reindexed (=not yet copied) arrays are made into a view - # in JoinUnit.get_reindexed_values - if concat_values.base is not None: - concat_values = concat_values.copy() - else: - concat_values = concat_values.copy() - else: - concat_values = _concat._concat_compat(to_concat, axis=concat_axis) - - return concat_values - - -def get_mgr_concatenation_plan(mgr, indexers): - """ - Construct concatenation plan for given block manager and indexers. - - Parameters - ---------- - mgr : BlockManager - indexers : dict of {axis: indexer} - - Returns - ------- - plan : list of (BlockPlacement, JoinUnit) tuples - - """ - # Calculate post-reindex shape , save for item axis which will be separate - # for each block anyway. - mgr_shape = list(mgr.shape) - for ax, indexer in indexers.items(): - mgr_shape[ax] = len(indexer) - mgr_shape = tuple(mgr_shape) - - if 0 in indexers: - ax0_indexer = indexers.pop(0) - blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) - blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) - else: - - if mgr._is_single_block: - blk = mgr.blocks[0] - return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] - - ax0_indexer = None - blknos = mgr._blknos - blklocs = mgr._blklocs - - plan = [] - for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks), - group=False): - - assert placements.is_slice_like - - join_unit_indexers = indexers.copy() - - shape = list(mgr_shape) - shape[0] = len(placements) - shape = tuple(shape) - - if blkno == -1: - unit = JoinUnit(None, shape) - else: - blk = mgr.blocks[blkno] - ax0_blk_indexer = blklocs[placements.indexer] - - unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and - # Fastpath detection of join unit not - # needing to reindex its block: no ax0 - # reindexing took place and block - # placement was sequential before. 
- ((ax0_indexer is None and - blk.mgr_locs.is_slice_like and - blk.mgr_locs.as_slice.step == 1) or - # Slow-ish detection: all indexer locs - # are sequential (and length match is - # checked above). - (np.diff(ax0_blk_indexer) == 1).all())) - - # Omit indexer if no item reindexing is required. - if unit_no_ax0_reindexing: - join_unit_indexers.pop(0, None) - else: - join_unit_indexers[0] = ax0_blk_indexer - - unit = JoinUnit(blk, shape, join_unit_indexers) - - plan.append((placements, unit)) - - return plan - - -def combine_concat_plans(plans, concat_axis): - """ - Combine multiple concatenation plans into one. - - existing_plan is updated in-place. - """ - if len(plans) == 1: - for p in plans[0]: - yield p[0], [p[1]] - - elif concat_axis == 0: - offset = 0 - for plan in plans: - last_plc = None - - for plc, unit in plan: - yield plc.add(offset), [unit] - last_plc = plc - - if last_plc is not None: - offset += last_plc.as_slice.stop - - else: - num_ended = [0] - - def _next_or_none(seq): - retval = next(seq, None) - if retval is None: - num_ended[0] += 1 - return retval - - plans = list(map(iter, plans)) - next_items = list(map(_next_or_none, plans)) - - while num_ended[0] != len(next_items): - if num_ended[0] > 0: - raise ValueError("Plan shapes are not aligned") - - placements, units = zip(*next_items) - - lengths = list(map(len, placements)) - min_len, max_len = min(lengths), max(lengths) - - if min_len == max_len: - yield placements[0], units - next_items[:] = map(_next_or_none, plans) - else: - yielded_placement = None - yielded_units = [None] * len(next_items) - for i, (plc, unit) in enumerate(next_items): - yielded_units[i] = unit - if len(plc) > min_len: - # trim_join_unit updates unit in place, so only - # placement needs to be sliced to skip min_len. 
- next_items[i] = (plc[min_len:], - trim_join_unit(unit, min_len)) - else: - yielded_placement = plc - next_items[i] = _next_or_none(plans[i]) - - yield yielded_placement, yielded_units - - -def trim_join_unit(join_unit, length): - """ - Reduce join_unit's shape along item axis to length. - - Extra items that didn't fit are returned as a separate block. - """ - - if 0 not in join_unit.indexers: - extra_indexers = join_unit.indexers - - if join_unit.block is None: - extra_block = None - else: - extra_block = join_unit.block.getitem_block(slice(length, None)) - join_unit.block = join_unit.block.getitem_block(slice(length)) - else: - extra_block = join_unit.block - - extra_indexers = copy.copy(join_unit.indexers) - extra_indexers[0] = extra_indexers[0][length:] - join_unit.indexers[0] = join_unit.indexers[0][:length] - - extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:] - join_unit.shape = (length,) + join_unit.shape[1:] - - return JoinUnit(block=extra_block, indexers=extra_indexers, - shape=extra_shape) - - -class JoinUnit(object): - - def __init__(self, block, shape, indexers=None): - # Passing shape explicitly is required for cases when block is None. - if indexers is None: - indexers = {} - self.block = block - self.indexers = indexers - self.shape = shape - - def __repr__(self): - return '{name}({block!r}, {indexers})'.format( - name=self.__class__.__name__, block=self.block, - indexers=self.indexers) - - @cache_readonly - def needs_filling(self): - for indexer in self.indexers.values(): - # FIXME: cache results of indexer == -1 checks. 
- if (indexer == -1).any(): - return True - - return False - - @cache_readonly - def dtype(self): - if self.block is None: - raise AssertionError("Block is None, no dtype") - - if not self.needs_filling: - return self.block.dtype - else: - return _get_dtype(maybe_promote(self.block.dtype, - self.block.fill_value)[0]) - - @cache_readonly - def is_na(self): - if self.block is None: - return True - - if not self.block._can_hold_na: - return False - - # Usually it's enough to check but a small fraction of values to see if - # a block is NOT null, chunks should help in such cases. 1000 value - # was chosen rather arbitrarily. - values = self.block.values - if self.block.is_categorical: - values_flat = values.categories - elif self.block.is_sparse: - # fill_value is not NaN and have holes - if not values._null_fill_value and values.sp_index.ngaps > 0: - return False - values_flat = values.ravel(order='K') - elif isinstance(self.block, ExtensionBlock): - values_flat = values - else: - values_flat = values.ravel(order='K') - total_len = values_flat.shape[0] - chunk_len = max(total_len // 40, 1000) - for i in range(0, total_len, chunk_len): - if not isna(values_flat[i:i + chunk_len]).all(): - return False - - return True - - def get_reindexed_values(self, empty_dtype, upcasted_na): - if upcasted_na is None: - # No upcasting is necessary - fill_value = self.block.fill_value - values = self.block.get_values() - else: - fill_value = upcasted_na - - if self.is_na: - if getattr(self.block, 'is_object', False): - # we want to avoid filling with np.nan if we are - # using None; we already know that we are all - # nulls - values = self.block.values.ravel(order='K') - if len(values) and values[0] is None: - fill_value = None - - if getattr(self.block, 'is_datetimetz', False) or \ - is_datetimetz(empty_dtype): - pass - elif getattr(self.block, 'is_categorical', False): - pass - elif getattr(self.block, 'is_sparse', False): - pass - else: - missing_arr = np.empty(self.shape, 
dtype=empty_dtype) - missing_arr.fill(fill_value) - return missing_arr - - if not self.indexers: - if not self.block._can_consolidate: - # preserve these for validation in _concat_compat - return self.block.values - - if self.block.is_bool and not self.block.is_categorical: - # External code requested filling/upcasting, bool values must - # be upcasted to object to avoid being upcasted to numeric. - values = self.block.astype(np.object_).values - elif self.block.is_extension: - values = self.block.values - else: - # No dtype upcasting is done here, it will be performed during - # concatenation itself. - values = self.block.get_values() - - if not self.indexers: - # If there's no indexing to be done, we want to signal outside - # code that this array must be copied explicitly. This is done - # by returning a view and checking `retval.base`. - values = values.view() - - else: - for ax, indexer in self.indexers.items(): - values = algos.take_nd(values, indexer, axis=ax, - fill_value=fill_value) - - return values - - -def _fast_count_smallints(arr): - """Faster version of set(arr) for sequences of small numbers.""" - if len(arr) == 0: - # Handle empty arr case separately: numpy 1.6 chokes on that. 
- return np.empty((0, 2), dtype=arr.dtype) - else: - counts = np.bincount(arr.astype(np.int_)) - nz = counts.nonzero()[0] - return np.c_[nz, counts[nz]] - - -def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): - if isinstance(slice_or_indexer, slice): - return ('slice', slice_or_indexer, - libinternals.slice_len(slice_or_indexer, length)) - elif (isinstance(slice_or_indexer, np.ndarray) and - slice_or_indexer.dtype == np.bool_): - return 'mask', slice_or_indexer, slice_or_indexer.sum() - else: - indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) - if not allow_fill: - indexer = maybe_convert_indices(indexer, length) - return 'fancy', indexer, len(indexer) + CategoricalBlock, ExtensionBlock, SparseBlock, ScalarBlock, + Block) +from .managers import ( # noqa:F401 + BlockManager, SingleBlockManager, + create_block_manager_from_arrays, create_block_manager_from_blocks, + items_overlap_with_suffix, # reshape.merge + concatenate_block_managers) # reshape.concat, reshape.merge diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py new file mode 100644 index 0000000000000..4eeeb069d7142 --- /dev/null +++ b/pandas/core/internals/concat.py @@ -0,0 +1,474 @@ +# -*- coding: utf-8 -*- +# TODO: Needs a better name; too many modules are already called "concat" +import copy +from collections import defaultdict + +import numpy as np + +from pandas._libs import tslibs, internals as libinternals +from pandas.util._decorators import cache_readonly + +from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.common import ( + is_timedelta64_dtype, + is_datetime64_dtype, is_datetimetz, + is_categorical_dtype, + is_float_dtype, is_numeric_dtype, + _get_dtype) +from pandas.core.dtypes.cast import maybe_promote +import pandas.core.dtypes.concat as _concat + +import pandas.core.algorithms as algos + + +def get_mgr_concatenation_plan(mgr, indexers): + """ + Construct concatenation plan for given block manager and indexers. 
+ + Parameters + ---------- + mgr : BlockManager + indexers : dict of {axis: indexer} + + Returns + ------- + plan : list of (BlockPlacement, JoinUnit) tuples + + """ + # Calculate post-reindex shape , save for item axis which will be separate + # for each block anyway. + mgr_shape = list(mgr.shape) + for ax, indexer in indexers.items(): + mgr_shape[ax] = len(indexer) + mgr_shape = tuple(mgr_shape) + + if 0 in indexers: + ax0_indexer = indexers.pop(0) + blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) + blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) + else: + + if mgr._is_single_block: + blk = mgr.blocks[0] + return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] + + ax0_indexer = None + blknos = mgr._blknos + blklocs = mgr._blklocs + + plan = [] + for blkno, placements in libinternals.get_blkno_placements(blknos, + mgr.nblocks, + group=False): + + assert placements.is_slice_like + + join_unit_indexers = indexers.copy() + + shape = list(mgr_shape) + shape[0] = len(placements) + shape = tuple(shape) + + if blkno == -1: + unit = JoinUnit(None, shape) + else: + blk = mgr.blocks[blkno] + ax0_blk_indexer = blklocs[placements.indexer] + + unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and + # Fastpath detection of join unit not + # needing to reindex its block: no ax0 + # reindexing took place and block + # placement was sequential before. + ((ax0_indexer is None and + blk.mgr_locs.is_slice_like and + blk.mgr_locs.as_slice.step == 1) or + # Slow-ish detection: all indexer locs + # are sequential (and length match is + # checked above). + (np.diff(ax0_blk_indexer) == 1).all())) + + # Omit indexer if no item reindexing is required. 
+ if unit_no_ax0_reindexing: + join_unit_indexers.pop(0, None) + else: + join_unit_indexers[0] = ax0_blk_indexer + + unit = JoinUnit(blk, shape, join_unit_indexers) + + plan.append((placements, unit)) + + return plan + + +class JoinUnit(object): + + def __init__(self, block, shape, indexers=None): + # Passing shape explicitly is required for cases when block is None. + if indexers is None: + indexers = {} + self.block = block + self.indexers = indexers + self.shape = shape + + def __repr__(self): + return '{name}({block!r}, {indexers})'.format( + name=self.__class__.__name__, block=self.block, + indexers=self.indexers) + + @cache_readonly + def needs_filling(self): + for indexer in self.indexers.values(): + # FIXME: cache results of indexer == -1 checks. + if (indexer == -1).any(): + return True + + return False + + @cache_readonly + def dtype(self): + if self.block is None: + raise AssertionError("Block is None, no dtype") + + if not self.needs_filling: + return self.block.dtype + else: + return _get_dtype(maybe_promote(self.block.dtype, + self.block.fill_value)[0]) + + @cache_readonly + def is_na(self): + if self.block is None: + return True + + if not self.block._can_hold_na: + return False + + # Usually it's enough to check but a small fraction of values to see if + # a block is NOT null, chunks should help in such cases. 1000 value + # was chosen rather arbitrarily. 
+ values = self.block.values + if self.block.is_categorical: + values_flat = values.categories + elif self.block.is_sparse: + # fill_value is not NaN and have holes + if not values._null_fill_value and values.sp_index.ngaps > 0: + return False + values_flat = values.ravel(order='K') + elif self.block.is_extension: + values_flat = values + else: + values_flat = values.ravel(order='K') + total_len = values_flat.shape[0] + chunk_len = max(total_len // 40, 1000) + for i in range(0, total_len, chunk_len): + if not isna(values_flat[i:i + chunk_len]).all(): + return False + + return True + + def get_reindexed_values(self, empty_dtype, upcasted_na): + if upcasted_na is None: + # No upcasting is necessary + fill_value = self.block.fill_value + values = self.block.get_values() + else: + fill_value = upcasted_na + + if self.is_na: + if getattr(self.block, 'is_object', False): + # we want to avoid filling with np.nan if we are + # using None; we already know that we are all + # nulls + values = self.block.values.ravel(order='K') + if len(values) and values[0] is None: + fill_value = None + + if getattr(self.block, 'is_datetimetz', False) or \ + is_datetimetz(empty_dtype): + pass + elif getattr(self.block, 'is_categorical', False): + pass + elif getattr(self.block, 'is_sparse', False): + pass + else: + missing_arr = np.empty(self.shape, dtype=empty_dtype) + missing_arr.fill(fill_value) + return missing_arr + + if not self.indexers: + if not self.block._can_consolidate: + # preserve these for validation in _concat_compat + return self.block.values + + if self.block.is_bool and not self.block.is_categorical: + # External code requested filling/upcasting, bool values must + # be upcasted to object to avoid being upcasted to numeric. + values = self.block.astype(np.object_).values + elif self.block.is_extension: + values = self.block.values + else: + # No dtype upcasting is done here, it will be performed during + # concatenation itself. 
+ values = self.block.get_values() + + if not self.indexers: + # If there's no indexing to be done, we want to signal outside + # code that this array must be copied explicitly. This is done + # by returning a view and checking `retval.base`. + values = values.view() + + else: + for ax, indexer in self.indexers.items(): + values = algos.take_nd(values, indexer, axis=ax, + fill_value=fill_value) + + return values + + +def concatenate_join_units(join_units, concat_axis, copy): + """ + Concatenate values from several join units along selected axis. + """ + if concat_axis == 0 and len(join_units) > 1: + # Concatenating join units along ax0 is handled in _merge_blocks. + raise AssertionError("Concatenating join units along axis0") + + empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units) + + to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, + upcasted_na=upcasted_na) + for ju in join_units] + + if len(to_concat) == 1: + # Only one block, nothing to concatenate. + concat_values = to_concat[0] + if copy: + if isinstance(concat_values, np.ndarray): + # non-reindexed (=not yet copied) arrays are made into a view + # in JoinUnit.get_reindexed_values + if concat_values.base is not None: + concat_values = concat_values.copy() + else: + concat_values = concat_values.copy() + else: + concat_values = _concat._concat_compat(to_concat, axis=concat_axis) + + return concat_values + + +def get_empty_dtype_and_na(join_units): + """ + Return dtype and N/A values to use when concatenating specified units. + + Returned N/A value may be None which means there was no casting involved. 
+ + Returns + ------- + dtype + na + """ + + if len(join_units) == 1: + blk = join_units[0].block + if blk is None: + return np.float64, np.nan + + if is_uniform_reindex(join_units): + # XXX: integrate property + empty_dtype = join_units[0].block.dtype + upcasted_na = join_units[0].block.fill_value + return empty_dtype, upcasted_na + + has_none_blocks = False + dtypes = [None] * len(join_units) + for i, unit in enumerate(join_units): + if unit.block is None: + has_none_blocks = True + else: + dtypes[i] = unit.dtype + + upcast_classes = defaultdict(list) + null_upcast_classes = defaultdict(list) + for dtype, unit in zip(dtypes, join_units): + if dtype is None: + continue + + if is_categorical_dtype(dtype): + upcast_cls = 'category' + elif is_datetimetz(dtype): + upcast_cls = 'datetimetz' + elif issubclass(dtype.type, np.bool_): + upcast_cls = 'bool' + elif issubclass(dtype.type, np.object_): + upcast_cls = 'object' + elif is_datetime64_dtype(dtype): + upcast_cls = 'datetime' + elif is_timedelta64_dtype(dtype): + upcast_cls = 'timedelta' + elif is_float_dtype(dtype) or is_numeric_dtype(dtype): + upcast_cls = dtype.name + else: + upcast_cls = 'float' + + # Null blocks should not influence upcast class selection, unless there + # are only null blocks, when same upcasting rules must be applied to + # null upcast classes. 
+ if unit.is_na: + null_upcast_classes[upcast_cls].append(dtype) + else: + upcast_classes[upcast_cls].append(dtype) + + if not upcast_classes: + upcast_classes = null_upcast_classes + + # create the result + if 'object' in upcast_classes: + return np.dtype(np.object_), np.nan + elif 'bool' in upcast_classes: + if has_none_blocks: + return np.dtype(np.object_), np.nan + else: + return np.dtype(np.bool_), None + elif 'category' in upcast_classes: + return np.dtype(np.object_), np.nan + elif 'datetimetz' in upcast_classes: + dtype = upcast_classes['datetimetz'] + return dtype[0], tslibs.iNaT + elif 'datetime' in upcast_classes: + return np.dtype('M8[ns]'), tslibs.iNaT + elif 'timedelta' in upcast_classes: + return np.dtype('m8[ns]'), tslibs.iNaT + else: # pragma + g = np.find_common_type(upcast_classes, []) + if is_float_dtype(g): + return g, g.type(np.nan) + elif is_numeric_dtype(g): + if has_none_blocks: + return np.float64, np.nan + else: + return g, None + + msg = "invalid dtype determination in get_concat_dtype" + raise AssertionError(msg) + + +def is_uniform_join_units(join_units): + """ + Check if the join units consist of blocks of uniform type that can + be concatenated using Block.concat_same_type instead of the generic + concatenate_join_units (which uses `_concat._concat_compat`). + + """ + return ( + # all blocks need to have the same type + all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa + # no blocks that would get missing values (can lead to type upcasts) + # unless we're an extension dtype. 
+ all(not ju.is_na or ju.block.is_extension for ju in join_units) and + # no blocks with indexers (as then the dimensions do not fit) + all(not ju.indexers for ju in join_units) and + # disregard Panels + all(ju.block.ndim <= 2 for ju in join_units) and + # only use this path when there is something to concatenate + len(join_units) > 1) + + +def is_uniform_reindex(join_units): + return ( + # TODO: should this be ju.block._can_hold_na? + all(ju.block and ju.block.is_extension for ju in join_units) and + len(set(ju.block.dtype.name for ju in join_units)) == 1 + ) + + +def trim_join_unit(join_unit, length): + """ + Reduce join_unit's shape along item axis to length. + + Extra items that didn't fit are returned as a separate block. + """ + + if 0 not in join_unit.indexers: + extra_indexers = join_unit.indexers + + if join_unit.block is None: + extra_block = None + else: + extra_block = join_unit.block.getitem_block(slice(length, None)) + join_unit.block = join_unit.block.getitem_block(slice(length)) + else: + extra_block = join_unit.block + + extra_indexers = copy.copy(join_unit.indexers) + extra_indexers[0] = extra_indexers[0][length:] + join_unit.indexers[0] = join_unit.indexers[0][:length] + + extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:] + join_unit.shape = (length,) + join_unit.shape[1:] + + return JoinUnit(block=extra_block, indexers=extra_indexers, + shape=extra_shape) + + +def combine_concat_plans(plans, concat_axis): + """ + Combine multiple concatenation plans into one. + + existing_plan is updated in-place. 
+ """ + if len(plans) == 1: + for p in plans[0]: + yield p[0], [p[1]] + + elif concat_axis == 0: + offset = 0 + for plan in plans: + last_plc = None + + for plc, unit in plan: + yield plc.add(offset), [unit] + last_plc = plc + + if last_plc is not None: + offset += last_plc.as_slice.stop + + else: + num_ended = [0] + + def _next_or_none(seq): + retval = next(seq, None) + if retval is None: + num_ended[0] += 1 + return retval + + plans = list(map(iter, plans)) + next_items = list(map(_next_or_none, plans)) + + while num_ended[0] != len(next_items): + if num_ended[0] > 0: + raise ValueError("Plan shapes are not aligned") + + placements, units = zip(*next_items) + + lengths = list(map(len, placements)) + min_len, max_len = min(lengths), max(lengths) + + if min_len == max_len: + yield placements[0], units + next_items[:] = map(_next_or_none, plans) + else: + yielded_placement = None + yielded_units = [None] * len(next_items) + for i, (plc, unit) in enumerate(next_items): + yielded_units[i] = unit + if len(plc) > min_len: + # trim_join_unit updates unit in place, so only + # placement needs to be sliced to skip min_len. 
+ next_items[i] = (plc[min_len:], + trim_join_unit(unit, min_len)) + else: + yielded_placement = plc + next_items[i] = _next_or_none(plans[i]) + + yield yielded_placement, yielded_units diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py new file mode 100644 index 0000000000000..8ad569003a43a --- /dev/null +++ b/pandas/core/internals/managers.py @@ -0,0 +1,2068 @@ +# -*- coding: utf-8 -*- +from collections import defaultdict +from functools import partial +import itertools +import operator + +import numpy as np + +from pandas._libs import lib, internals as libinternals + +from pandas.util._validators import validate_bool_kwarg +from pandas.compat import range, map, zip + +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + PandasExtensionDtype) +from pandas.core.dtypes.common import ( + _NS_DTYPE, + is_datetimelike_v_numeric, + is_numeric_v_string_like, is_extension_type, + is_extension_array_dtype, + is_scalar) +from pandas.core.dtypes.cast import ( + maybe_promote, + infer_dtype_from_scalar, + find_common_type) +from pandas.core.dtypes.missing import isna +import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.generic import ABCSeries, ABCExtensionArray + +from pandas.core.base import PandasObject +import pandas.core.algorithms as algos +from pandas.core.sparse.array import _maybe_to_sparse + +from pandas.core.index import Index, MultiIndex, ensure_index +from pandas.core.indexing import maybe_convert_indices + +from pandas.io.formats.printing import pprint_thing + +from .blocks import ( + Block, DatetimeTZBlock, CategoricalBlock, ExtensionBlock, SparseBlock, + _extend_blocks, _merge_blocks, _safe_reshape, + make_block, get_block_type) +from .concat import ( # all for concatenate_block_managers + concatenate_join_units, is_uniform_join_units, + get_mgr_concatenation_plan, combine_concat_plans) + +# TODO: flexible with index=None and/or items=None + + +class BlockManager(PandasObject): + """ + Core internal data 
structure to implement DataFrame, Series, Panel, etc. + + Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a + lightweight blocked set of labeled data to be manipulated by the DataFrame + public API class + + Attributes + ---------- + shape + ndim + axes + values + items + + Methods + ------- + set_axis(axis, new_labels) + copy(deep=True) + + get_dtype_counts + get_ftype_counts + get_dtypes + get_ftypes + + apply(func, axes, block_filter_fn) + + get_bool_data + get_numeric_data + + get_slice(slice_like, axis) + get(label) + iget(loc) + get_scalar(label_tup) + + take(indexer, axis) + reindex_axis(new_labels, axis) + reindex_indexer(new_labels, indexer, axis) + + delete(label) + insert(loc, label, value) + set(label, value) + + Parameters + ---------- + + + Notes + ----- + This is *not* a public API class + """ + __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', + '_is_consolidated', '_blknos', '_blklocs'] + + def __init__(self, blocks, axes, do_integrity_check=True): + self.axes = [ensure_index(ax) for ax in axes] + self.blocks = tuple(blocks) + + for block in blocks: + if block.is_sparse: + if len(block.mgr_locs) != 1: + raise AssertionError("Sparse block refers to multiple " + "items") + else: + if self.ndim != block.ndim: + raise AssertionError( + 'Number of Block dimensions ({block}) must equal ' + 'number of axes ({self})'.format(block=block.ndim, + self=self.ndim)) + + if do_integrity_check: + self._verify_integrity() + + self._consolidate_check() + + self._rebuild_blknos_and_blklocs() + + def make_empty(self, axes=None): + """ return an empty BlockManager with the items axis of len 0 """ + if axes is None: + axes = [ensure_index([])] + [ensure_index(a) + for a in self.axes[1:]] + + # preserve dtype if possible + if self.ndim == 1: + blocks = np.array([], dtype=self.array_dtype) + else: + blocks = [] + return self.__class__(blocks, axes) + + def __nonzero__(self): + return True + + # Python3 compat + __bool__ = 
__nonzero__ + + @property + def shape(self): + return tuple(len(ax) for ax in self.axes) + + @property + def ndim(self): + return len(self.axes) + + def set_axis(self, axis, new_labels): + new_labels = ensure_index(new_labels) + old_len = len(self.axes[axis]) + new_len = len(new_labels) + + if new_len != old_len: + raise ValueError( + 'Length mismatch: Expected axis has {old} elements, new ' + 'values have {new} elements'.format(old=old_len, new=new_len)) + + self.axes[axis] = new_labels + + def rename_axis(self, mapper, axis, copy=True, level=None): + """ + Rename one of axes. + + Parameters + ---------- + mapper : unary callable + axis : int + copy : boolean, default True + level : int, default None + + """ + obj = self.copy(deep=copy) + obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) + return obj + + def add_prefix(self, prefix): + f = partial('{prefix}{}'.format, prefix=prefix) + return self.rename_axis(f, axis=0) + + def add_suffix(self, suffix): + f = partial('{}{suffix}'.format, suffix=suffix) + return self.rename_axis(f, axis=0) + + @property + def _is_single_block(self): + if self.ndim == 1: + return True + + if len(self.blocks) != 1: + return False + + blk = self.blocks[0] + return (blk.mgr_locs.is_slice_like and + blk.mgr_locs.as_slice == slice(0, len(self), 1)) + + def _rebuild_blknos_and_blklocs(self): + """ + Update mgr._blknos / mgr._blklocs. 
+ """ + new_blknos = np.empty(self.shape[0], dtype=np.int64) + new_blklocs = np.empty(self.shape[0], dtype=np.int64) + new_blknos.fill(-1) + new_blklocs.fill(-1) + + for blkno, blk in enumerate(self.blocks): + rl = blk.mgr_locs + new_blknos[rl.indexer] = blkno + new_blklocs[rl.indexer] = np.arange(len(rl)) + + if (new_blknos == -1).any(): + raise AssertionError("Gaps in blk ref_locs") + + self._blknos = new_blknos + self._blklocs = new_blklocs + + # make items read only for now + def _get_items(self): + return self.axes[0] + + items = property(fget=_get_items) + + def _get_counts(self, f): + """ return a dict of the counts of the function in BlockManager """ + self._consolidate_inplace() + counts = dict() + for b in self.blocks: + v = f(b) + counts[v] = counts.get(v, 0) + b.shape[0] + return counts + + def get_dtype_counts(self): + return self._get_counts(lambda b: b.dtype.name) + + def get_ftype_counts(self): + return self._get_counts(lambda b: b.ftype) + + def get_dtypes(self): + dtypes = np.array([blk.dtype for blk in self.blocks]) + return algos.take_1d(dtypes, self._blknos, allow_fill=False) + + def get_ftypes(self): + ftypes = np.array([blk.ftype for blk in self.blocks]) + return algos.take_1d(ftypes, self._blknos, allow_fill=False) + + def __getstate__(self): + block_values = [b.values for b in self.blocks] + block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] + axes_array = [ax for ax in self.axes] + + extra_state = { + '0.14.1': { + 'axes': axes_array, + 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) + for b in self.blocks] + } + } + + # First three elements of the state are to maintain forward + # compatibility with 0.13.1. 
+ return axes_array, block_values, block_items, extra_state + + def __setstate__(self, state): + def unpickle_block(values, mgr_locs): + # numpy < 1.7 pickle compat + if values.dtype == 'M8[us]': + values = values.astype('M8[ns]') + return make_block(values, placement=mgr_locs) + + if (isinstance(state, tuple) and len(state) >= 4 and + '0.14.1' in state[3]): + state = state[3]['0.14.1'] + self.axes = [ensure_index(ax) for ax in state['axes']] + self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) + for b in state['blocks']) + else: + # discard anything after 3rd, support beta pickling format for a + # little while longer + ax_arrays, bvalues, bitems = state[:3] + + self.axes = [ensure_index(ax) for ax in ax_arrays] + + if len(bitems) == 1 and self.axes[0].equals(bitems[0]): + # This is a workaround for pre-0.14.1 pickles that didn't + # support unpickling multi-block frames/panels with non-unique + # columns/items, because given a manager with items ["a", "b", + # "a"] there's no way of knowing which block's "a" is where. + # + # Single-block case can be supported under the assumption that + # block items corresponded to manager items 1-to-1. 
+ all_mgr_locs = [slice(0, len(bitems[0]))] + else: + all_mgr_locs = [self.axes[0].get_indexer(blk_items) + for blk_items in bitems] + + self.blocks = tuple( + unpickle_block(values, mgr_locs) + for values, mgr_locs in zip(bvalues, all_mgr_locs)) + + self._post_setstate() + + def _post_setstate(self): + self._is_consolidated = False + self._known_consolidated = False + self._rebuild_blknos_and_blklocs() + + def __len__(self): + return len(self.items) + + def __unicode__(self): + output = pprint_thing(self.__class__.__name__) + for i, ax in enumerate(self.axes): + if i == 0: + output += u'\nItems: {ax}'.format(ax=ax) + else: + output += u'\nAxis {i}: {ax}'.format(i=i, ax=ax) + + for block in self.blocks: + output += u'\n{block}'.format(block=pprint_thing(block)) + return output + + def _verify_integrity(self): + mgr_shape = self.shape + tot_items = sum(len(x.mgr_locs) for x in self.blocks) + for block in self.blocks: + if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: + construction_error(tot_items, block.shape[1:], self.axes) + if len(self.items) != tot_items: + raise AssertionError('Number of manager items must equal union of ' + 'block items\n# manager items: {0}, # ' + 'tot_items: {1}'.format( + len(self.items), tot_items)) + + def apply(self, f, axes=None, filter=None, do_integrity_check=False, + consolidate=True, **kwargs): + """ + iterate over the blocks, collect and create a new block manager + + Parameters + ---------- + f : the callable or function name to operate on at the block level + axes : optional (if not supplied, use self.axes) + filter : list, if supplied, only call the block if the filter is in + the block + do_integrity_check : boolean, default False. Do the block manager + integrity check + consolidate: boolean, default True. 
Join together blocks having same + dtype + + Returns + ------- + Block Manager (new object) + + """ + + result_blocks = [] + + # filter kwarg is used in replace-* family of methods + if filter is not None: + filter_locs = set(self.items.get_indexer_for(filter)) + if len(filter_locs) == len(self.items): + # All items are included, as if there were no filtering + filter = None + else: + kwargs['filter'] = filter_locs + + if consolidate: + self._consolidate_inplace() + + if f == 'where': + align_copy = True + if kwargs.get('align', True): + align_keys = ['other', 'cond'] + else: + align_keys = ['cond'] + elif f == 'putmask': + align_copy = False + if kwargs.get('align', True): + align_keys = ['new', 'mask'] + else: + align_keys = ['mask'] + elif f == 'eval': + align_copy = False + align_keys = ['other'] + elif f == 'fillna': + # fillna internally does putmask, maybe it's better to do this + # at mgr, not block level? + align_copy = False + align_keys = ['value'] + else: + align_keys = [] + + # TODO(EA): may interfere with ExtensionBlock.setitem for blocks + # with a .values attribute. 
+ aligned_args = dict((k, kwargs[k]) + for k in align_keys + if hasattr(kwargs[k], 'values') and + not isinstance(kwargs[k], ABCExtensionArray)) + + for b in self.blocks: + if filter is not None: + if not b.mgr_locs.isin(filter_locs).any(): + result_blocks.append(b) + continue + + if aligned_args: + b_items = self.items[b.mgr_locs.indexer] + + for k, obj in aligned_args.items(): + axis = getattr(obj, '_info_axis_number', 0) + kwargs[k] = obj.reindex(b_items, axis=axis, + copy=align_copy) + + kwargs['mgr'] = self + applied = getattr(b, f)(**kwargs) + result_blocks = _extend_blocks(applied, result_blocks) + + if len(result_blocks) == 0: + return self.make_empty(axes or self.axes) + bm = self.__class__(result_blocks, axes or self.axes, + do_integrity_check=do_integrity_check) + bm._consolidate_inplace() + return bm + + def reduction(self, f, axis=0, consolidate=True, transposed=False, + **kwargs): + """ + iterate over the blocks, collect and create a new block manager. + This routine is intended for reduction type operations and + will do inference on the generated blocks. + + Parameters + ---------- + f: the callable or function name to operate on at the block level + axis: reduction axis, default 0 + consolidate: boolean, default True. 
Join together blocks having same + dtype + transposed: boolean, default False + we are holding transposed data + + Returns + ------- + Block Manager (new object) + + """ + + if consolidate: + self._consolidate_inplace() + + axes, blocks = [], [] + for b in self.blocks: + kwargs['mgr'] = self + axe, block = getattr(b, f)(axis=axis, **kwargs) + + axes.append(axe) + blocks.append(block) + + # note that some DatetimeTZ, Categorical are always ndim==1 + ndim = {b.ndim for b in blocks} + + if 2 in ndim: + + new_axes = list(self.axes) + + # multiple blocks that are reduced + if len(blocks) > 1: + new_axes[1] = axes[0] + + # reset the placement to the original + for b, sb in zip(blocks, self.blocks): + b.mgr_locs = sb.mgr_locs + + else: + new_axes[axis] = Index(np.concatenate( + [ax.values for ax in axes])) + + if transposed: + new_axes = new_axes[::-1] + blocks = [b.make_block(b.values.T, + placement=np.arange(b.shape[1]) + ) for b in blocks] + + return self.__class__(blocks, new_axes) + + # 0 ndim + if 0 in ndim and 1 not in ndim: + values = np.array([b.values for b in blocks]) + if len(values) == 1: + return values.item() + blocks = [make_block(values, ndim=1)] + axes = Index([ax[0] for ax in axes]) + + # single block + values = _concat._concat_compat([b.values for b in blocks]) + + # compute the orderings of our original data + if len(self.blocks) > 1: + + indexer = np.empty(len(self.axes[0]), dtype=np.intp) + i = 0 + for b in self.blocks: + for j in b.mgr_locs: + indexer[j] = i + i = i + 1 + + values = values.take(indexer) + + return SingleBlockManager( + [make_block(values, + ndim=1, + placement=np.arange(len(values)))], + axes[0]) + + def isna(self, func, **kwargs): + return self.apply('apply', func=func, **kwargs) + + def where(self, **kwargs): + return self.apply('where', **kwargs) + + def eval(self, **kwargs): + return self.apply('eval', **kwargs) + + def quantile(self, **kwargs): + return self.reduction('quantile', **kwargs) + + def setitem(self, **kwargs): + 
return self.apply('setitem', **kwargs) + + def putmask(self, **kwargs): + return self.apply('putmask', **kwargs) + + def diff(self, **kwargs): + return self.apply('diff', **kwargs) + + def interpolate(self, **kwargs): + return self.apply('interpolate', **kwargs) + + def shift(self, **kwargs): + return self.apply('shift', **kwargs) + + def fillna(self, **kwargs): + return self.apply('fillna', **kwargs) + + def downcast(self, **kwargs): + return self.apply('downcast', **kwargs) + + def astype(self, dtype, **kwargs): + return self.apply('astype', dtype=dtype, **kwargs) + + def convert(self, **kwargs): + return self.apply('convert', **kwargs) + + def replace(self, **kwargs): + return self.apply('replace', **kwargs) + + def replace_list(self, src_list, dest_list, inplace=False, regex=False, + mgr=None): + """ do a list replace """ + + inplace = validate_bool_kwarg(inplace, 'inplace') + + if mgr is None: + mgr = self + + # figure out our mask a-priori to avoid repeated replacements + values = self.as_array() + + def comp(s): + if isna(s): + return isna(values) + return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq) + + masks = [comp(s) for i, s in enumerate(src_list)] + + result_blocks = [] + src_len = len(src_list) - 1 + for blk in self.blocks: + + # its possible to get multiple result blocks here + # replace ALWAYS will return a list + rb = [blk if inplace else blk.copy()] + for i, (s, d) in enumerate(zip(src_list, dest_list)): + new_rb = [] + for b in rb: + if b.dtype == np.object_: + convert = i == src_len + result = b.replace(s, d, inplace=inplace, regex=regex, + mgr=mgr, convert=convert) + new_rb = _extend_blocks(result, new_rb) + else: + # get our mask for this element, sized to this + # particular block + m = masks[i][b.mgr_locs.indexer] + if m.any(): + b = b.coerce_to_target_dtype(d) + new_rb.extend(b.putmask(m, d, inplace=True)) + else: + new_rb.append(b) + rb = new_rb + result_blocks.extend(rb) + + bm = self.__class__(result_blocks, self.axes) + 
bm._consolidate_inplace() + return bm + + def reshape_nd(self, axes, **kwargs): + """ a 2d-nd reshape operation on a BlockManager """ + return self.apply('reshape_nd', axes=axes, **kwargs) + + def is_consolidated(self): + """ + Return True if more than one block with the same dtype + """ + if not self._known_consolidated: + self._consolidate_check() + return self._is_consolidated + + def _consolidate_check(self): + ftypes = [blk.ftype for blk in self.blocks] + self._is_consolidated = len(ftypes) == len(set(ftypes)) + self._known_consolidated = True + + @property + def is_mixed_type(self): + # Warning, consolidation needs to get checked upstairs + self._consolidate_inplace() + return len(self.blocks) > 1 + + @property + def is_numeric_mixed_type(self): + # Warning, consolidation needs to get checked upstairs + self._consolidate_inplace() + return all(block.is_numeric for block in self.blocks) + + @property + def is_datelike_mixed_type(self): + # Warning, consolidation needs to get checked upstairs + self._consolidate_inplace() + return any(block.is_datelike for block in self.blocks) + + @property + def any_extension_types(self): + """Whether any of the blocks in this manager are extension blocks""" + return any(block.is_extension for block in self.blocks) + + @property + def is_view(self): + """ return a boolean if we are a single block and are a view """ + if len(self.blocks) == 1: + return self.blocks[0].is_view + + # It is technically possible to figure out which blocks are views + # e.g. [ b.values.base is not None for b in self.blocks ] + # but then we have the case of possibly some blocks being a view + # and some blocks not. setting in theory is possible on the non-view + # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit + # complicated + + return False + + def get_bool_data(self, copy=False): + """ + Parameters + ---------- + copy : boolean, default False + Whether to copy the blocks + """ + self._consolidate_inplace() + return self.combine([b for b in self.blocks if b.is_bool], copy) + + def get_numeric_data(self, copy=False): + """ + Parameters + ---------- + copy : boolean, default False + Whether to copy the blocks + """ + self._consolidate_inplace() + return self.combine([b for b in self.blocks if b.is_numeric], copy) + + def combine(self, blocks, copy=True): + """ return a new manager with the blocks """ + if len(blocks) == 0: + return self.make_empty() + + # FIXME: optimization potential + indexer = np.sort(np.concatenate([b.mgr_locs.as_array + for b in blocks])) + inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) + + new_blocks = [] + for b in blocks: + b = b.copy(deep=copy) + b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array, + axis=0, allow_fill=False) + new_blocks.append(b) + + axes = list(self.axes) + axes[0] = self.items.take(indexer) + + return self.__class__(new_blocks, axes, do_integrity_check=False) + + def get_slice(self, slobj, axis=0): + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 0: + new_blocks = self._slice_take_blocks_ax0(slobj) + else: + slicer = [slice(None)] * (axis + 1) + slicer[axis] = slobj + slicer = tuple(slicer) + new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] + + new_axes = list(self.axes) + new_axes[axis] = new_axes[axis][slobj] + + bm = self.__class__(new_blocks, new_axes, do_integrity_check=False) + bm._consolidate_inplace() + return bm + + def __contains__(self, item): + return item in self.items + + @property + def nblocks(self): + return len(self.blocks) + + def copy(self, deep=True, mgr=None): + """ + Make deep or shallow copy of BlockManager + + Parameters + ---------- + deep : boolean o rstring, default True + If False, return 
shallow copy (do not copy data) + If 'all', copy data and a deep copy of the index + + Returns + ------- + copy : BlockManager + """ + + # this preserves the notion of view copying of axes + if deep: + if deep == 'all': + copy = lambda ax: ax.copy(deep=True) + else: + copy = lambda ax: ax.view() + new_axes = [copy(ax) for ax in self.axes] + else: + new_axes = list(self.axes) + return self.apply('copy', axes=new_axes, deep=deep, + do_integrity_check=False) + + def as_array(self, transpose=False, items=None): + """Convert the blockmanager data into an numpy array. + + Parameters + ---------- + transpose : boolean, default False + If True, transpose the return array + items : list of strings or None + Names of block items that will be included in the returned + array. ``None`` means that all block items will be used + + Returns + ------- + arr : ndarray + """ + if len(self.blocks) == 0: + arr = np.empty(self.shape, dtype=float) + return arr.transpose() if transpose else arr + + if items is not None: + mgr = self.reindex_axis(items, axis=0) + else: + mgr = self + + if self._is_single_block or not self.is_mixed_type: + arr = mgr.blocks[0].get_values() + else: + arr = mgr._interleave() + + return arr.transpose() if transpose else arr + + def _interleave(self): + """ + Return ndarray from blocks with specified item order + Items must be contained in the blocks + """ + dtype = _interleaved_dtype(self.blocks) + + result = np.empty(self.shape, dtype=dtype) + + if result.shape[0] == 0: + # Workaround for numpy 1.7 bug: + # + # >>> a = np.empty((0,10)) + # >>> a[slice(0,0)] + # array([], shape=(0, 10), dtype=float64) + # >>> a[[]] + # Traceback (most recent call last): + # File "<stdin>", line 1, in <module> + # IndexError: index 0 is out of bounds for axis 0 with size 0 + return result + + itemmask = np.zeros(self.shape[0]) + + for blk in self.blocks: + rl = blk.mgr_locs + result[rl.indexer] = blk.get_values(dtype) + itemmask[rl.indexer] = 1 + + if not itemmask.all(): + raise 
AssertionError('Some items were not contained in blocks') + + return result + + def to_dict(self, copy=True): + """ + Return a dict of str(dtype) -> BlockManager + + Parameters + ---------- + copy : boolean, default True + + Returns + ------- + values : a dict of dtype -> BlockManager + + Notes + ----- + This consolidates based on str(dtype) + """ + self._consolidate_inplace() + + bd = {} + for b in self.blocks: + bd.setdefault(str(b.dtype), []).append(b) + + return {dtype: self.combine(blocks, copy=copy) + for dtype, blocks in bd.items()} + + def xs(self, key, axis=1, copy=True, takeable=False): + if axis < 1: + raise AssertionError( + 'Can only take xs across axis >= 1, got {ax}'.format(ax=axis)) + + # take by position + if takeable: + loc = key + else: + loc = self.axes[axis].get_loc(key) + + slicer = [slice(None, None) for _ in range(self.ndim)] + slicer[axis] = loc + slicer = tuple(slicer) + + new_axes = list(self.axes) + + # could be an array indexer! + if isinstance(loc, (slice, np.ndarray)): + new_axes[axis] = new_axes[axis][loc] + else: + new_axes.pop(axis) + + new_blocks = [] + if len(self.blocks) > 1: + # we must copy here as we are mixed type + for blk in self.blocks: + newb = make_block(values=blk.values[slicer], + klass=blk.__class__, + placement=blk.mgr_locs) + new_blocks.append(newb) + elif len(self.blocks) == 1: + block = self.blocks[0] + vals = block.values[slicer] + if copy: + vals = vals.copy() + new_blocks = [make_block(values=vals, + placement=block.mgr_locs, + klass=block.__class__)] + + return self.__class__(new_blocks, new_axes) + + def fast_xs(self, loc): + """ + get a cross sectional for a given location in the + items ; handle dups + + return the result, is *could* be a view in the case of a + single block + """ + if len(self.blocks) == 1: + return self.blocks[0].iget((slice(None), loc)) + + items = self.items + + # non-unique (GH4726) + if not items.is_unique: + result = self._interleave() + if self.ndim == 2: + result = result.T + 
return result[loc] + + # unique + dtype = _interleaved_dtype(self.blocks) + n = len(items) + result = np.empty(n, dtype=dtype) + for blk in self.blocks: + # Such assignment may incorrectly coerce NaT to None + # result[blk.mgr_locs] = blk._slice((slice(None), loc)) + for i, rl in enumerate(blk.mgr_locs): + result[rl] = blk._try_coerce_result(blk.iget((i, loc))) + + return result + + def consolidate(self): + """ + Join together blocks having same dtype + + Returns + ------- + y : BlockManager + """ + if self.is_consolidated(): + return self + + bm = self.__class__(self.blocks, self.axes) + bm._is_consolidated = False + bm._consolidate_inplace() + return bm + + def _consolidate_inplace(self): + if not self.is_consolidated(): + self.blocks = tuple(_consolidate(self.blocks)) + self._is_consolidated = True + self._known_consolidated = True + self._rebuild_blknos_and_blklocs() + + def get(self, item, fastpath=True): + """ + Return values for selected item (ndarray or BlockManager). + """ + if self.items.is_unique: + + if not isna(item): + loc = self.items.get_loc(item) + else: + indexer = np.arange(len(self.items))[isna(self.items)] + + # allow a single nan location indexer + if not is_scalar(indexer): + if len(indexer) == 1: + loc = indexer.item() + else: + raise ValueError("cannot label index with a null key") + + return self.iget(loc, fastpath=fastpath) + else: + + if isna(item): + raise TypeError("cannot label index with a null key") + + indexer = self.items.get_indexer_for([item]) + return self.reindex_indexer(new_axis=self.items[indexer], + indexer=indexer, axis=0, + allow_dups=True) + + def iget(self, i, fastpath=True): + """ + Return the data as a SingleBlockManager if fastpath=True and possible + + Otherwise return as a ndarray + """ + block = self.blocks[self._blknos[i]] + values = block.iget(self._blklocs[i]) + if not fastpath or not block._box_to_block_values or values.ndim != 1: + return values + + # fastpath shortcut for select a single-dim from a 2-dim BM 
+ return SingleBlockManager( + [block.make_block_same_class(values, + placement=slice(0, len(values)), + ndim=1)], + self.axes[1]) + + def get_scalar(self, tup): + """ + Retrieve single item + """ + full_loc = [ax.get_loc(x) for ax, x in zip(self.axes, tup)] + blk = self.blocks[self._blknos[full_loc[0]]] + values = blk.values + + # FIXME: this may return non-upcasted types? + if values.ndim == 1: + return values[full_loc[1]] + + full_loc[0] = self._blklocs[full_loc[0]] + return values[tuple(full_loc)] + + def delete(self, item): + """ + Delete selected item (items if non-unique) in-place. + """ + indexer = self.items.get_loc(item) + + is_deleted = np.zeros(self.shape[0], dtype=np.bool_) + is_deleted[indexer] = True + ref_loc_offset = -is_deleted.cumsum() + + is_blk_deleted = [False] * len(self.blocks) + + if isinstance(indexer, int): + affected_start = indexer + else: + affected_start = is_deleted.nonzero()[0][0] + + for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): + blk = self.blocks[blkno] + bml = blk.mgr_locs + blk_del = is_deleted[bml.indexer].nonzero()[0] + + if len(blk_del) == len(bml): + is_blk_deleted[blkno] = True + continue + elif len(blk_del) != 0: + blk.delete(blk_del) + bml = blk.mgr_locs + + blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) + + # FIXME: use Index.delete as soon as it uses fastpath=True + self.axes[0] = self.items[~is_deleted] + self.blocks = tuple(b for blkno, b in enumerate(self.blocks) + if not is_blk_deleted[blkno]) + self._shape = None + self._rebuild_blknos_and_blklocs() + + def set(self, item, value, check=False): + """ + Set new item in-place. Does not consolidate. 
Adds new Block if not + contained in the current set of items + if check, then validate that we are not setting the same data in-place + """ + # FIXME: refactor, clearly separate broadcasting & zip-like assignment + # can prob also fix the various if tests for sparse/categorical + + # TODO(EA): Remove an is_extension_ when all extension types satisfy + # the interface + value_is_extension_type = (is_extension_type(value) or + is_extension_array_dtype(value)) + + # categorical/spares/datetimetz + if value_is_extension_type: + + def value_getitem(placement): + return value + else: + if value.ndim == self.ndim - 1: + value = _safe_reshape(value, (1,) + value.shape) + + def value_getitem(placement): + return value + else: + + def value_getitem(placement): + return value[placement.indexer] + + if value.shape[1:] != self.shape[1:]: + raise AssertionError('Shape of new values must be compatible ' + 'with manager shape') + + try: + loc = self.items.get_loc(item) + except KeyError: + # This item wasn't present, just insert at end + self.insert(len(self.items), item, value) + return + + if isinstance(loc, int): + loc = [loc] + + blknos = self._blknos[loc] + blklocs = self._blklocs[loc].copy() + + unfit_mgr_locs = [] + unfit_val_locs = [] + removed_blknos = [] + for blkno, val_locs in libinternals.get_blkno_placements(blknos, + self.nblocks, + group=True): + blk = self.blocks[blkno] + blk_locs = blklocs[val_locs.indexer] + if blk.should_store(value): + blk.set(blk_locs, value_getitem(val_locs), check=check) + else: + unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) + unfit_val_locs.append(val_locs) + + # If all block items are unfit, schedule the block for removal. 
+ if len(val_locs) == len(blk.mgr_locs): + removed_blknos.append(blkno) + else: + self._blklocs[blk.mgr_locs.indexer] = -1 + blk.delete(blk_locs) + self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) + + if len(removed_blknos): + # Remove blocks & update blknos accordingly + is_deleted = np.zeros(self.nblocks, dtype=np.bool_) + is_deleted[removed_blknos] = True + + new_blknos = np.empty(self.nblocks, dtype=np.int64) + new_blknos.fill(-1) + new_blknos[~is_deleted] = np.arange(self.nblocks - + len(removed_blknos)) + self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, + allow_fill=False) + self.blocks = tuple(blk for i, blk in enumerate(self.blocks) + if i not in set(removed_blknos)) + + if unfit_val_locs: + unfit_mgr_locs = np.concatenate(unfit_mgr_locs) + unfit_count = len(unfit_mgr_locs) + + new_blocks = [] + if value_is_extension_type: + # This code (ab-)uses the fact that sparse blocks contain only + # one item. + new_blocks.extend( + make_block(values=value.copy(), ndim=self.ndim, + placement=slice(mgr_loc, mgr_loc + 1)) + for mgr_loc in unfit_mgr_locs) + + self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + + len(self.blocks)) + self._blklocs[unfit_mgr_locs] = 0 + + else: + # unfit_val_locs contains BlockPlacement objects + unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) + + new_blocks.append( + make_block(values=value_getitem(unfit_val_items), + ndim=self.ndim, placement=unfit_mgr_locs)) + + self._blknos[unfit_mgr_locs] = len(self.blocks) + self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) + + self.blocks += tuple(new_blocks) + + # Newly created block's dtype may already be present. + self._known_consolidated = False + + def insert(self, loc, item, value, allow_duplicates=False): + """ + Insert item at selected position. 
+ + Parameters + ---------- + loc : int + item : hashable + value : array_like + allow_duplicates: bool + If False, trying to insert non-unique item will raise + + """ + if not allow_duplicates and item in self.items: + # Should this be a different kind of error?? + raise ValueError('cannot insert {}, already exists'.format(item)) + + if not isinstance(loc, int): + raise TypeError("loc must be int") + + # insert to the axis; this could possibly raise a TypeError + new_axis = self.items.insert(loc, item) + + block = make_block(values=value, ndim=self.ndim, + placement=slice(loc, loc + 1)) + + for blkno, count in _fast_count_smallints(self._blknos[loc:]): + blk = self.blocks[blkno] + if count == len(blk.mgr_locs): + blk.mgr_locs = blk.mgr_locs.add(1) + else: + new_mgr_locs = blk.mgr_locs.as_array.copy() + new_mgr_locs[new_mgr_locs >= loc] += 1 + blk.mgr_locs = new_mgr_locs + + if loc == self._blklocs.shape[0]: + # np.append is a lot faster (at least in numpy 1.7.1), let's use it + # if we can. + self._blklocs = np.append(self._blklocs, 0) + self._blknos = np.append(self._blknos, len(self.blocks)) + else: + self._blklocs = np.insert(self._blklocs, loc, 0) + self._blknos = np.insert(self._blknos, loc, len(self.blocks)) + + self.axes[0] = new_axis + self.blocks += (block,) + self._shape = None + + self._known_consolidated = False + + if len(self.blocks) > 100: + self._consolidate_inplace() + + def reindex_axis(self, new_index, axis, method=None, limit=None, + fill_value=None, copy=True): + """ + Conform block manager to new index. 
+ """ + new_index = ensure_index(new_index) + new_index, indexer = self.axes[axis].reindex(new_index, method=method, + limit=limit) + + return self.reindex_indexer(new_index, indexer, axis=axis, + fill_value=fill_value, copy=copy) + + def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, + allow_dups=False, copy=True): + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray of int64 or None + axis : int + fill_value : object + allow_dups : bool + + pandas-indexer with -1's only. + """ + if indexer is None: + if new_axis is self.axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result.axes = list(self.axes) + result.axes[axis] = new_axis + return result + + self._consolidate_inplace() + + # some axes don't allow reindexing with dups + if not allow_dups: + self.axes[axis]._can_reindex(indexer) + + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 0: + new_blocks = self._slice_take_blocks_ax0(indexer, + fill_tuple=(fill_value,)) + else: + new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=( + fill_value if fill_value is not None else blk.fill_value,)) + for blk in self.blocks] + + new_axes = list(self.axes) + new_axes[axis] = new_axis + return self.__class__(new_blocks, new_axes) + + def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): + """ + Slice/take blocks along axis=0. 
+ + Overloaded for SingleBlock + + Returns + ------- + new_blocks : list of Block + + """ + + allow_fill = fill_tuple is not None + + sl_type, slobj, sllen = _preprocess_slice_or_indexer( + slice_or_indexer, self.shape[0], allow_fill=allow_fill) + + if self._is_single_block: + blk = self.blocks[0] + + if sl_type in ('slice', 'mask'): + return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] + elif not allow_fill or self.ndim == 1: + if allow_fill and fill_tuple[0] is None: + _, fill_value = maybe_promote(blk.dtype) + fill_tuple = (fill_value, ) + + return [blk.take_nd(slobj, axis=0, + new_mgr_locs=slice(0, sllen), + fill_tuple=fill_tuple)] + + if sl_type in ('slice', 'mask'): + blknos = self._blknos[slobj] + blklocs = self._blklocs[slobj] + else: + blknos = algos.take_1d(self._blknos, slobj, fill_value=-1, + allow_fill=allow_fill) + blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1, + allow_fill=allow_fill) + + # When filling blknos, make sure blknos is updated before appending to + # blocks list, that way new blkno is exactly len(blocks). + # + # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, + # pytables serialization will break otherwise. + blocks = [] + for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, + self.nblocks, + group=True): + if blkno == -1: + # If we've got here, fill_tuple was not None. + fill_value = fill_tuple[0] + + blocks.append(self._make_na_block(placement=mgr_locs, + fill_value=fill_value)) + else: + blk = self.blocks[blkno] + + # Otherwise, slicing along items axis is necessary. + if not blk._can_consolidate: + # A non-consolidatable block, it's easy, because there's + # only one item and each mgr loc is a copy of that single + # item. 
+ for mgr_loc in mgr_locs: + newblk = blk.copy(deep=True) + newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) + blocks.append(newblk) + + else: + blocks.append(blk.take_nd(blklocs[mgr_locs.indexer], + axis=0, new_mgr_locs=mgr_locs, + fill_tuple=None)) + + return blocks + + def _make_na_block(self, placement, fill_value=None): + # TODO: infer dtypes other than float64 from fill_value + + if fill_value is None: + fill_value = np.nan + block_shape = list(self.shape) + block_shape[0] = len(placement) + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + block_values = np.empty(block_shape, dtype=dtype) + block_values.fill(fill_value) + return make_block(block_values, placement=placement) + + def take(self, indexer, axis=1, verify=True, convert=True): + """ + Take items along any axis. + """ + self._consolidate_inplace() + indexer = (np.arange(indexer.start, indexer.stop, indexer.step, + dtype='int64') + if isinstance(indexer, slice) + else np.asanyarray(indexer, dtype='int64')) + + n = self.shape[axis] + if convert: + indexer = maybe_convert_indices(indexer, n) + + if verify: + if ((indexer == -1) | (indexer >= n)).any(): + raise Exception('Indices must be nonzero and less than ' + 'the axis length') + + new_labels = self.axes[axis].take(indexer) + return self.reindex_indexer(new_axis=new_labels, indexer=indexer, + axis=axis, allow_dups=True) + + def merge(self, other, lsuffix='', rsuffix=''): + if not self._is_indexed_like(other): + raise AssertionError('Must have same axes to merge managers') + + l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, + right=other.items, rsuffix=rsuffix) + new_items = _concat_indexes([l, r]) + + new_blocks = [blk.copy(deep=False) for blk in self.blocks] + + offset = self.shape[0] + for blk in other.blocks: + blk = blk.copy(deep=False) + blk.mgr_locs = blk.mgr_locs.add(offset) + new_blocks.append(blk) + + new_axes = list(self.axes) + new_axes[0] = new_items + + return self.__class__(_consolidate(new_blocks), new_axes) 
+ + def _is_indexed_like(self, other): + """ + Check all axes except items + """ + if self.ndim != other.ndim: + raise AssertionError( + 'Number of dimensions must agree got {ndim} and ' + '{oth_ndim}'.format(ndim=self.ndim, oth_ndim=other.ndim)) + for ax, oax in zip(self.axes[1:], other.axes[1:]): + if not ax.equals(oax): + return False + return True + + def equals(self, other): + self_axes, other_axes = self.axes, other.axes + if len(self_axes) != len(other_axes): + return False + if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): + return False + self._consolidate_inplace() + other._consolidate_inplace() + if len(self.blocks) != len(other.blocks): + return False + + # canonicalize block order, using a tuple combining the type + # name and then mgr_locs because there might be unconsolidated + # blocks (say, Categorical) which can only be distinguished by + # the iteration order + def canonicalize(block): + return (block.dtype.name, block.mgr_locs.as_array.tolist()) + + self_blocks = sorted(self.blocks, key=canonicalize) + other_blocks = sorted(other.blocks, key=canonicalize) + return all(block.equals(oblock) + for block, oblock in zip(self_blocks, other_blocks)) + + def unstack(self, unstacker_func): + """Return a blockmanager with all blocks unstacked. + + Parameters + ---------- + unstacker_func : callable + A (partially-applied) ``pd.core.reshape._Unstacker`` class. 
+ + Returns + ------- + unstacked : BlockManager + """ + dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) + new_columns = dummy.get_new_columns() + new_index = dummy.get_new_index() + new_blocks = [] + columns_mask = [] + + for blk in self.blocks: + blocks, mask = blk._unstack( + partial(unstacker_func, + value_columns=self.items[blk.mgr_locs.indexer]), + new_columns) + + new_blocks.extend(blocks) + columns_mask.extend(mask) + + new_columns = new_columns[columns_mask] + + bm = BlockManager(new_blocks, [new_columns, new_index]) + return bm + + +class SingleBlockManager(BlockManager): + """ manage a single block with """ + + ndim = 1 + _is_consolidated = True + _known_consolidated = True + __slots__ = () + + def __init__(self, block, axis, do_integrity_check=False, fastpath=False): + + if isinstance(axis, list): + if len(axis) != 1: + raise ValueError("cannot create SingleBlockManager with more " + "than 1 axis") + axis = axis[0] + + # passed from constructor, single block, single axis + if fastpath: + self.axes = [axis] + if isinstance(block, list): + + # empty block + if len(block) == 0: + block = [np.array([])] + elif len(block) != 1: + raise ValueError('Cannot create SingleBlockManager with ' + 'more than 1 block') + block = block[0] + else: + self.axes = [ensure_index(axis)] + + # create the block here + if isinstance(block, list): + + # provide consolidation to the interleaved_dtype + if len(block) > 1: + dtype = _interleaved_dtype(block) + block = [b.astype(dtype) for b in block] + block = _consolidate(block) + + if len(block) != 1: + raise ValueError('Cannot create SingleBlockManager with ' + 'more than 1 block') + block = block[0] + + if not isinstance(block, Block): + block = make_block(block, placement=slice(0, len(axis)), ndim=1) + + self.blocks = [block] + + def _post_setstate(self): + pass + + @property + def _block(self): + return self.blocks[0] + + @property + def _values(self): + return self._block.values + + @property + def 
_blknos(self): + """ compat with BlockManager """ + return None + + @property + def _blklocs(self): + """ compat with BlockManager """ + return None + + def get_slice(self, slobj, axis=0): + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + return self.__class__(self._block._slice(slobj), + self.index[slobj], fastpath=True) + + @property + def index(self): + return self.axes[0] + + def convert(self, **kwargs): + """ convert the whole block as one """ + kwargs['by_item'] = False + return self.apply('convert', **kwargs) + + @property + def dtype(self): + return self._block.dtype + + @property + def array_dtype(self): + return self._block.array_dtype + + @property + def ftype(self): + return self._block.ftype + + def get_dtype_counts(self): + return {self.dtype.name: 1} + + def get_ftype_counts(self): + return {self.ftype: 1} + + def get_dtypes(self): + return np.array([self._block.dtype]) + + def get_ftypes(self): + return np.array([self._block.ftype]) + + def external_values(self): + return self._block.external_values() + + def internal_values(self): + return self._block.internal_values() + + def formatting_values(self): + """Return the internal values used by the DataFrame/SeriesFormatter""" + return self._block.formatting_values() + + def get_values(self): + """ return a dense type view """ + return np.array(self._block.to_dense(), copy=False) + + @property + def asobject(self): + """ + return a object dtype array. datetime/timedelta like values are boxed + to Timestamp/Timedelta instances. + """ + return self._block.get_values(dtype=object) + + @property + def _can_hold_na(self): + return self._block._can_hold_na + + def is_consolidated(self): + return True + + def _consolidate_check(self): + pass + + def _consolidate_inplace(self): + pass + + def delete(self, item): + """ + Delete single item from SingleBlockManager. + + Ensures that self.blocks doesn't become empty. 
+ """ + loc = self.items.get_loc(item) + self._block.delete(loc) + self.axes[0] = self.axes[0].delete(loc) + + def fast_xs(self, loc): + """ + fast path for getting a cross-section + return a view of the data + """ + return self._block.values[loc] + + def concat(self, to_concat, new_axis): + """ + Concatenate a list of SingleBlockManagers into a single + SingleBlockManager. + + Used for pd.concat of Series objects with axis=0. + + Parameters + ---------- + to_concat : list of SingleBlockManagers + new_axis : Index of the result + + Returns + ------- + SingleBlockManager + + """ + non_empties = [x for x in to_concat if len(x) > 0] + + # check if all series are of the same block type: + if len(non_empties) > 0: + blocks = [obj.blocks[0] for obj in non_empties] + + if all(type(b) is type(blocks[0]) for b in blocks[1:]): # noqa + new_block = blocks[0].concat_same_type(blocks) + else: + values = [x.values for x in blocks] + values = _concat._concat_compat(values) + new_block = make_block( + values, placement=slice(0, len(values), 1)) + else: + values = [x._block.values for x in to_concat] + values = _concat._concat_compat(values) + new_block = make_block( + values, placement=slice(0, len(values), 1)) + + mgr = SingleBlockManager(new_block, new_axis) + return mgr + + +# -------------------------------------------------------------------- +# Constructor Helpers + +def create_block_manager_from_blocks(blocks, axes): + try: + if len(blocks) == 1 and not isinstance(blocks[0], Block): + # if blocks[0] is of length 0, return empty blocks + if not len(blocks[0]): + blocks = [] + else: + # It's OK if a single block is passed as values, its placement + # is basically "all items", but if there're many, don't bother + # converting, it's an error anyway. 
+ blocks = [make_block(values=blocks[0], + placement=slice(0, len(axes[0])))] + + mgr = BlockManager(blocks, axes) + mgr._consolidate_inplace() + return mgr + + except (ValueError) as e: + blocks = [getattr(b, 'values', b) for b in blocks] + tot_items = sum(b.shape[0] for b in blocks) + construction_error(tot_items, blocks[0].shape[1:], axes, e) + + +def create_block_manager_from_arrays(arrays, names, axes): + + try: + blocks = form_blocks(arrays, names, axes) + mgr = BlockManager(blocks, axes) + mgr._consolidate_inplace() + return mgr + except ValueError as e: + construction_error(len(arrays), arrays[0].shape, axes, e) + + +def construction_error(tot_items, block_shape, axes, e=None): + """ raise a helpful message about our construction """ + passed = tuple(map(int, [tot_items] + list(block_shape))) + implied = tuple(map(int, [len(ax) for ax in axes])) + if passed == implied and e is not None: + raise e + if block_shape[0] == 0: + raise ValueError("Empty data passed with indices specified.") + raise ValueError("Shape of passed values is {0}, indices imply {1}".format( + passed, implied)) + + +# ----------------------------------------------------------------------- + +def form_blocks(arrays, names, axes): + # put "leftover" items in float bucket, where else? + # generalize? 
+ items_dict = defaultdict(list) + extra_locs = [] + + names_idx = ensure_index(names) + if names_idx.equals(axes[0]): + names_indexer = np.arange(len(names_idx)) + else: + assert names_idx.intersection(axes[0]).is_unique + names_indexer = names_idx.get_indexer_for(axes[0]) + + for i, name_idx in enumerate(names_indexer): + if name_idx == -1: + extra_locs.append(i) + continue + + k = names[name_idx] + v = arrays[name_idx] + + block_type = get_block_type(v) + items_dict[block_type.__name__].append((i, k, v)) + + blocks = [] + if len(items_dict['FloatBlock']): + float_blocks = _multi_blockify(items_dict['FloatBlock']) + blocks.extend(float_blocks) + + if len(items_dict['ComplexBlock']): + complex_blocks = _multi_blockify(items_dict['ComplexBlock']) + blocks.extend(complex_blocks) + + if len(items_dict['TimeDeltaBlock']): + timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) + blocks.extend(timedelta_blocks) + + if len(items_dict['IntBlock']): + int_blocks = _multi_blockify(items_dict['IntBlock']) + blocks.extend(int_blocks) + + if len(items_dict['DatetimeBlock']): + datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], + _NS_DTYPE) + blocks.extend(datetime_blocks) + + if len(items_dict['DatetimeTZBlock']): + dttz_blocks = [make_block(array, + klass=DatetimeTZBlock, + placement=[i]) + for i, _, array in items_dict['DatetimeTZBlock']] + blocks.extend(dttz_blocks) + + if len(items_dict['BoolBlock']): + bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) + blocks.extend(bool_blocks) + + if len(items_dict['ObjectBlock']) > 0: + object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) + blocks.extend(object_blocks) + + if len(items_dict['SparseBlock']) > 0: + sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) + blocks.extend(sparse_blocks) + + if len(items_dict['CategoricalBlock']) > 0: + cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i]) + for i, _, array in items_dict['CategoricalBlock']] + 
blocks.extend(cat_blocks) + + if len(items_dict['ExtensionBlock']): + + external_blocks = [ + make_block(array, klass=ExtensionBlock, placement=[i]) + for i, _, array in items_dict['ExtensionBlock'] + ] + + blocks.extend(external_blocks) + + if len(extra_locs): + shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) + + # empty items -> dtype object + block_values = np.empty(shape, dtype=object) + block_values.fill(np.nan) + + na_block = make_block(block_values, placement=extra_locs) + blocks.append(na_block) + + return blocks + + +def _simple_blockify(tuples, dtype): + """ return a single array of a block that has a single dtype; if dtype is + not None, coerce to this dtype + """ + values, placement = _stack_arrays(tuples, dtype) + + # CHECK DTYPE? + if dtype is not None and values.dtype != dtype: # pragma: no cover + values = values.astype(dtype) + + block = make_block(values, placement=placement) + return [block] + + +def _multi_blockify(tuples, dtype=None): + """ return an array of blocks that potentially have different dtypes """ + + # group by dtype + grouper = itertools.groupby(tuples, lambda x: x[2].dtype) + + new_blocks = [] + for dtype, tup_block in grouper: + + values, placement = _stack_arrays(list(tup_block), dtype) + + block = make_block(values, placement=placement) + new_blocks.append(block) + + return new_blocks + + +def _sparse_blockify(tuples, dtype=None): + """ return an array of blocks that potentially have different dtypes (and + are sparse) + """ + + new_blocks = [] + for i, names, array in tuples: + array = _maybe_to_sparse(array) + block = make_block(array, klass=SparseBlock, placement=[i]) + new_blocks.append(block) + + return new_blocks + + +def _stack_arrays(tuples, dtype): + + # fml + def _asarray_compat(x): + if isinstance(x, ABCSeries): + return x._values + else: + return np.asarray(x) + + def _shape_compat(x): + if isinstance(x, ABCSeries): + return len(x), + else: + return x.shape + + placement, names, arrays = zip(*tuples) + 
+ first = arrays[0] + shape = (len(arrays),) + _shape_compat(first) + + stacked = np.empty(shape, dtype=dtype) + for i, arr in enumerate(arrays): + stacked[i] = _asarray_compat(arr) + + return stacked, placement + + +def _interleaved_dtype(blocks): + if not len(blocks): + return None + + dtype = find_common_type([b.dtype for b in blocks]) + + # only numpy compat + if isinstance(dtype, (PandasExtensionDtype, ExtensionDtype)): + dtype = np.object + + return dtype + + +def _consolidate(blocks): + """ + Merge blocks having same dtype, exclude non-consolidating blocks + """ + + # sort by _can_consolidate, dtype + gkey = lambda x: x._consolidate_key + grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) + + new_blocks = [] + for (_can_consolidate, dtype), group_blocks in grouper: + merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, + _can_consolidate=_can_consolidate) + new_blocks = _extend_blocks(merged_blocks, new_blocks) + return new_blocks + + +def _maybe_compare(a, b, op): + + is_a_array = isinstance(a, np.ndarray) + is_b_array = isinstance(b, np.ndarray) + + # numpy deprecation warning to have i8 vs integer comparisons + if is_datetimelike_v_numeric(a, b): + result = False + + # numpy deprecation warning if comparing numeric vs string-like + elif is_numeric_v_string_like(a, b): + result = False + + else: + result = op(a, b) + + if is_scalar(result) and (is_a_array or is_b_array): + type_names = [type(a).__name__, type(b).__name__] + + if is_a_array: + type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) + + if is_b_array: + type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) + + raise TypeError( + "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], + b=type_names[1])) + return result + + +def _concat_indexes(indexes): + return indexes[0].append(indexes[1:]) + + +def items_overlap_with_suffix(left, lsuffix, right, rsuffix): + """ + If two indices overlap, add suffixes to overlapping entries. 
+ + If corresponding suffix is empty, the entry is simply converted to string. + + """ + to_rename = left.intersection(right) + if len(to_rename) == 0: + return left, right + else: + if not lsuffix and not rsuffix: + raise ValueError('columns overlap but no suffix specified: ' + '{rename}'.format(rename=to_rename)) + + def lrenamer(x): + if x in to_rename: + return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) + return x + + def rrenamer(x): + if x in to_rename: + return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) + return x + + return (_transform_index(left, lrenamer), + _transform_index(right, rrenamer)) + + +def _transform_index(index, func, level=None): + """ + Apply function to all values found in index. + + This includes transforming multiindex entries separately. + Only apply function to one level of the MultiIndex if level is specified. + + """ + if isinstance(index, MultiIndex): + if level is not None: + items = [tuple(func(y) if i == level else y + for i, y in enumerate(x)) for x in index] + else: + items = [tuple(func(y) for y in x) for x in index] + return MultiIndex.from_tuples(items, names=index.names) + else: + items = [func(x) for x in index] + return Index(items, name=index.name, tupleize_cols=False) + + +def _fast_count_smallints(arr): + """Faster version of set(arr) for sequences of small numbers.""" + if len(arr) == 0: + # Handle empty arr case separately: numpy 1.6 chokes on that. 
+ return np.empty((0, 2), dtype=arr.dtype) + else: + counts = np.bincount(arr.astype(np.int_)) + nz = counts.nonzero()[0] + return np.c_[nz, counts[nz]] + + +def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): + if isinstance(slice_or_indexer, slice): + return ('slice', slice_or_indexer, + libinternals.slice_len(slice_or_indexer, length)) + elif (isinstance(slice_or_indexer, np.ndarray) and + slice_or_indexer.dtype == np.bool_): + return 'mask', slice_or_indexer, slice_or_indexer.sum() + else: + indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) + if not allow_fill: + indexer = maybe_convert_indices(indexer, length) + return 'fancy', indexer, len(indexer) + + +def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): + """ + Concatenate block managers into one. + + Parameters + ---------- + mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + copy : bool + + """ + concat_plan = combine_concat_plans( + [get_mgr_concatenation_plan(mgr, indexers) + for mgr, indexers in mgrs_indexers], concat_axis) + + blocks = [] + + for placement, join_units in concat_plan: + + if len(join_units) == 1 and not join_units[0].indexers: + b = join_units[0].block + values = b.values + if copy: + values = values.copy() + elif not copy: + values = values.view() + b = b.make_block_same_class(values, placement=placement) + elif is_uniform_join_units(join_units): + b = join_units[0].block.concat_same_type( + [ju.block for ju in join_units], placement=placement) + else: + b = make_block( + concatenate_join_units(join_units, concat_axis, copy=copy), + placement=placement) + blocks.append(b) + + return BlockManager(blocks, axes)
Follow-up to #22014. Moved `get_blockno_placements` to libinternals since a) its a natural fit anyway and b) it is needed in both concat and managers and I didn't want to runtime-import it. The only non-cut/paste edit is a change of `isinstance(self.block, ExtensionBlock)` to `self.block.is_extension` `__init__` namespace is big in part because `io.packers` uses `getattr(internals, name)` and is just really tightly intertwined with internals.
https://api.github.com/repos/pandas-dev/pandas/pulls/22028
2018-07-23T16:44:24Z
2018-07-24T00:03:35Z
2018-07-24T00:03:35Z
2018-07-26T16:22:30Z
TST: restructure internal extension arrays tests (split between /arrays and /extension)
diff --git a/pandas/tests/extension/integer/test_integer.py b/pandas/tests/arrays/test_integer.py similarity index 70% rename from pandas/tests/extension/integer/test_integer.py rename to pandas/tests/arrays/test_integer.py index 3af127091d2d8..349a6aee5701e 100644 --- a/pandas/tests/extension/integer/test_integer.py +++ b/pandas/tests/arrays/test_integer.py @@ -1,11 +1,10 @@ +# -*- coding: utf-8 -*- import numpy as np import pandas as pd import pandas.util.testing as tm import pytest -from pandas.tests.extension import base -from pandas.api.types import ( - is_integer, is_scalar, is_float, is_float_dtype) +from pandas.api.types import is_integer, is_float, is_float_dtype, is_scalar from pandas.core.dtypes.generic import ABCIndexClass from pandas.core.arrays import ( @@ -14,6 +13,8 @@ Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype) +from pandas.tests.extension.base import BaseOpsUtil + def make_data(): return (list(range(8)) + @@ -39,42 +40,13 @@ def data_missing(dtype): return integer_array([np.nan, 1], dtype=dtype) -@pytest.fixture -def data_repeated(data): - def gen(count): - for _ in range(count): - yield data - yield gen - - -@pytest.fixture -def data_for_sorting(dtype): - return integer_array([1, 2, 0], dtype=dtype) - - -@pytest.fixture -def data_missing_for_sorting(dtype): - return integer_array([1, np.nan, 0], dtype=dtype) - - -@pytest.fixture -def na_cmp(): - # we are np.nan - return lambda x, y: np.isnan(x) and np.isnan(y) - - -@pytest.fixture -def na_value(): - return np.nan - - -@pytest.fixture -def data_for_grouping(dtype): - b = 1 - a = 0 - c = 2 - na = np.nan - return integer_array([b, b, na, na, a, a, b, c], dtype=dtype) +@pytest.fixture(params=['data', 'data_missing']) +def all_data(request, data, data_missing): + """Parametrized fixture giving 'data' and 'data_missing'""" + if request.param == 'data': + return data + elif request.param == 'data_missing': + return data_missing def 
test_dtypes(dtype): @@ -87,61 +59,50 @@ def test_dtypes(dtype): assert dtype.name is not None -class BaseInteger(object): - - def assert_index_equal(self, left, right, *args, **kwargs): - - left_na = left.isna() - right_na = right.isna() +class TestInterface(object): - tm.assert_numpy_array_equal(left_na, right_na) - return tm.assert_index_equal(left[~left_na], - right[~right_na], - *args, **kwargs) - - def assert_series_equal(self, left, right, *args, **kwargs): + def test_repr_array(self, data): + result = repr(data) - left_na = left.isna() - right_na = right.isna() + # not long + assert '...' not in result - tm.assert_series_equal(left_na, right_na) - return tm.assert_series_equal(left[~left_na], - right[~right_na], - *args, **kwargs) + assert 'dtype=' in result + assert 'IntegerArray' in result - def assert_frame_equal(self, left, right, *args, **kwargs): - # TODO(EA): select_dtypes - tm.assert_index_equal( - left.columns, right.columns, - exact=kwargs.get('check_column_type', 'equiv'), - check_names=kwargs.get('check_names', True), - check_exact=kwargs.get('check_exact', False), - check_categorical=kwargs.get('check_categorical', True), - obj='{obj}.columns'.format(obj=kwargs.get('obj', 'DataFrame'))) + def test_repr_array_long(self, data): + # some arrays may be able to assert a ... in the repr + with pd.option_context('display.max_seq_items', 1): + result = repr(data) - integers = (left.dtypes == 'integer').index + assert '...' 
in result + assert 'length' in result - for col in integers: - self.assert_series_equal(left[col], right[col], - *args, **kwargs) - left = left.drop(columns=integers) - right = right.drop(columns=integers) - tm.assert_frame_equal(left, right, *args, **kwargs) +class TestConstructors(object): + def test_from_dtype_from_float(self, data): + # construct from our dtype & string dtype + dtype = data.dtype -class TestDtype(BaseInteger, base.BaseDtypeTests): + # from float + expected = pd.Series(data) + result = pd.Series(np.array(data).astype('float'), dtype=str(dtype)) + tm.assert_series_equal(result, expected) - @pytest.mark.skip(reason="using multiple dtypes") - def test_is_dtype_unboxes_dtype(self): - # we have multiple dtypes, so skip - pass + # from int / list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + tm.assert_series_equal(result, expected) - def test_array_type_with_arg(self, data, dtype): - assert dtype.construct_array_type() is IntegerArray + # from int / array + expected = pd.Series(data).dropna().reset_index(drop=True) + dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) + result = pd.Series(dropped, dtype=str(dtype)) + tm.assert_series_equal(result, expected) -class TestArithmeticOps(BaseInteger, base.BaseArithmeticOpsTests): +class TestArithmeticOps(BaseOpsUtil): def _check_divmod_op(self, s, op, other, exc=None): super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) @@ -178,7 +139,7 @@ def _check_op_float(self, result, expected, mask, s, op_name, other): # check comparisions that are resulting in float dtypes expected[mask] = np.nan - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) def _check_op_integer(self, result, expected, mask, s, op_name, other): # check comparisions that are resulting in integer dtypes @@ -231,10 +192,10 @@ def _check_op_integer(self, result, expected, mask, s, op_name, other): original = original.astype('float') 
original[mask] = np.nan - self.assert_series_equal(original, expected.astype('float')) + tm.assert_series_equal(original, expected.astype('float')) # assert our expected result - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) def test_arith_integer_array(self, data, all_arithmetic_operators): # we operate with a rhs of an integer array @@ -319,7 +280,7 @@ def test_error(self, data, all_arithmetic_operators): opa(np.arange(len(s)).reshape(-1, len(s))) -class TestComparisonOps(BaseInteger, base.BaseComparisonOpsTests): +class TestComparisonOps(BaseOpsUtil): def _compare_other(self, s, data, op_name, other): op = self.get_op_from_name(op_name) @@ -345,144 +306,21 @@ def _compare_other(self, s, data, op_name, other): tm.assert_series_equal(result, expected) + def test_compare_scalar(self, data, all_compare_operators): + op_name = all_compare_operators + s = pd.Series(data) + self._compare_other(s, data, op_name, 0) -class TestInterface(BaseInteger, base.BaseInterfaceTests): - - def test_repr_array(self, data): - result = repr(data) - - # not long - assert '...' not in result - - assert 'dtype=' in result - assert 'IntegerArray' in result - - def test_repr_array_long(self, data): - # some arrays may be able to assert a ... in the repr - with pd.option_context('display.max_seq_items', 1): - result = repr(data) - - assert '...' 
in result - assert 'length' in result - - -class TestConstructors(BaseInteger, base.BaseConstructorsTests): - - def test_from_dtype_from_float(self, data): - # construct from our dtype & string dtype - dtype = data.dtype - - # from float - expected = pd.Series(data) - result = pd.Series(np.array(data).astype('float'), dtype=str(dtype)) - self.assert_series_equal(result, expected) - - # from int / list - expected = pd.Series(data) - result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) - self.assert_series_equal(result, expected) - - # from int / array - expected = pd.Series(data).dropna().reset_index(drop=True) - dropped = np.array(data.dropna()).astype(np.dtype((dtype.type))) - result = pd.Series(dropped, dtype=str(dtype)) - self.assert_series_equal(result, expected) - - -class TestReshaping(BaseInteger, base.BaseReshapingTests): - - def test_concat_mixed_dtypes(self, data): - # https://github.com/pandas-dev/pandas/issues/20762 - df1 = pd.DataFrame({'A': data[:3]}) - df2 = pd.DataFrame({"A": [1, 2, 3]}) - df3 = pd.DataFrame({"A": ['a', 'b', 'c']}).astype('category') - df4 = pd.DataFrame({"A": pd.SparseArray([1, 2, 3])}) - dfs = [df1, df2, df3, df4] - - # dataframes - result = pd.concat(dfs) - expected = pd.concat([x.astype(object) for x in dfs]) - self.assert_frame_equal(result, expected) - - # series - result = pd.concat([x['A'] for x in dfs]) - expected = pd.concat([x['A'].astype(object) for x in dfs]) - self.assert_series_equal(result, expected) - - result = pd.concat([df1, df2]) - expected = pd.concat([df1.astype('object'), df2.astype('object')]) - self.assert_frame_equal(result, expected) - - # concat of an Integer and Int coerces to object dtype - # TODO(jreback) once integrated this would - # be a result of Integer - result = pd.concat([df1['A'], df2['A']]) - expected = pd.concat([df1['A'].astype('object'), - df2['A'].astype('object')]) - self.assert_series_equal(result, expected) - - -class TestGetitem(BaseInteger, base.BaseGetitemTests): - pass + 
def test_compare_array(self, data, all_compare_operators): + op_name = all_compare_operators + s = pd.Series(data) + other = pd.Series([0] * len(data)) + self._compare_other(s, data, op_name, other) -class TestMissing(BaseInteger, base.BaseMissingTests): +class TestCasting(object): pass - -class TestMethods(BaseInteger, base.BaseMethodsTests): - - @pytest.mark.parametrize('dropna', [True, False]) - def test_value_counts(self, all_data, dropna): - all_data = all_data[:10] - if dropna: - other = np.array(all_data[~all_data.isna()]) - else: - other = all_data - - result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() - expected = pd.Series(other).value_counts( - dropna=dropna).sort_index() - expected.index = expected.index.astype(all_data.dtype) - - self.assert_series_equal(result, expected) - - def test_combine_add(self, data_repeated): - # GH 20825 - orig_data1, orig_data2 = data_repeated(2) - s1 = pd.Series(orig_data1) - s2 = pd.Series(orig_data2) - - # fundamentally this is not a great operation - # as overflow / underflow can easily happen here - # e.g. 
int8 + int8 - def scalar_add(a, b): - - # TODO; should really be a type specific NA - if pd.isna(a) or pd.isna(b): - return np.nan - if is_integer(a): - a = int(a) - elif is_integer(b): - b = int(b) - return a + b - - result = s1.combine(s2, scalar_add) - expected = pd.Series( - orig_data1._from_sequence([scalar_add(a, b) for (a, b) in - zip(orig_data1, - orig_data2)])) - self.assert_series_equal(result, expected) - - val = s1.iloc[0] - result = s1.combine(val, lambda x1, x2: x1 + x2) - expected = pd.Series( - orig_data1._from_sequence([a + val for a in list(orig_data1)])) - self.assert_series_equal(result, expected) - - -class TestCasting(BaseInteger, base.BaseCastingTests): - @pytest.mark.parametrize('dropna', [True, False]) def test_construct_index(self, all_data, dropna): # ensure that we do not coerce to Float64Index, rather @@ -497,7 +335,7 @@ def test_construct_index(self, all_data, dropna): result = pd.Index(integer_array(other, dtype=all_data.dtype)) expected = pd.Index(other, dtype=object) - self.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected) @pytest.mark.parametrize('dropna', [True, False]) def test_astype_index(self, all_data, dropna): @@ -515,7 +353,7 @@ def test_astype_index(self, all_data, dropna): result = idx.astype(dtype) expected = idx.astype(object).astype(dtype) - self.assert_index_equal(result, expected) + tm.assert_index_equal(result, expected) def test_astype(self, all_data): all_data = all_data[:10] @@ -528,13 +366,13 @@ def test_astype(self, all_data): s = pd.Series(ints) result = s.astype(all_data.dtype) expected = pd.Series(ints) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) # coerce to same other - ints s = pd.Series(ints) result = s.astype(dtype) expected = pd.Series(ints, dtype=dtype) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) # coerce to same numpy_dtype - ints s = pd.Series(ints) @@ -547,13 +385,13 @@ def 
test_astype(self, all_data): s = pd.Series(mixed) result = s.astype(all_data.dtype) expected = pd.Series(mixed) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) # coerce to same other - mixed s = pd.Series(mixed) result = s.astype(dtype) expected = pd.Series(mixed, dtype=dtype) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) # coerce to same numpy_dtype - mixed s = pd.Series(mixed) @@ -572,12 +410,12 @@ def test_astype_specific_casting(self, dtype): s = pd.Series([1, 2, 3], dtype='Int64') result = s.astype(dtype) expected = pd.Series([1, 2, 3], dtype=dtype) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) s = pd.Series([1, 2, 3, None], dtype='Int64') result = s.astype(dtype) expected = pd.Series([1, 2, 3, None], dtype=dtype) - self.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected) def test_construct_cast_invalid(self, dtype): @@ -597,24 +435,6 @@ def test_construct_cast_invalid(self, dtype): pd.Series(arr).astype(dtype) -class TestGroupby(BaseInteger, base.BaseGroupbyTests): - - @pytest.mark.xfail(reason="groupby not working", strict=True) - def test_groupby_extension_no_sort(self, data_for_grouping): - super(TestGroupby, self).test_groupby_extension_no_sort( - data_for_grouping) - - @pytest.mark.parametrize('as_index', [ - pytest.param(True, - marks=pytest.mark.xfail(reason="groupby not working", - strict=True)), - False - ]) - def test_groupby_extension_agg(self, as_index, data_for_grouping): - super(TestGroupby, self).test_groupby_extension_agg( - as_index, data_for_grouping) - - def test_frame_repr(data_missing): df = pd.DataFrame({'A': data_missing}) diff --git a/pandas/tests/arrays/test_interval.py b/pandas/tests/arrays/test_interval.py new file mode 100644 index 0000000000000..bcf4cea795978 --- /dev/null +++ b/pandas/tests/arrays/test_interval.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +import pytest +import 
numpy as np + +from pandas import Index, IntervalIndex, date_range, timedelta_range +from pandas.core.arrays import IntervalArray +import pandas.util.testing as tm + + +@pytest.fixture(params=[ + (Index([0, 2, 4]), Index([1, 3, 5])), + (Index([0., 1., 2.]), Index([1., 2., 3.])), + (timedelta_range('0 days', periods=3), + timedelta_range('1 day', periods=3)), + (date_range('20170101', periods=3), date_range('20170102', periods=3)), + (date_range('20170101', periods=3, tz='US/Eastern'), + date_range('20170102', periods=3, tz='US/Eastern'))], + ids=lambda x: str(x[0].dtype)) +def left_right_dtypes(request): + """ + Fixture for building an IntervalArray from various dtypes + """ + return request.param + + +class TestMethods(object): + + @pytest.mark.parametrize('repeats', [0, 1, 5]) + def test_repeat(self, left_right_dtypes, repeats): + left, right = left_right_dtypes + result = IntervalArray.from_arrays(left, right).repeat(repeats) + expected = IntervalArray.from_arrays( + left.repeat(repeats), right.repeat(repeats)) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize('bad_repeats, msg', [ + (-1, 'negative dimensions are not allowed'), + ('foo', r'invalid literal for (int|long)\(\) with base 10')]) + def test_repeat_errors(self, bad_repeats, msg): + array = IntervalArray.from_breaks(range(4)) + with tm.assert_raises_regex(ValueError, msg): + array.repeat(bad_repeats) + + @pytest.mark.parametrize('new_closed', [ + 'left', 'right', 'both', 'neither']) + def test_set_closed(self, closed, new_closed): + # GH 21670 + array = IntervalArray.from_breaks(range(10), closed=closed) + result = array.set_closed(new_closed) + expected = IntervalArray.from_breaks(range(10), closed=new_closed) + tm.assert_extension_array_equal(result, expected) + + +class TestSetitem(object): + + def test_set_na(self, left_right_dtypes): + left, right = left_right_dtypes + result = IntervalArray.from_arrays(left, right) + result[0] = np.nan + + expected_left = 
Index([left._na_value] + list(left[1:])) + expected_right = Index([right._na_value] + list(right[1:])) + expected = IntervalArray.from_arrays(expected_left, expected_right) + + tm.assert_extension_array_equal(result, expected) + + +def test_repr_matches(): + idx = IntervalIndex.from_breaks([1, 2, 3]) + a = repr(idx) + b = repr(idx.values) + assert a.replace("Index", "Array") == b diff --git a/pandas/tests/extension/base/methods.py b/pandas/tests/extension/base/methods.py index c8656808739c4..4e7886dd2e943 100644 --- a/pandas/tests/extension/base/methods.py +++ b/pandas/tests/extension/base/methods.py @@ -127,10 +127,11 @@ def test_combine_add(self, data_repeated): s1 = pd.Series(orig_data1) s2 = pd.Series(orig_data2) result = s1.combine(s2, lambda x1, x2: x1 + x2) - expected = pd.Series( - orig_data1._from_sequence([a + b for (a, b) in - zip(list(orig_data1), - list(orig_data2))])) + with np.errstate(over='ignore'): + expected = pd.Series( + orig_data1._from_sequence([a + b for (a, b) in + zip(list(orig_data1), + list(orig_data2))])) self.assert_series_equal(result, expected) val = s1.iloc[0] diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index f7bfdb8ec218a..05351c56862b8 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -23,9 +23,9 @@ def get_op_from_name(self, op_name): def check_opname(self, s, op_name, other, exc=NotImplementedError): op = self.get_op_from_name(op_name) - self._check_op(s, op, other, exc) + self._check_op(s, op, other, op_name, exc) - def _check_op(self, s, op, other, exc=NotImplementedError): + def _check_op(self, s, op, other, op_name, exc=NotImplementedError): if exc is None: result = op(s, other) expected = s.combine(other, op) @@ -69,7 +69,8 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators): # ndarray & other series op_name = all_arithmetic_operators s = pd.Series(data) - self.check_opname(s, op_name, [s.iloc[0]] * len(s), 
exc=TypeError) + self.check_opname(s, op_name, pd.Series([s.iloc[0]] * len(s)), + exc=TypeError) def test_divmod(self, data): s = pd.Series(data) @@ -113,5 +114,5 @@ def test_compare_scalar(self, data, all_compare_operators): def test_compare_array(self, data, all_compare_operators): op_name = all_compare_operators s = pd.Series(data) - other = [0] * len(data) + other = pd.Series([data[0]] * len(data)) self._compare_other(s, data, op_name, other) diff --git a/pandas/tests/extension/category/__init__.py b/pandas/tests/extension/category/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/tests/extension/integer/__init__.py b/pandas/tests/extension/integer/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/tests/extension/interval/__init__.py b/pandas/tests/extension/interval/__init__.py deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/pandas/tests/extension/category/test_categorical.py b/pandas/tests/extension/test_categorical.py similarity index 85% rename from pandas/tests/extension/category/test_categorical.py rename to pandas/tests/extension/test_categorical.py index 76f6b03907ef8..b8c73a9efdae8 100644 --- a/pandas/tests/extension/category/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -1,3 +1,18 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" import string import pytest @@ -204,10 +219,14 @@ class TestComparisonOps(base.BaseComparisonOpsTests): def _compare_other(self, s, data, op_name, other): op = self.get_op_from_name(op_name) if op_name == '__eq__': - assert not op(data, other).all() + result = op(s, other) + expected = s.combine(other, lambda x, y: x == y) + assert (result == expected).all() elif op_name == '__ne__': - assert op(data, other).all() + result = op(s, other) + expected = s.combine(other, lambda x, y: x != y) + assert (result == expected).all() else: with pytest.raises(TypeError): diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py new file mode 100644 index 0000000000000..50c0e6dd8b347 --- /dev/null +++ b/pandas/tests/extension/test_integer.py @@ -0,0 +1,229 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" +import numpy as np +import pandas as pd +import pytest + +from pandas.tests.extension import base +from pandas.core.dtypes.common import is_extension_array_dtype + +from pandas.core.arrays import IntegerArray, integer_array +from pandas.core.arrays.integer import ( + Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, + UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype) + + +def make_data(): + return (list(range(1, 9)) + [np.nan] + list(range(10, 98)) + + [np.nan] + [99, 100]) + + +@pytest.fixture(params=[Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, + UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype]) +def dtype(request): + return request.param() + + +@pytest.fixture +def data(dtype): + return integer_array(make_data(), dtype=dtype) + + +@pytest.fixture +def data_missing(dtype): + return integer_array([np.nan, 1], dtype=dtype) + + +@pytest.fixture +def data_repeated(data): + def gen(count): + for _ in range(count): + yield data + yield gen + + +@pytest.fixture +def data_for_sorting(dtype): + return integer_array([1, 2, 0], dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + return integer_array([1, np.nan, 0], dtype=dtype) + + +@pytest.fixture +def na_cmp(): + # we are np.nan + return lambda x, y: np.isnan(x) and np.isnan(y) + + +@pytest.fixture +def na_value(): + return np.nan + + +@pytest.fixture +def data_for_grouping(dtype): + b = 1 + a = 0 + c = 2 + na = np.nan + return integer_array([b, b, na, na, a, a, b, c], dtype=dtype) + + +class TestDtype(base.BaseDtypeTests): + + @pytest.mark.skip(reason="using multiple dtypes") + def test_is_dtype_unboxes_dtype(self): + # we have multiple dtypes, so skip + pass + + def test_array_type_with_arg(self, data, dtype): + assert dtype.construct_array_type() is IntegerArray + + +class TestArithmeticOps(base.BaseArithmeticOpsTests): + + def check_opname(self, s, op_name, other, exc=None): + # overwriting to indicate ops don't raise an error + super(TestArithmeticOps, self).check_opname(s, op_name, + 
other, exc=None) + + def _check_op(self, s, op, other, op_name, exc=NotImplementedError): + if exc is None: + if s.dtype.is_unsigned_integer and (op_name == '__rsub__'): + # TODO see https://github.com/pandas-dev/pandas/issues/22023 + pytest.skip("unsigned subtraction gives negative values") + + if (hasattr(other, 'dtype') + and not is_extension_array_dtype(other.dtype) + and pd.api.types.is_integer_dtype(other.dtype)): + # other is np.int64 and would therefore always result in + # upcasting, so keeping other as same numpy_dtype + other = other.astype(s.dtype.numpy_dtype) + + result = op(s, other) + expected = s.combine(other, op) + + if op_name == '__rdiv__': + # combine is not giving the correct result for this case + pytest.skip("skipping reverse div in python 2") + elif op_name in ('__rtruediv__', '__truediv__', '__div__'): + expected = expected.astype(float) + if op_name == '__rtruediv__': + # TODO reverse operators result in object dtype + result = result.astype(float) + elif op_name.startswith('__r'): + # TODO reverse operators result in object dtype + # see https://github.com/pandas-dev/pandas/issues/22024 + expected = expected.astype(s.dtype) + result = result.astype(s.dtype) + else: + # combine method result in 'biggest' (int64) dtype + expected = expected.astype(s.dtype) + pass + if (op_name == '__rpow__') and isinstance(other, pd.Series): + # TODO pow on Int arrays gives different result with NA + # see https://github.com/pandas-dev/pandas/issues/22022 + result = result.fillna(1) + + self.assert_series_equal(result, expected) + else: + with pytest.raises(exc): + op(s, other) + + def _check_divmod_op(self, s, op, other, exc=None): + super(TestArithmeticOps, self)._check_divmod_op(s, op, other, None) + + @pytest.mark.skip(reason="intNA does not error on ops") + def test_error(self, data, all_arithmetic_operators): + # other specific errors tested in the integer array specific tests + pass + + +class TestComparisonOps(base.BaseComparisonOpsTests): + + def 
check_opname(self, s, op_name, other, exc=None): + super(TestComparisonOps, self).check_opname(s, op_name, + other, exc=None) + + def _compare_other(self, s, data, op_name, other): + self.check_opname(s, op_name, other) + + +class TestInterface(base.BaseInterfaceTests): + pass + + +class TestConstructors(base.BaseConstructorsTests): + pass + + +class TestReshaping(base.BaseReshapingTests): + pass + + # for test_concat_mixed_dtypes test + # concat of an Integer and Int coerces to object dtype + # TODO(jreback) once integrated this would + + +class TestGetitem(base.BaseGetitemTests): + pass + + +class TestMissing(base.BaseMissingTests): + pass + + +class TestMethods(base.BaseMethodsTests): + + @pytest.mark.parametrize('dropna', [True, False]) + def test_value_counts(self, all_data, dropna): + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Series(all_data).value_counts(dropna=dropna).sort_index() + expected = pd.Series(other).value_counts( + dropna=dropna).sort_index() + expected.index = expected.index.astype(all_data.dtype) + + self.assert_series_equal(result, expected) + + +class TestCasting(base.BaseCastingTests): + pass + + +class TestGroupby(base.BaseGroupbyTests): + + @pytest.mark.xfail(reason="groupby not working", strict=True) + def test_groupby_extension_no_sort(self, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_no_sort( + data_for_grouping) + + @pytest.mark.parametrize('as_index', [ + pytest.param(True, + marks=pytest.mark.xfail(reason="groupby not working", + strict=True)), + False + ]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + super(TestGroupby, self).test_groupby_extension_agg( + as_index, data_for_grouping) diff --git a/pandas/tests/extension/interval/test_interval.py b/pandas/tests/extension/test_interval.py similarity index 54% rename from pandas/tests/extension/interval/test_interval.py rename to 
pandas/tests/extension/test_interval.py index a10a56ddfdfac..625619a90ed4c 100644 --- a/pandas/tests/extension/interval/test_interval.py +++ b/pandas/tests/extension/test_interval.py @@ -1,7 +1,22 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" import pytest import numpy as np -from pandas import Index, Interval, IntervalIndex, date_range, timedelta_range +from pandas import Interval from pandas.core.arrays import IntervalArray from pandas.core.dtypes.dtypes import IntervalDtype from pandas.tests.extension import base @@ -15,22 +30,6 @@ def make_data(): return [Interval(l, r) for l, r in zip(left, right)] -@pytest.fixture(params=[ - (Index([0, 2, 4]), Index([1, 3, 5])), - (Index([0., 1., 2.]), Index([1., 2., 3.])), - (timedelta_range('0 days', periods=3), - timedelta_range('1 day', periods=3)), - (date_range('20170101', periods=3), date_range('20170102', periods=3)), - (date_range('20170101', periods=3, tz='US/Eastern'), - date_range('20170102', periods=3, tz='US/Eastern'))], - ids=lambda x: str(x[0].dtype)) -def left_right_dtypes(request): - """ - Fixture for building an IntervalArray from various dtypes - """ - return request.param - - @pytest.fixture def dtype(): return IntervalDtype() @@ -111,30 +110,6 @@ class TestInterface(BaseInterval, base.BaseInterfaceTests): class TestMethods(BaseInterval, base.BaseMethodsTests): - @pytest.mark.parametrize('repeats', [0, 1, 5]) - def 
test_repeat(self, left_right_dtypes, repeats): - left, right = left_right_dtypes - result = IntervalArray.from_arrays(left, right).repeat(repeats) - expected = IntervalArray.from_arrays( - left.repeat(repeats), right.repeat(repeats)) - tm.assert_extension_array_equal(result, expected) - - @pytest.mark.parametrize('bad_repeats, msg', [ - (-1, 'negative dimensions are not allowed'), - ('foo', r'invalid literal for (int|long)\(\) with base 10')]) - def test_repeat_errors(self, bad_repeats, msg): - array = IntervalArray.from_breaks(range(4)) - with tm.assert_raises_regex(ValueError, msg): - array.repeat(bad_repeats) - - @pytest.mark.parametrize('new_closed', [ - 'left', 'right', 'both', 'neither']) - def test_set_closed(self, closed, new_closed): - # GH 21670 - array = IntervalArray.from_breaks(range(10), closed=closed) - result = array.set_closed(new_closed) - expected = IntervalArray.from_breaks(range(10), closed=new_closed) - tm.assert_extension_array_equal(result, expected) @pytest.mark.skip(reason='addition is not defined for intervals') def test_combine_add(self, data_repeated): @@ -173,21 +148,4 @@ class TestReshaping(BaseInterval, base.BaseReshapingTests): class TestSetitem(BaseInterval, base.BaseSetitemTests): - - def test_set_na(self, left_right_dtypes): - left, right = left_right_dtypes - result = IntervalArray.from_arrays(left, right) - result[0] = np.nan - - expected_left = Index([left._na_value] + list(left[1:])) - expected_right = Index([right._na_value] + list(right[1:])) - expected = IntervalArray.from_arrays(expected_left, expected_right) - - self.assert_extension_array_equal(result, expected) - - -def test_repr_matches(): - idx = IntervalIndex.from_breaks([1, 2, 3]) - a = repr(idx) - b = repr(idx.values) - assert a.replace("Index", "Array") == b + pass
Pull request to discuss what to do with the tests for internal EAs (and one of the comments I still had in https://github.com/pandas-dev/pandas/pull/21160) Basically, I would keep the `tests/extension/..` only for subclassing the base extension array test suite, and any array-specific functionality is tested in `tests/arrays/..` (eg closed attribute for IntervalArray, specific arithmetic behaviour for IntegerArray, ...) This means that when adding a test related to EAs, we need to think about: is this testing something that is applicable to all EAs? (-> add a base test to `tests/extension/base` so this is tested for all internal and external EAs) or is this testing something specific to a particular EA? (-> add a test in `tests/array/EAtype/..`) Of course often there can be some ambiguity here. Main reason that I would split them is that over time, we probably add a lot of EA-type-specific tests, and then keeping the general ones mixed with the specific ones will make it only confusing / hard to see what is going on. Drawback is of course that it is tested in two places. In practice what I propose in this PR, is also what we already do for Categorical at the moment: Categorical has its own tests in `tests/arrays/categorical` (and probably also some in `indexes` and `frame`, ..), but we also run the base extension tests for Categorical in `tests/extension/`
https://api.github.com/repos/pandas-dev/pandas/pulls/22026
2018-07-23T14:59:48Z
2018-09-06T10:11:30Z
2018-09-06T10:11:30Z
2018-09-06T10:11:33Z
[CLN] Dispatch (some) Frame ops to Series, avoiding _data.eval
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9b71ab656920d..700916ba6066e 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -532,6 +532,35 @@ Current Behavior: ... OverflowError: Trying to coerce negative values to unsigned integers +.. _whatsnew_0240.api.crosstab_dtypes + +Crosstab Preserves Dtypes +^^^^^^^^^^^^^^^^^^^^^^^^^ + +:func:`crosstab` will preserve now dtypes in some cases that previously would +cast from integer dtype to floating dtype (:issue:`22019`) + +Previous Behavior: + +.. code-block:: ipython + + In [3]: df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4], + ...: 'c': [1, 1, np.nan, 1, 1]}) + In [4]: pd.crosstab(df.a, df.b, normalize='columns') + Out[4]: + b 3 4 + a + 1 0.5 0.0 + 2 0.5 1.0 + +Current Behavior: + +.. code-block:: ipython + + In [3]: df = pd.DataFrame({'a': [1, 2, 2, 2, 2], 'b': [3, 3, 4, 4, 4], + ...: 'c': [1, 1, np.nan, 1, 1]}) + In [4]: pd.crosstab(df.a, df.b, normalize='columns') + Datetimelike API Changes ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 138d1017aa43d..ff7590f6d5358 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4899,7 +4899,6 @@ def _arith_op(left, right): copy=False) def _combine_match_index(self, other, func, level=None): - assert isinstance(other, Series) left, right = self.align(other, join='outer', axis=0, level=level, copy=False) assert left.index.equals(right.index) @@ -4919,11 +4918,7 @@ def _combine_match_columns(self, other, func, level=None, try_cast=True): left, right = self.align(other, join='outer', axis=1, level=level, copy=False) assert left.columns.equals(right.index) - - new_data = left._data.eval(func=func, other=right, - axes=[left.columns, self.index], - try_cast=try_cast) - return self._constructor(new_data) + return ops.dispatch_to_series(left, right, func, axis="columns") def _combine_const(self, other, func, errors='raise', try_cast=True): if 
lib.is_scalar(other) or np.ndim(other) == 0: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 8171840c96b6e..a05b2bad9bd3e 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1666,7 +1666,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): # ----------------------------------------------------------------------------- # DataFrame -def dispatch_to_series(left, right, func, str_rep=None): +def dispatch_to_series(left, right, func, str_rep=None, axis=None): """ Evaluate the frame operation func(left, right) by evaluating column-by-column, dispatching to the Series implementation. @@ -1677,6 +1677,7 @@ def dispatch_to_series(left, right, func, str_rep=None): right : scalar or DataFrame func : arithmetic or comparison operator str_rep : str or None, default None + axis : {None, 0, 1, "index", "columns"} Returns ------- @@ -1700,6 +1701,15 @@ def column_op(a, b): return {i: func(a.iloc[:, i], b.iloc[:, i]) for i in range(len(a.columns))} + elif isinstance(right, ABCSeries) and axis == "columns": + # We only get here if called via left._combine_match_columns, + # in which case we specifically want to operate row-by-row + assert right.index.equals(left.columns) + + def column_op(a, b): + return {i: func(a.iloc[:, i], b.iloc[i]) + for i in range(len(a.columns))} + elif isinstance(right, ABCSeries): assert right.index.equals(left.index) # Handle other cases later @@ -1844,7 +1854,10 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): pass_op = op if should_series_dispatch(self, other, op) else na_op return self._combine_frame(other, pass_op, fill_value, level) elif isinstance(other, ABCSeries): - return _combine_series_frame(self, other, na_op, + # For these values of `axis`, we end up dispatching to Series op, + # so do not want the masked op. 
+ pass_op = op if axis in [0, "columns", None] else na_op + return _combine_series_frame(self, other, pass_op, fill_value=fill_value, axis=axis, level=level, try_cast=True) else: diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 5050922173564..a09efe6d4761c 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -505,33 +505,25 @@ def test_tdi_add_dt64_array(self, box_df_broadcast_failure): # ------------------------------------------------------------------ # Operations with int-like others - def test_td64arr_add_int_series_invalid(self, box_df_broadcast_failure, - tdser): - box = box_df_broadcast_failure + def test_td64arr_add_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): tdser + Series([2, 3, 4]) - def test_td64arr_radd_int_series_invalid(self, box_df_broadcast_failure, - tdser): - box = box_df_broadcast_failure + def test_td64arr_radd_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): Series([2, 3, 4]) + tdser - def test_td64arr_sub_int_series_invalid(self, box_df_broadcast_failure, - tdser): - box = box_df_broadcast_failure + def test_td64arr_sub_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): tdser - Series([2, 3, 4]) - def test_td64arr_rsub_int_series_invalid(self, box_df_broadcast_failure, - tdser): - box = box_df_broadcast_failure + def test_td64arr_rsub_int_series_invalid(self, box, tdser): tdser = tm.box_expected(tdser, box) err = TypeError if box is not pd.Index else NullFrequencyError with pytest.raises(err): @@ -605,9 +597,10 @@ def test_td64arr_add_sub_numeric_scalar_invalid(self, box, scalar, tdser): 
Series([1, 2, 3]) # TODO: Add DataFrame in here? ], ids=lambda x: type(x).__name__) - def test_td64arr_add_sub_numeric_arr_invalid( - self, box_df_broadcast_failure, vec, dtype, tdser): - box = box_df_broadcast_failure + def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype, tdser): + if box is pd.DataFrame and not isinstance(vec, Series): + raise pytest.xfail(reason="Tries to broadcast incorrectly") + tdser = tm.box_expected(tdser, box) err = TypeError if box is pd.Index and not dtype.startswith('float'): @@ -930,9 +923,9 @@ def test_td64arr_sub_offset_array(self, box_df_broadcast_failure): @pytest.mark.parametrize('names', [(None, None, None), ('foo', 'bar', None), ('foo', 'foo', 'foo')]) - def test_td64arr_with_offset_series(self, names, box_df_broadcast_failure): + def test_td64arr_with_offset_series(self, names, box_df_fail): # GH#18849 - box = box_df_broadcast_failure + box = box_df_fail box2 = Series if box is pd.Index else box tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'], @@ -963,10 +956,11 @@ def test_td64arr_with_offset_series(self, names, box_df_broadcast_failure): tm.assert_equal(res3, expected_sub) @pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series]) - def test_td64arr_addsub_anchored_offset_arraylike( - self, obox, box_df_broadcast_failure): + def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box): # GH#18824 - box = box_df_broadcast_failure + if box is pd.DataFrame and obox is not pd.Series: + raise pytest.xfail(reason="Attempts to broadcast incorrectly") + tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) tdi = tm.box_expected(tdi, box) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 0bc74c6890ee9..6186ce4d45ef2 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -721,7 +721,7 @@ def test_align_int_fill_bug(self): result = df1 - df1.mean() expected = df2 - 
df2.mean() - assert_frame_equal(result, expected) + assert_frame_equal(result.astype('f8'), expected) def test_align_multiindex(self): # GH 10665 diff --git a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 1ee48d0120c7d..1cb036dccf23c 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1566,8 +1566,9 @@ def test_crosstab_normalize(self): full_normal) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index'), row_normal) - tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns'), - col_normal) + tm.assert_frame_equal( + pd.crosstab(df.a, df.b, normalize='columns').astype('f8'), + col_normal) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=1), pd.crosstab(df.a, df.b, normalize='columns')) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=0), @@ -1600,7 +1601,8 @@ def test_crosstab_normalize(self): tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='index', margins=True), row_normal_margins) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize='columns', - margins=True), col_normal_margins) + margins=True).astype('f8'), + col_normal_margins) tm.assert_frame_equal(pd.crosstab(df.a, df.b, normalize=True, margins=True), all_normal_margins) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 601e251d45b4b..f3ab197771d53 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -758,9 +758,6 @@ def test_operators_bitwise(self): def test_scalar_na_cmp_corners(self): s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10]) - def tester(a, b): - return a & b - with pytest.raises(TypeError): s & datetime(2005, 1, 1) @@ -780,8 +777,11 @@ def tester(a, b): # this is an alignment issue; these are equivalent # https://github.com/pandas-dev/pandas/issues/5284 - pytest.raises(ValueError, lambda: d.__and__(s, axis='columns')) - pytest.raises(ValueError, tester, s, d) + with pytest.raises(TypeError): + 
d.__and__(s, axis='columns') + + with pytest.raises(TypeError): + s & d # this is wrong as its not a boolean result # result = d.__and__(s,axis='index')
ATM there are exactly two places where `BlockManager.eval` is called: `DataFrame._combine_match_columns` and `DataFrame._combine_const`. This replaces the usage in `_combine_match_columns` with a dispatch-to-Series implementation. Some output dtypes get changed (see edits in `test_axis_select_reindex`, `test_pivot`), and some errors get changed from `ValueError` to `TypeError` (see `test_operators`). The other usage of `_data.eval` will be removed separately; that turns out to be a lot more trouble because a bunch of `DataFrame` behavior is currently incorrect (see #22017). This PR also: - Simplifies some of the special-casing in SparseDataFrame; trying to move towards not having separate implementations for these methods - Dispatches `_combine_match_index` to avoid calling `self.values` when doing so would require coercing to object-dtype.
https://api.github.com/repos/pandas-dev/pandas/pulls/22019
2018-07-22T22:44:29Z
2018-10-03T11:32:35Z
2018-10-03T11:32:35Z
2018-10-03T11:32:41Z
REF: move range-generation functions to EA mixin classes
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 7bb1c45998eb2..eb8821382037d 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from datetime import datetime, timedelta import operator import warnings @@ -8,7 +9,7 @@ from pandas._libs.tslibs import timezones from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds, Timedelta from pandas._libs.tslibs.period import ( - DIFFERENT_FREQ_INDEX, IncompatibleFrequency) + Period, DIFFERENT_FREQ_INDEX, IncompatibleFrequency) from pandas.errors import NullFrequencyError, PerformanceWarning from pandas import compat @@ -19,6 +20,13 @@ from pandas.core.dtypes.common import ( needs_i8_conversion, is_list_like, + is_offsetlike, + is_extension_array_dtype, + is_datetime64_dtype, + is_datetime64_any_dtype, + is_datetime64tz_dtype, + is_float_dtype, + is_integer_dtype, is_bool_dtype, is_period_dtype, is_timedelta64_dtype, @@ -100,7 +108,7 @@ class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin): _freq and that the inheriting class has methods: - _validate_frequency + _generate_range """ @property @@ -132,6 +140,14 @@ def asi8(self): # ------------------------------------------------------------------ # Array-like Methods + @property + def shape(self): + return (len(self),) + + @property + def size(self): + return np.prod(self.shape) + def __len__(self): return len(self._data) @@ -296,6 +312,34 @@ def resolution(self): """ return frequencies.Resolution.get_str(self._resolution) + @classmethod + def _validate_frequency(cls, index, freq, **kwargs): + """ + Validate that a frequency is compatible with the values of a given + Datetime Array/Index or Timedelta Array/Index + + Parameters + ---------- + index : DatetimeIndex or TimedeltaIndex + The index on which to determine if the given frequency is valid + freq : DateOffset + The frequency to validate + """ + if is_period_dtype(cls): + # Frequency 
validation is not meaningful for Period Array/Index + return None + + inferred = index.inferred_freq + if index.size == 0 or inferred == freq.freqstr: + return None + + on_freq = cls._generate_range(start=index[0], end=None, + periods=len(index), freq=freq, **kwargs) + if not np.array_equal(index.asi8, on_freq.asi8): + raise ValueError('Inferred frequency {infer} from passed values ' + 'does not conform to passed frequency {passed}' + .format(infer=inferred, passed=freq.freqstr)) + # ------------------------------------------------------------------ # Arithmetic Methods @@ -477,6 +521,188 @@ def _addsub_offset_array(self, other, op): kwargs['freq'] = 'infer' return type(self)(res_values, **kwargs) + def shift(self, n, freq=None): + """ + Specialized shift which produces a Datetime/Timedelta Array/Index + + Parameters + ---------- + n : int + Periods to shift by + freq : DateOffset or timedelta-like, optional + + Returns + ------- + shifted : same type as self + """ + if freq is not None and freq != self.freq: + if isinstance(freq, compat.string_types): + freq = frequencies.to_offset(freq) + offset = n * freq + result = self + offset + + if hasattr(self, 'tz'): + result._tz = self.tz + + return result + + if n == 0: + # immutable so OK + return self + + if self.freq is None: + raise NullFrequencyError("Cannot shift with no freq") + + start = self[0] + n * self.freq + end = self[-1] + n * self.freq + attribs = self._get_attributes_dict() + return self._generate_range(start=start, end=end, periods=None, + **attribs) + + @classmethod + def _add_datetimelike_methods(cls): + """ + add in the datetimelike methods (as we may have to override the + superclass) + """ + + def __add__(self, other): + other = lib.item_from_zerodim(other) + if isinstance(other, (ABCSeries, ABCDataFrame)): + return NotImplemented + + # scalar others + elif other is NaT: + result = self._add_nat() + elif isinstance(other, (Tick, timedelta, np.timedelta64)): + result = self._add_delta(other) + elif 
isinstance(other, DateOffset): + # specifically _not_ a Tick + result = self._add_offset(other) + elif isinstance(other, (datetime, np.datetime64)): + result = self._add_datelike(other) + elif lib.is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + result = self.shift(other) + + # array-like others + elif is_timedelta64_dtype(other): + # TimedeltaIndex, ndarray[timedelta64] + result = self._add_delta(other) + elif is_offsetlike(other): + # Array/Index of DateOffset objects + result = self._addsub_offset_array(other, operator.add) + elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): + # DatetimeIndex, ndarray[datetime64] + return self._add_datelike(other) + elif is_integer_dtype(other): + result = self._addsub_int_array(other, operator.add) + elif is_float_dtype(other) or is_period_dtype(other): + # Explicitly catch invalid dtypes + raise TypeError("cannot add {dtype}-dtype to {cls}" + .format(dtype=other.dtype, + cls=type(self).__name__)) + elif is_extension_array_dtype(other): + # Categorical op will raise; defer explicitly + return NotImplemented + else: # pragma: no cover + return NotImplemented + + return result + + cls.__add__ = __add__ + + def __radd__(self, other): + # alias for __add__ + return self.__add__(other) + cls.__radd__ = __radd__ + + def __sub__(self, other): + other = lib.item_from_zerodim(other) + if isinstance(other, (ABCSeries, ABCDataFrame)): + return NotImplemented + + # scalar others + elif other is NaT: + result = self._sub_nat() + elif isinstance(other, (Tick, timedelta, np.timedelta64)): + result = self._add_delta(-other) + elif isinstance(other, DateOffset): + # specifically _not_ a Tick + result = self._add_offset(-other) + elif isinstance(other, (datetime, np.datetime64)): + result = self._sub_datelike(other) + elif lib.is_integer(other): + # This check must come after the check for np.timedelta64 + # as is_integer returns True for these + result = 
self.shift(-other) + elif isinstance(other, Period): + result = self._sub_period(other) + + # array-like others + elif is_timedelta64_dtype(other): + # TimedeltaIndex, ndarray[timedelta64] + result = self._add_delta(-other) + elif is_offsetlike(other): + # Array/Index of DateOffset objects + result = self._addsub_offset_array(other, operator.sub) + elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): + # DatetimeIndex, ndarray[datetime64] + result = self._sub_datelike(other) + elif is_period_dtype(other): + # PeriodIndex + result = self._sub_period_array(other) + elif is_integer_dtype(other): + result = self._addsub_int_array(other, operator.sub) + elif isinstance(other, ABCIndexClass): + raise TypeError("cannot subtract {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) + elif is_float_dtype(other): + # Explicitly catch invalid dtypes + raise TypeError("cannot subtract {dtype}-dtype from {cls}" + .format(dtype=other.dtype, + cls=type(self).__name__)) + elif is_extension_array_dtype(other): + # Categorical op will raise; defer explicitly + return NotImplemented + else: # pragma: no cover + return NotImplemented + + return result + + cls.__sub__ = __sub__ + + def __rsub__(self, other): + if is_datetime64_dtype(other) and is_timedelta64_dtype(self): + # ndarray[datetime64] cannot be subtracted from self, so + # we need to wrap in DatetimeArray/Index and flip the operation + if not isinstance(other, DatetimeLikeArrayMixin): + # Avoid down-casting DatetimeIndex + from pandas.core.arrays import DatetimeArrayMixin + other = DatetimeArrayMixin(other) + return other - self + elif (is_datetime64_any_dtype(self) and hasattr(other, 'dtype') and + not is_datetime64_any_dtype(other)): + # GH#19959 datetime - datetime is well-defined as timedelta, + # but any other type - datetime is not well-defined. 
+ raise TypeError("cannot subtract {cls} from {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) + return -(self - other) + cls.__rsub__ = __rsub__ + + def __iadd__(self, other): + # alias for __add__ + return self.__add__(other) + cls.__iadd__ = __iadd__ + + def __isub__(self, other): + # alias for __sub__ + return self.__sub__(other) + cls.__isub__ = __isub__ + # -------------------------------------------------------------- # Comparison Methods diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 29f97b344f267..00d53ad82b2dc 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from datetime import datetime, timedelta +from datetime import datetime, timedelta, time import warnings import numpy as np @@ -8,11 +8,12 @@ from pandas._libs import tslib from pandas._libs.tslib import Timestamp, NaT, iNaT from pandas._libs.tslibs import ( + normalize_date, conversion, fields, timezones, resolution as libresolution) from pandas.util._decorators import cache_readonly -from pandas.errors import PerformanceWarning +from pandas.errors import PerformanceWarning, AbstractMethodError from pandas import compat from pandas.core.dtypes.common import ( @@ -30,11 +31,14 @@ from pandas.core.algorithms import checked_add_with_arr from pandas.tseries.frequencies import to_offset -from pandas.tseries.offsets import Tick +from pandas.tseries.offsets import Tick, Day, generate_range from pandas.core.arrays import datetimelike as dtl +_midnight = time(0, 0) + + def _to_m8(key, tz=None): """ Timestamp-like => dt64 @@ -177,13 +181,16 @@ def _simple_new(cls, values, freq=None, tz=None, **kwargs): result._tz = timezones.tz_standardize(tz) return result - def __new__(cls, values, freq=None, tz=None): + def __new__(cls, values, freq=None, tz=None, dtype=None): if tz is None and hasattr(values, 'tz'): # e.g. 
DatetimeIndex tz = values.tz freq, freq_infer = dtl.maybe_infer_freq(freq) + # if dtype has an embedded tz, capture it + tz = dtl.validate_tz_from_dtype(dtype, tz) + result = cls._simple_new(values, freq=freq, tz=tz) if freq_infer: inferred = result.inferred_freq @@ -194,6 +201,117 @@ def __new__(cls, values, freq=None, tz=None): # constructor, this does not call _deepcopy_if_needed return result + @classmethod + def _generate_range(cls, start, end, periods, freq, tz=None, + normalize=False, ambiguous='raise', closed=None): + if com.count_not_none(start, end, periods, freq) != 3: + raise ValueError('Of the four parameters: start, end, periods, ' + 'and freq, exactly three must be specified') + freq = to_offset(freq) + + if start is not None: + start = Timestamp(start) + + if end is not None: + end = Timestamp(end) + + if start is None and end is None: + if closed is not None: + raise ValueError("Closed has to be None if not both of start" + "and end are defined") + + left_closed, right_closed = dtl.validate_endpoints(closed) + + start, end, _normalized = _maybe_normalize_endpoints(start, end, + normalize) + + tz, inferred_tz = _infer_tz_from_endpoints(start, end, tz) + + if hasattr(freq, 'delta') and freq != Day(): + # sub-Day Tick + if inferred_tz is None and tz is not None: + # naive dates + if start is not None and start.tz is None: + start = start.tz_localize(tz, ambiguous=False) + + if end is not None and end.tz is None: + end = end.tz_localize(tz, ambiguous=False) + + if start and end: + if start.tz is None and end.tz is not None: + start = start.tz_localize(end.tz, ambiguous=False) + + if end.tz is None and start.tz is not None: + end = end.tz_localize(start.tz, ambiguous=False) + + if cls._use_cached_range(freq, _normalized, start, end): + index = cls._cached_range(start, end, periods=periods, + freq=freq) + else: + index = _generate_regular_range(cls, start, end, periods, freq) + + else: + + if tz is not None: + # naive dates + if start is not None and 
start.tz is not None: + start = start.replace(tzinfo=None) + + if end is not None and end.tz is not None: + end = end.replace(tzinfo=None) + + if start and end: + if start.tz is None and end.tz is not None: + end = end.replace(tzinfo=None) + + if end.tz is None and start.tz is not None: + start = start.replace(tzinfo=None) + + if freq is not None: + if cls._use_cached_range(freq, _normalized, start, end): + index = cls._cached_range(start, end, periods=periods, + freq=freq) + else: + index = _generate_regular_range(cls, start, end, + periods, freq) + + if tz is not None and getattr(index, 'tz', None) is None: + arr = conversion.tz_localize_to_utc( + ensure_int64(index.values), + tz, ambiguous=ambiguous) + + index = cls(arr) + + # index is localized datetime64 array -> have to convert + # start/end as well to compare + if start is not None: + start = start.tz_localize(tz).asm8 + if end is not None: + end = end.tz_localize(tz).asm8 + else: + # Create a linearly spaced date_range in local time + start = start.tz_localize(tz) + end = end.tz_localize(tz) + arr = np.linspace(start.value, end.value, periods) + index = cls._simple_new(arr.astype('M8[ns]'), freq=None, tz=tz) + + if not left_closed and len(index) and index[0] == start: + index = index[1:] + if not right_closed and len(index) and index[-1] == end: + index = index[:-1] + + return cls._simple_new(index.values, freq=freq, tz=tz) + + @classmethod + def _use_cached_range(cls, freq, _normalized, start, end): + # DatetimeArray is mutable, so is not cached + return False + + @classmethod + def _cached_range(cls, start=None, end=None, + periods=None, freq=None, **kwargs): + raise AbstractMethodError(cls) + # ----------------------------------------------------------------- # Descriptive Properties @@ -1085,3 +1203,109 @@ def to_julian_date(self): DatetimeArrayMixin._add_comparison_ops() +DatetimeArrayMixin._add_datetimelike_methods() + + +def _generate_regular_range(cls, start, end, periods, freq): + if 
isinstance(freq, Tick): + stride = freq.nanos + if periods is None: + b = Timestamp(start).value + # cannot just use e = Timestamp(end) + 1 because arange breaks when + # stride is too large, see GH10887 + e = (b + (Timestamp(end).value - b) // stride * stride + + stride // 2 + 1) + # end.tz == start.tz by this point due to _generate implementation + tz = start.tz + elif start is not None: + b = Timestamp(start).value + e = b + np.int64(periods) * stride + tz = start.tz + elif end is not None: + e = Timestamp(end).value + stride + b = e - np.int64(periods) * stride + tz = end.tz + else: + raise ValueError("at least 'start' or 'end' should be specified " + "if a 'period' is given.") + + data = np.arange(b, e, stride, dtype=np.int64) + data = cls._simple_new(data.view(_NS_DTYPE), None, tz=tz) + else: + tz = None + if isinstance(start, Timestamp): + tz = start.tz + start = start.to_pydatetime() + + if isinstance(end, Timestamp): + tz = end.tz + end = end.to_pydatetime() + + xdr = generate_range(start=start, end=end, + periods=periods, offset=freq) + + values = np.array([x.value for x in xdr]) + data = cls._simple_new(values, freq=freq, tz=tz) + + return data + + +def _infer_tz_from_endpoints(start, end, tz): + """ + If a timezone is not explicitly given via `tz`, see if one can + be inferred from the `start` and `end` endpoints. If more than one + of these inputs provides a timezone, require that they all agree. 
+ + Parameters + ---------- + start : Timestamp + end : Timestamp + tz : tzinfo or None + + Returns + ------- + tz : tzinfo or None + inferred_tz : tzinfo or None + + Raises + ------ + TypeError : if start and end timezones do not agree + """ + try: + inferred_tz = timezones.infer_tzinfo(start, end) + except Exception: + raise TypeError('Start and end cannot both be tz-aware with ' + 'different timezones') + + inferred_tz = timezones.maybe_get_tz(inferred_tz) + tz = timezones.maybe_get_tz(tz) + + if tz is not None and inferred_tz is not None: + if not timezones.tz_compare(inferred_tz, tz): + raise AssertionError("Inferred time zone not equal to passed " + "time zone") + + elif inferred_tz is not None: + tz = inferred_tz + + return tz, inferred_tz + + +def _maybe_normalize_endpoints(start, end, normalize): + _normalized = True + + if start is not None: + if normalize: + start = normalize_date(start) + _normalized = True + else: + _normalized = _normalized and start.time() == _midnight + + if end is not None: + if normalize: + end = normalize_date(end) + _normalized = True + else: + _normalized = _normalized and end.time() == _midnight + + return start, end, _normalized diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 9c98f73312dbf..481d5313f0e25 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -386,6 +386,7 @@ def _maybe_convert_timedelta(self, other): PeriodArrayMixin._add_comparison_ops() +PeriodArrayMixin._add_datetimelike_methods() # ------------------------------------------------------------------- diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index cc93644677463..df9e57cb5f0e1 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -4,7 +4,7 @@ import numpy as np from pandas._libs import tslibs -from pandas._libs.tslibs import Timedelta, NaT +from pandas._libs.tslibs import Timedelta, Timestamp, NaT, iNaT from 
pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64 @@ -16,6 +16,7 @@ from pandas.core.dtypes.missing import isna import pandas.core.common as com +from pandas.core.algorithms import checked_add_with_arr from pandas.tseries.offsets import Tick from pandas.tseries.frequencies import to_offset @@ -230,6 +231,36 @@ def _add_delta(self, delta): return type(self)(new_values, freq='infer') + def _add_datelike(self, other): + # adding a timedeltaindex to a datetimelike + from pandas.core.arrays import DatetimeArrayMixin + if isinstance(other, (DatetimeArrayMixin, np.ndarray)): + # if other is an ndarray, we assume it is datetime64-dtype + # defer to implementation in DatetimeIndex + if not isinstance(other, DatetimeArrayMixin): + other = DatetimeArrayMixin(other) + return other + self + else: + assert other is not NaT + other = Timestamp(other) + i8 = self.asi8 + result = checked_add_with_arr(i8, other.value, + arr_mask=self._isnan) + result = self._maybe_mask_results(result, fill_value=iNaT) + return DatetimeArrayMixin(result) + + def _addsub_offset_array(self, other, op): + # Add or subtract Array-like of DateOffset objects + try: + # TimedeltaIndex can only operate with a subset of DateOffset + # subclasses. 
Incompatible classes will raise AttributeError, + # which we re-raise as TypeError + return dtl.DatetimeLikeArrayMixin._addsub_offset_array(self, other, + op) + except AttributeError: + raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}" + .format(cls=type(self).__name__)) + def _evaluate_with_timedelta_like(self, other, op): if isinstance(other, ABCSeries): # GH#19042 @@ -370,6 +401,7 @@ def f(x): TimedeltaArrayMixin._add_comparison_ops() +TimedeltaArrayMixin._add_datetimelike_methods() # --------------------------------------------------------------------- diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 20926ea5163af..f09fe8c8abdcf 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -274,6 +274,26 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return IntervalIndex(data, dtype=dtype, name=name, copy=copy, closed=closed) + elif (is_datetime64_any_dtype(data) or + (dtype is not None and is_datetime64_any_dtype(dtype)) or + 'tz' in kwargs): + from pandas import DatetimeIndex + result = DatetimeIndex(data, copy=copy, name=name, + dtype=dtype, **kwargs) + if dtype is not None and is_dtype_equal(_o_dtype, dtype): + return Index(result.to_pydatetime(), dtype=_o_dtype) + else: + return result + + elif (is_timedelta64_dtype(data) or + (dtype is not None and is_timedelta64_dtype(dtype))): + from pandas import TimedeltaIndex + result = TimedeltaIndex(data, copy=copy, name=name, **kwargs) + if dtype is not None and _o_dtype == dtype: + return Index(result.to_pytimedelta(), dtype=_o_dtype) + else: + return result + # extension dtype elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype): data = np.asarray(data) @@ -290,27 +310,6 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): - - if (is_datetime64_any_dtype(data) or - (dtype is not None and is_datetime64_any_dtype(dtype)) or - 'tz' in 
kwargs): - from pandas import DatetimeIndex - result = DatetimeIndex(data, copy=copy, name=name, - dtype=dtype, **kwargs) - if dtype is not None and is_dtype_equal(_o_dtype, dtype): - return Index(result.to_pydatetime(), dtype=_o_dtype) - else: - return result - - elif (is_timedelta64_dtype(data) or - (dtype is not None and is_timedelta64_dtype(dtype))): - from pandas import TimedeltaIndex - result = TimedeltaIndex(data, copy=copy, name=name, **kwargs) - if dtype is not None and _o_dtype == dtype: - return Index(result.to_pytimedelta(), dtype=_o_dtype) - else: - return result - if dtype is not None: try: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 8f05a9a887830..3f8c07fe7cd21 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -3,8 +3,6 @@ Base and utility classes for tseries type pandas objects. """ import warnings -import operator -from datetime import datetime, timedelta from pandas import compat from pandas.compat.numpy import function as nv @@ -13,7 +11,6 @@ import numpy as np from pandas._libs import lib, iNaT, NaT -from pandas._libs.tslibs.period import Period from pandas._libs.tslibs.timestamps import round_ns from pandas.core.dtypes.common import ( @@ -24,32 +21,23 @@ is_list_like, is_scalar, is_bool_dtype, - is_offsetlike, is_categorical_dtype, is_datetime_or_timedelta_dtype, is_float_dtype, is_integer_dtype, is_object_dtype, - is_string_dtype, - is_datetime64_dtype, - is_datetime64tz_dtype, - is_datetime64_any_dtype, - is_period_dtype, - is_timedelta64_dtype) + is_string_dtype) from pandas.core.dtypes.generic import ( - ABCIndex, ABCSeries, ABCDataFrame, ABCPeriodIndex, ABCIndexClass) + ABCIndex, ABCSeries, ABCPeriodIndex, ABCIndexClass) from pandas.core.dtypes.missing import isna from pandas.core import common as com, algorithms, ops -from pandas.errors import NullFrequencyError import pandas.io.formats.printing as printing from pandas.core.arrays.datetimelike 
import DatetimeLikeArrayMixin from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat -import pandas.tseries.frequencies as frequencies -from pandas.tseries.offsets import Tick, DateOffset import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -206,30 +194,6 @@ def floor(self, freq): def ceil(self, freq): return self._round(freq, np.ceil) - @classmethod - def _validate_frequency(cls, index, freq, **kwargs): - """ - Validate that a frequency is compatible with the values of a given - DatetimeIndex or TimedeltaIndex - - Parameters - ---------- - index : DatetimeIndex or TimedeltaIndex - The index on which to determine if the given frequency is valid - freq : DateOffset - The frequency to validate - """ - inferred = index.inferred_freq - if index.empty or inferred == freq.freqstr: - return None - - on_freq = cls._generate_range( - index[0], None, len(index), None, freq, **kwargs) - if not np.array_equal(index.asi8, on_freq.asi8): - msg = ('Inferred frequency {infer} from passed values does not ' - 'conform to passed frequency {passed}') - raise ValueError(msg.format(infer=inferred, passed=freq.freqstr)) - class DatetimeIndexOpsMixin(DatetimeLikeArrayMixin): """ common ops mixin to support a unified interface datetimelike Index """ @@ -584,56 +548,9 @@ def _add_datetimelike_methods(cls): """ def __add__(self, other): - other = lib.item_from_zerodim(other) - if isinstance(other, (ABCSeries, ABCDataFrame)): - return NotImplemented - - # scalar others - elif other is NaT: - result = self._add_nat() - elif isinstance(other, (Tick, timedelta, np.timedelta64)): - result = self._add_delta(other) - elif isinstance(other, DateOffset): - # specifically _not_ a Tick - result = self._add_offset(other) - elif isinstance(other, (datetime, np.datetime64)): - result = self._add_datelike(other) - elif is_integer(other): - # This check 
must come after the check for np.timedelta64 - # as is_integer returns True for these - result = self.shift(other) - - # array-like others - elif is_timedelta64_dtype(other): - # TimedeltaIndex, ndarray[timedelta64] - result = self._add_delta(other) - elif is_offsetlike(other): - # Array/Index of DateOffset objects - result = self._addsub_offset_array(other, operator.add) - elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): - # DatetimeIndex, ndarray[datetime64] - return self._add_datelike(other) - elif is_integer_dtype(other): - result = self._addsub_int_array(other, operator.add) - elif is_float_dtype(other) or is_period_dtype(other): - # Explicitly catch invalid dtypes - raise TypeError("cannot add {dtype}-dtype to {cls}" - .format(dtype=other.dtype, - cls=type(self).__name__)) - elif is_categorical_dtype(other): - # Categorical op will raise; defer explicitly - return NotImplemented - else: # pragma: no cover - return NotImplemented - - if result is NotImplemented: - return NotImplemented - elif not isinstance(result, Index): - # Index.__new__ will choose appropriate subclass for dtype - result = Index(result) - res_name = ops.get_op_result_name(self, other) - result.name = res_name - return result + # dispatch to ExtensionArray implementation + result = super(cls, self).__add__(other) + return wrap_arithmetic_op(self, other, result) cls.__add__ = __add__ @@ -643,95 +560,17 @@ def __radd__(self, other): cls.__radd__ = __radd__ def __sub__(self, other): - from pandas import Index - - other = lib.item_from_zerodim(other) - if isinstance(other, (ABCSeries, ABCDataFrame)): - return NotImplemented - - # scalar others - elif other is NaT: - result = self._sub_nat() - elif isinstance(other, (Tick, timedelta, np.timedelta64)): - result = self._add_delta(-other) - elif isinstance(other, DateOffset): - # specifically _not_ a Tick - result = self._add_offset(-other) - elif isinstance(other, (datetime, np.datetime64)): - result = self._sub_datelike(other) - 
elif is_integer(other): - # This check must come after the check for np.timedelta64 - # as is_integer returns True for these - result = self.shift(-other) - elif isinstance(other, Period): - result = self._sub_period(other) - - # array-like others - elif is_timedelta64_dtype(other): - # TimedeltaIndex, ndarray[timedelta64] - result = self._add_delta(-other) - elif is_offsetlike(other): - # Array/Index of DateOffset objects - result = self._addsub_offset_array(other, operator.sub) - elif is_datetime64_dtype(other) or is_datetime64tz_dtype(other): - # DatetimeIndex, ndarray[datetime64] - result = self._sub_datelike(other) - elif is_period_dtype(other): - # PeriodIndex - result = self._sub_period_array(other) - elif is_integer_dtype(other): - result = self._addsub_int_array(other, operator.sub) - elif isinstance(other, Index): - raise TypeError("cannot subtract {cls} and {typ}" - .format(cls=type(self).__name__, - typ=type(other).__name__)) - elif is_float_dtype(other): - # Explicitly catch invalid dtypes - raise TypeError("cannot subtract {dtype}-dtype from {cls}" - .format(dtype=other.dtype, - cls=type(self).__name__)) - elif is_categorical_dtype(other): - # Categorical op will raise; defer explicitly - return NotImplemented - else: # pragma: no cover - return NotImplemented - - if result is NotImplemented: - return NotImplemented - elif not isinstance(result, Index): - # Index.__new__ will choose appropriate subclass for dtype - result = Index(result) - res_name = ops.get_op_result_name(self, other) - result.name = res_name - return result + # dispatch to ExtensionArray implementation + result = super(cls, self).__sub__(other) + return wrap_arithmetic_op(self, other, result) cls.__sub__ = __sub__ def __rsub__(self, other): - if is_datetime64_dtype(other) and is_timedelta64_dtype(self): - # ndarray[datetime64] cannot be subtracted from self, so - # we need to wrap in DatetimeIndex and flip the operation - from pandas import DatetimeIndex - return 
DatetimeIndex(other) - self - elif (is_datetime64_any_dtype(self) and hasattr(other, 'dtype') and - not is_datetime64_any_dtype(other)): - # GH#19959 datetime - datetime is well-defined as timedelta, - # but any other type - datetime is not well-defined. - raise TypeError("cannot subtract {cls} from {typ}" - .format(cls=type(self).__name__, - typ=type(other).__name__)) - return -(self - other) - cls.__rsub__ = __rsub__ + result = super(cls, self).__rsub__(other) + return wrap_arithmetic_op(self, other, result) - def __iadd__(self, other): - # alias for __add__ - return self.__add__(other) - cls.__iadd__ = __iadd__ - - def __isub__(self, other): - # alias for __sub__ - return self.__sub__(other) - cls.__isub__ = __isub__ + cls.__rsub__ = __rsub__ def isin(self, values): """ @@ -754,44 +593,6 @@ def isin(self, values): return algorithms.isin(self.asi8, values.asi8) - def shift(self, n, freq=None): - """ - Specialized shift which produces a DatetimeIndex - - Parameters - ---------- - n : int - Periods to shift by - freq : DateOffset or timedelta-like, optional - - Returns - ------- - shifted : DatetimeIndex - """ - if freq is not None and freq != self.freq: - if isinstance(freq, compat.string_types): - freq = frequencies.to_offset(freq) - offset = n * freq - result = self + offset - - if hasattr(self, 'tz'): - result._tz = self.tz - - return result - - if n == 0: - # immutable so OK - return self - - if self.freq is None: - raise NullFrequencyError("Cannot shift with no freq") - - start = self[0] + n * self.freq - end = self[-1] + n * self.freq - attribs = self._get_attributes_dict() - return self._generate_range(start=start, end=end, periods=None, - **attribs) - def repeat(self, repeats, *args, **kwargs): """ Analogous to ndarray.repeat @@ -896,3 +697,16 @@ def _ensure_datetimelike_to_i8(other): # period array cannot be coerces to int other = Index(other).asi8 return other + + +def wrap_arithmetic_op(self, other, result): + if result is NotImplemented: + return 
NotImplemented + + if not isinstance(result, Index): + # Index.__new__ will choose appropriate subclass for dtype + result = Index(result) + + res_name = ops.get_op_result_name(self, other) + result.name = res_name + return result diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 933e7406b5af3..3ee91a106f36b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -40,7 +40,7 @@ from pandas.core.indexes.datetimelike import ( DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) from pandas.tseries.offsets import ( - generate_range, Tick, CDay, prefix_mapping) + generate_range, CDay, prefix_mapping) from pandas.core.tools.timedeltas import to_timedelta from pandas.util._decorators import ( @@ -49,7 +49,7 @@ import pandas.tseries.offsets as offsets import pandas.core.tools.datetimes as tools -from pandas._libs import (lib, index as libindex, tslibs, tslib as libts, +from pandas._libs import (lib, index as libindex, tslib as libts, join as libjoin, Timestamp) from pandas._libs.tslibs import (timezones, conversion, fields, parsing, ccalendar) @@ -98,9 +98,6 @@ def wrapper(self, other): return compat.set_function_name(wrapper, opname, cls) -_midnight = time(0, 0) - - def _new_DatetimeIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__ """ @@ -323,7 +320,7 @@ def __new__(cls, data=None, data = tools.to_datetime(data, dayfirst=dayfirst, yearfirst=yearfirst) - if isinstance(data, DatetimeIndex): + if isinstance(data, DatetimeArrayMixin): if tz is None: tz = data.tz elif data.tz is None: @@ -375,135 +372,19 @@ def __new__(cls, data=None, return subarr._deepcopy_if_needed(ref_to_data, copy) @classmethod - def _generate_range(cls, start, end, periods, name, freq, tz=None, - normalize=False, ambiguous='raise', closed=None): - if com.count_not_none(start, end, periods, freq) != 3: - raise ValueError('Of the four parameters: start, end, periods, ' 
- 'and freq, exactly three must be specified') - - _normalized = True - - if start is not None: - start = Timestamp(start) - - if end is not None: - end = Timestamp(end) - - if start is None and end is None: - if closed is not None: - raise ValueError("Closed has to be None if not both of start" - "and end are defined") - - left_closed, right_closed = dtl.validate_endpoints(closed) - - try: - inferred_tz = timezones.infer_tzinfo(start, end) - except Exception: - raise TypeError('Start and end cannot both be tz-aware with ' - 'different timezones') - - inferred_tz = timezones.maybe_get_tz(inferred_tz) - tz = timezones.maybe_get_tz(tz) - - if tz is not None and inferred_tz is not None: - if not timezones.tz_compare(inferred_tz, tz): - raise AssertionError("Inferred time zone not equal to passed " - "time zone") - - elif inferred_tz is not None: - tz = inferred_tz - - if start is not None: - if normalize: - start = tslibs.normalize_date(start) - _normalized = True - else: - _normalized = _normalized and start.time() == _midnight - - if end is not None: - if normalize: - end = tslibs.normalize_date(end) - _normalized = True - else: - _normalized = _normalized and end.time() == _midnight - - if hasattr(freq, 'delta') and freq != offsets.Day(): - if inferred_tz is None and tz is not None: - # naive dates - if start is not None and start.tz is None: - start = start.tz_localize(tz, ambiguous=False) - - if end is not None and end.tz is None: - end = end.tz_localize(tz, ambiguous=False) - - if start and end: - if start.tz is None and end.tz is not None: - start = start.tz_localize(end.tz, ambiguous=False) + @Appender(DatetimeArrayMixin._generate_range.__doc__) + def _generate_range(cls, start, end, periods, name=None, freq=None, + tz=None, normalize=False, ambiguous='raise', + closed=None): + out = super(DatetimeIndex, cls)._generate_range( + start, end, periods, freq, + tz=tz, normalize=normalize, ambiguous=ambiguous, closed=closed) + out.name = name + return out - if 
end.tz is None and start.tz is not None: - end = end.tz_localize(start.tz, ambiguous=False) - - if _use_cached_range(freq, _normalized, start, end): - index = cls._cached_range(start, end, periods=periods, - freq=freq, name=name) - else: - index = _generate_regular_range(cls, start, end, periods, freq) - - else: - - if tz is not None: - # naive dates - if start is not None and start.tz is not None: - start = start.replace(tzinfo=None) - - if end is not None and end.tz is not None: - end = end.replace(tzinfo=None) - - if start and end: - if start.tz is None and end.tz is not None: - end = end.replace(tzinfo=None) - - if end.tz is None and start.tz is not None: - start = start.replace(tzinfo=None) - - if freq is not None: - if _use_cached_range(freq, _normalized, start, end): - index = cls._cached_range(start, end, periods=periods, - freq=freq, name=name) - else: - index = _generate_regular_range(cls, start, end, - periods, freq) - - if tz is not None and getattr(index, 'tz', None) is None: - arr = conversion.tz_localize_to_utc(ensure_int64(index), - tz, - ambiguous=ambiguous) - - index = cls(arr) - - # index is localized datetime64 array -> have to convert - # start/end as well to compare - if start is not None: - start = start.tz_localize(tz).asm8 - if end is not None: - end = end.tz_localize(tz).asm8 - else: - # Create a linearly spaced date_range in local time - start = start.tz_localize(tz) - end = end.tz_localize(tz) - index = tools.to_datetime(np.linspace(start.value, - end.value, periods), - utc=True) - index = index.tz_convert(tz) - - if not left_closed and len(index) and index[0] == start: - index = index[1:] - if not right_closed and len(index) and index[-1] == end: - index = index[:-1] - - index = cls._simple_new(index.values, name=name, freq=freq, tz=tz) - - return index + @classmethod + def _use_cached_range(cls, freq, _normalized, start, end): + return _use_cached_range(freq, _normalized, start, end) def _convert_for_op(self, value): """ Convert value 
to be insertable to ndarray """ @@ -1685,48 +1566,6 @@ def to_julian_date(self): DatetimeIndex._add_datetimelike_methods() -def _generate_regular_range(cls, start, end, periods, freq): - if isinstance(freq, Tick): - stride = freq.nanos - if periods is None: - b = Timestamp(start).value - # cannot just use e = Timestamp(end) + 1 because arange breaks when - # stride is too large, see GH10887 - e = (b + (Timestamp(end).value - b) // stride * stride + - stride // 2 + 1) - # end.tz == start.tz by this point due to _generate implementation - tz = start.tz - elif start is not None: - b = Timestamp(start).value - e = b + np.int64(periods) * stride - tz = start.tz - elif end is not None: - e = Timestamp(end).value + stride - b = e - np.int64(periods) * stride - tz = end.tz - else: - raise ValueError("at least 'start' or 'end' should be specified " - "if a 'period' is given.") - - data = np.arange(b, e, stride, dtype=np.int64) - data = cls._simple_new(data.view(_NS_DTYPE), None, tz=tz) - else: - if isinstance(start, Timestamp): - start = start.to_pydatetime() - - if isinstance(end, Timestamp): - end = end.to_pydatetime() - - xdr = generate_range(start=start, end=end, - periods=periods, offset=freq) - - dates = list(xdr) - # utc = len(dates) > 0 and dates[0].tzinfo is not None - data = tools.to_datetime(dates) - - return data - - def date_range(start=None, end=None, periods=None, freq=None, tz=None, normalize=False, name=None, closed=None, **kwargs): """ diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 006758f276f87..9f14d4cfd5863 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -14,7 +14,6 @@ pandas_dtype, ensure_int64) from pandas.core.dtypes.missing import isna -from pandas.core.dtypes.generic import ABCSeries from pandas.core.arrays.timedeltas import ( TimedeltaArrayMixin, _is_convertible_to_td, _to_m8) @@ -25,18 +24,17 @@ import pandas.compat as compat from pandas.tseries.frequencies import 
to_offset -from pandas.core.algorithms import checked_add_with_arr from pandas.core.base import _shared_docs from pandas.core.indexes.base import _index_shared_docs import pandas.core.common as com import pandas.core.dtypes.concat as _concat from pandas.util._decorators import Appender, Substitution, deprecate_kwarg from pandas.core.indexes.datetimelike import ( - TimelikeOps, DatetimeIndexOpsMixin) + TimelikeOps, DatetimeIndexOpsMixin, wrap_arithmetic_op) from pandas.core.tools.timedeltas import ( to_timedelta, _coerce_scalar_to_timedelta_type) from pandas._libs import (lib, index as libindex, - join as libjoin, Timedelta, NaT, iNaT) + join as libjoin, Timedelta, NaT) def _wrap_field_accessor(name): @@ -197,11 +195,10 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, if unit is not None: data = to_timedelta(data, unit=unit, box=False) - if not isinstance(data, (np.ndarray, Index, ABCSeries)): - if is_scalar(data): - raise ValueError('TimedeltaIndex() must be called with a ' - 'collection of some kind, %s was passed' - % repr(data)) + if is_scalar(data): + raise ValueError('TimedeltaIndex() must be called with a ' + 'collection of some kind, {data} was passed' + .format(data=repr(data))) # convert if not already if getattr(data, 'dtype', None) != _TD_DTYPE: @@ -223,7 +220,8 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, return subarr @classmethod - def _generate_range(cls, start, end, periods, name, freq, closed=None): + def _generate_range(cls, start, end, periods, + name=None, freq=None, closed=None): # TimedeltaArray gets `name` via **kwargs, so we need to explicitly # override it if name is passed as a positional argument return super(TimedeltaIndex, cls)._generate_range(start, end, @@ -262,37 +260,7 @@ def _maybe_update_attributes(self, attrs): def _evaluate_with_timedelta_like(self, other, op): result = TimedeltaArrayMixin._evaluate_with_timedelta_like(self, other, op) - if result is NotImplemented: - 
return NotImplemented - return Index(result, name=self.name, copy=False) - - def _add_datelike(self, other): - # adding a timedeltaindex to a datetimelike - from pandas import Timestamp, DatetimeIndex - if isinstance(other, (DatetimeIndex, np.ndarray)): - # if other is an ndarray, we assume it is datetime64-dtype - # defer to implementation in DatetimeIndex - other = DatetimeIndex(other) - return other + self - else: - assert other is not NaT - other = Timestamp(other) - i8 = self.asi8 - result = checked_add_with_arr(i8, other.value, - arr_mask=self._isnan) - result = self._maybe_mask_results(result, fill_value=iNaT) - return DatetimeIndex(result) - - def _addsub_offset_array(self, other, op): - # Add or subtract Array-like of DateOffset objects - try: - # TimedeltaIndex can only operate with a subset of DateOffset - # subclasses. Incompatible classes will raise AttributeError, - # which we re-raise as TypeError - return DatetimeIndexOpsMixin._addsub_offset_array(self, other, op) - except AttributeError: - raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}" - .format(cls=type(self).__name__)) + return wrap_arithmetic_op(self, other, result) def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): from pandas.io.formats.format import Timedelta64Formatter diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 69e802fbaa3f0..24f34884dc077 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -16,6 +16,11 @@ def test_from_dti(self, tz_naive_fixture): arr = DatetimeArrayMixin(dti) assert list(dti) == list(arr) + # Check that Index.__new__ knows what to do with DatetimeArray + dti2 = pd.Index(arr) + assert isinstance(dti2, pd.DatetimeIndex) + assert list(dti2) == list(arr) + def test_astype_object(self, tz_naive_fixture): tz = tz_naive_fixture dti = pd.date_range('2016-01-01', periods=3, tz=tz) @@ -32,6 +37,11 @@ def test_from_tdi(self): arr 
= TimedeltaArrayMixin(tdi) assert list(arr) == list(tdi) + # Check that Index.__new__ knows what to do with TimedeltaArray + tdi2 = pd.Index(arr) + assert isinstance(tdi2, pd.TimedeltaIndex) + assert list(tdi2) == list(arr) + def test_astype_object(self): tdi = pd.TimedeltaIndex(['1 Day', '3 Hours']) arr = TimedeltaArrayMixin(tdi) @@ -48,6 +58,11 @@ def test_from_pi(self): arr = PeriodArrayMixin(pi) assert list(arr) == list(pi) + # Check that Index.__new__ knows what to do with TimedeltaArray + pi2 = pd.Index(arr) + assert isinstance(pi2, pd.PeriodIndex) + assert list(pi2) == list(arr) + def test_astype_object(self): pi = pd.period_range('2016', freq='Q', periods=3) arr = PeriodArrayMixin(pi)
With the DatetimeArray range-generating functions moved, we are finally able to move `shift`, and in turn `__add__`, `__sub__`, etc. Two non-trivial changes made during the move process, see inline comments. Upcoming commits will add docstrings and port tests. ATM many tests are a PITA bc the Array contructors don't know how to handle lists of strings. Trying to find a way to implement that in `__new__` without having it become a total mess.
https://api.github.com/repos/pandas-dev/pandas/pulls/22016
2018-07-21T21:00:40Z
2018-07-26T12:49:40Z
2018-07-26T12:49:40Z
2018-07-26T21:13:14Z
ENH: Number formatting support for excel styles
diff --git a/doc/source/style.ipynb b/doc/source/style.ipynb index 152ca90049bf1..6f66c1a9bf7f9 100644 --- a/doc/source/style.ipynb +++ b/doc/source/style.ipynb @@ -985,7 +985,10 @@ "- `vertical-align`\n", "- `white-space: nowrap`\n", "\n", - "Only CSS2 named colors and hex colors of the form `#rgb` or `#rrggbb` are currently supported." + "Only CSS2 named colors and hex colors of the form `#rgb` or `#rrggbb` are currently supported.\n", + "\n", + "The following pseudo CSS properties are also available to set excel specific style properties:\n", + "- `number-format`\n" ] }, { diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 973b75f0e1451..137fd5aafe5bd 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -372,6 +372,7 @@ Other API Changes - Trying to reindex a ``DataFrame`` with a non unique ``MultiIndex`` now raises a ``ValueError`` instead of an ``Exception`` (:issue:`21770`) - :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) - :class:`Index` subtraction will attempt to operate element-wise instead of raising ``TypeError`` (:issue:`19369`) +- :class:`pandas.io.formats.style.Styler` supports a ``number-format`` property when using :meth:`~pandas.io.formats.style.Styler.to_excel` (:issue:`22015`) .. 
_whatsnew_0240.deprecations: diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index ec95ce7a970ad..0bc268bc18b95 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -98,8 +98,8 @@ def build_xlstyle(self, props): 'border': self.build_border(props), 'fill': self.build_fill(props), 'font': self.build_font(props), + 'number_format': self.build_number_format(props), } - # TODO: support number format # TODO: handle cell width and height: needs support in pandas.io.excel def remove_none(d): @@ -314,6 +314,9 @@ def color_to_excel(self, val): warnings.warn('Unhandled color format: {val!r}'.format(val=val), CSSWarning) + def build_number_format(self, props): + return {'format_code': props.get('number-format')} + class ExcelFormatter(object): """ diff --git a/pandas/tests/io/formats/test_to_excel.py b/pandas/tests/io/formats/test_to_excel.py index 2d691bf2c5d8e..9fc16c43f5c1d 100644 --- a/pandas/tests/io/formats/test_to_excel.py +++ b/pandas/tests/io/formats/test_to_excel.py @@ -172,6 +172,9 @@ {'alignment': {'wrap_text': False}}), ('white-space: normal', {'alignment': {'wrap_text': True}}), + # NUMBER FORMAT + ('number-format: 0%', + {'number_format': {'format_code': '0%'}}), ]) def test_css_to_excel(css, expected): convert = CSSToExcelConverter() diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index d1eab16e7c22c..e51780891534f 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -2241,6 +2241,7 @@ def style(df): ['', 'font-style: italic', ''], ['', '', 'text-align: right'], ['background-color: red', '', ''], + ['number-format: 0%', '', ''], ['', '', ''], ['', '', ''], ['', '', '']], @@ -2266,7 +2267,7 @@ def custom_converter(css): # Prepare spreadsheets - df = DataFrame(np.random.randn(10, 3)) + df = DataFrame(np.random.randn(11, 3)) with ensure_clean('.xlsx' if engine != 'xlwt' else '.xls') as path: writer = ExcelWriter(path, engine=engine) df.to_excel(writer, 
sheet_name='frame') @@ -2294,7 +2295,7 @@ def custom_converter(css): n_cells += 1 # ensure iteration actually happened: - assert n_cells == (10 + 1) * (3 + 1) + assert n_cells == (11 + 1) * (3 + 1) # (2) check styling with default converter @@ -2344,13 +2345,16 @@ def custom_converter(css): assert cell1.fill.patternType != cell2.fill.patternType assert cell2.fill.fgColor.rgb == alpha + 'FF0000' assert cell2.fill.patternType == 'solid' + elif ref == 'B9': + assert cell1.number_format == 'General' + assert cell2.number_format == '0%' else: assert_equal_style(cell1, cell2) assert cell1.value == cell2.value n_cells += 1 - assert n_cells == (10 + 1) * (3 + 1) + assert n_cells == (11 + 1) * (3 + 1) # (3) check styling with custom converter n_cells = 0 @@ -2359,7 +2363,7 @@ def custom_converter(css): assert len(col1) == len(col2) for cell1, cell2 in zip(col1, col2): ref = '%s%d' % (cell2.column, cell2.row) - if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8'): + if ref in ('B2', 'C3', 'D4', 'B5', 'C6', 'D7', 'B8', 'B9'): assert not cell1.font.bold assert cell2.font.bold else: @@ -2368,7 +2372,7 @@ def custom_converter(css): assert cell1.value == cell2.value n_cells += 1 - assert n_cells == (10 + 1) * (3 + 1) + assert n_cells == (11 + 1) * (3 + 1) @td.skip_if_no('openpyxl')
- [x] closes #22027 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Add number formatting support to the excel styles using a fake css entry. ```css number-format: 0%; ``` Added new tests, updated docs, and manually verified outputs using both excel output engines that support styling.
https://api.github.com/repos/pandas-dev/pandas/pulls/22015
2018-07-21T20:36:24Z
2018-07-24T13:11:19Z
2018-07-24T13:11:18Z
2018-07-24T16:04:32Z
[REF] separate blocks.py out of internals.__init__
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index fde3aaa14ac5d..a4cd301806569 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -1,12 +1,7 @@ # -*- coding: utf-8 -*- -import warnings import copy -from warnings import catch_warnings -import inspect import itertools -import re import operator -from datetime import datetime, timedelta, date from collections import defaultdict from functools import partial @@ -17,3203 +12,51 @@ from pandas.core.base import PandasObject from pandas.core.dtypes.dtypes import ( - ExtensionDtype, DatetimeTZDtype, - PandasExtensionDtype, - CategoricalDtype) + ExtensionDtype, + PandasExtensionDtype) from pandas.core.dtypes.common import ( - _TD_DTYPE, _NS_DTYPE, - ensure_int64, ensure_platform_int, - is_integer, - is_dtype_equal, + _NS_DTYPE, + ensure_int64, is_timedelta64_dtype, - is_datetime64_dtype, is_datetimetz, is_sparse, - is_categorical, is_categorical_dtype, - is_integer_dtype, - is_datetime64tz_dtype, - is_bool_dtype, - is_object_dtype, + is_datetime64_dtype, is_datetimetz, + is_categorical_dtype, is_datetimelike_v_numeric, is_float_dtype, is_numeric_dtype, is_numeric_v_string_like, is_extension_type, is_extension_array_dtype, - is_list_like, - is_re, - is_re_compilable, is_scalar, - _get_dtype, - pandas_dtype) + _get_dtype) from pandas.core.dtypes.cast import ( - maybe_downcast_to_dtype, - maybe_upcast, maybe_promote, - infer_dtype_from, infer_dtype_from_scalar, - soft_convert_objects, - maybe_convert_objects, - astype_nansafe, - find_common_type, - maybe_infer_dtype_type) -from pandas.core.dtypes.missing import ( - isna, notna, array_equivalent, - _isna_compat, - is_null_datelike_scalar) + find_common_type) +from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.generic import ABCSeries, ABCExtensionArray -from pandas.core.dtypes.generic import ( - ABCSeries, - ABCDatetimeIndex, - 
ABCExtensionArray, - ABCIndexClass) -import pandas.core.common as com import pandas.core.algorithms as algos from pandas.core.index import Index, MultiIndex, ensure_index -from pandas.core.indexing import maybe_convert_indices, check_setitem_lengths -from pandas.core.arrays import Categorical -from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.indexing import maybe_convert_indices from pandas.io.formats.printing import pprint_thing -import pandas.core.missing as missing -from pandas.core.sparse.array import _maybe_to_sparse, SparseArray -from pandas._libs import lib, tslib, tslibs -from pandas._libs.tslibs import conversion, Timedelta +from pandas.core.sparse.array import _maybe_to_sparse +from pandas._libs import lib, tslibs from pandas._libs.internals import BlockPlacement from pandas.util._decorators import cache_readonly from pandas.util._validators import validate_bool_kwarg -from pandas import compat from pandas.compat import range, map, zip, u - -class Block(PandasObject): - """ - Canonical n-dimensional unit of homogeneous dtype contained in a pandas - data structure - - Index-ignorant; let the container take care of that - """ - __slots__ = ['_mgr_locs', 'values', 'ndim'] - is_numeric = False - is_float = False - is_integer = False - is_complex = False - is_datetime = False - is_datetimetz = False - is_timedelta = False - is_bool = False - is_object = False - is_categorical = False - is_sparse = False - is_extension = False - _box_to_block_values = True - _can_hold_na = False - _can_consolidate = True - _verify_integrity = True - _validate_ndim = True - _ftype = 'dense' - _concatenator = staticmethod(np.concatenate) - - def __init__(self, values, placement, ndim=None): - self.ndim = self._check_ndim(values, ndim) - self.mgr_locs = placement - self.values = values - - if (self._validate_ndim and self.ndim and - len(self.mgr_locs) != len(self.values)): - raise ValueError( - 
'Wrong number of items passed {val}, placement implies ' - '{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs))) - - def _check_ndim(self, values, ndim): - """ndim inference and validation. - - Infers ndim from 'values' if not provided to __init__. - Validates that values.ndim and ndim are consistent if and only if - the class variable '_validate_ndim' is True. - - Parameters - ---------- - values : array-like - ndim : int or None - - Returns - ------- - ndim : int - - Raises - ------ - ValueError : the number of dimensions do not match - """ - if ndim is None: - ndim = values.ndim - - if self._validate_ndim and values.ndim != ndim: - msg = ("Wrong number of dimensions. values.ndim != ndim " - "[{} != {}]") - raise ValueError(msg.format(values.ndim, ndim)) - - return ndim - - @property - def _holder(self): - """The array-like that can hold the underlying values. - - None for 'Block', overridden by subclasses that don't - use an ndarray. - """ - return None - - @property - def _consolidate_key(self): - return (self._can_consolidate, self.dtype.name) - - @property - def _is_single_block(self): - return self.ndim == 1 - - @property - def is_view(self): - """ return a boolean if I am possibly a view """ - return self.values.base is not None - - @property - def is_datelike(self): - """ return True if I am a non-datelike """ - return self.is_datetime or self.is_timedelta - - def is_categorical_astype(self, dtype): - """ - validate that we have a astypeable to categorical, - returns a boolean if we are a categorical - """ - if dtype is Categorical or dtype is CategoricalDtype: - # this is a pd.Categorical, but is not - # a valid type for astypeing - raise TypeError("invalid type {0} for astype".format(dtype)) - - elif is_categorical_dtype(dtype): - return True - - return False - - def external_values(self, dtype=None): - """ return an outside world format, currently just the ndarray """ - return self.values - - def internal_values(self, dtype=None): - """ return 
an internal format, currently just the ndarray - this should be the pure internal API format - """ - return self.values - - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self.internal_values() - - def get_values(self, dtype=None): - """ - return an internal format, currently just the ndarray - this is often overridden to handle to_dense like operations - """ - if is_object_dtype(dtype): - return self.values.astype(object) - return self.values - - def to_dense(self): - return self.values.view() - - @property - def _na_value(self): - return np.nan - - @property - def fill_value(self): - return np.nan - - @property - def mgr_locs(self): - return self._mgr_locs - - @mgr_locs.setter - def mgr_locs(self, new_mgr_locs): - if not isinstance(new_mgr_locs, BlockPlacement): - new_mgr_locs = BlockPlacement(new_mgr_locs) - - self._mgr_locs = new_mgr_locs - - @property - def array_dtype(self): - """ the dtype to return if I want to construct this block as an - array - """ - return self.dtype - - def make_block(self, values, placement=None, ndim=None): - """ - Create a new block, with type inference propagate any values that are - not specified - """ - if placement is None: - placement = self.mgr_locs - if ndim is None: - ndim = self.ndim - - return make_block(values, placement=placement, ndim=ndim) - - def make_block_scalar(self, values): - """ - Create a ScalarBlock - """ - return ScalarBlock(values) - - def make_block_same_class(self, values, placement=None, ndim=None, - dtype=None): - """ Wrap given values in a block of same type as self. 
""" - if dtype is not None: - # issue 19431 fastparquet is passing this - warnings.warn("dtype argument is deprecated, will be removed " - "in a future release.", DeprecationWarning) - if placement is None: - placement = self.mgr_locs - return make_block(values, placement=placement, ndim=ndim, - klass=self.__class__, dtype=dtype) - - def __unicode__(self): - - # don't want to print out all of the items here - name = pprint_thing(self.__class__.__name__) - if self._is_single_block: - - result = '{name}: {len} dtype: {dtype}'.format( - name=name, len=len(self), dtype=self.dtype) - - else: - - shape = ' x '.join(pprint_thing(s) for s in self.shape) - result = '{name}: {index}, {shape}, dtype: {dtype}'.format( - name=name, index=pprint_thing(self.mgr_locs.indexer), - shape=shape, dtype=self.dtype) - - return result - - def __len__(self): - return len(self.values) - - def __getstate__(self): - return self.mgr_locs.indexer, self.values - - def __setstate__(self, state): - self.mgr_locs = BlockPlacement(state[0]) - self.values = state[1] - self.ndim = self.values.ndim - - def _slice(self, slicer): - """ return a slice of my values """ - return self.values[slicer] - - def reshape_nd(self, labels, shape, ref_items, mgr=None): - """ - Parameters - ---------- - labels : list of new axis labels - shape : new shape - ref_items : new ref_items - - return a new block that is transformed to a nd block - """ - return _block2d_to_blocknd(values=self.get_values().T, - placement=self.mgr_locs, shape=shape, - labels=labels, ref_items=ref_items) - - def getitem_block(self, slicer, new_mgr_locs=None): - """ - Perform __getitem__-like, return result as block. - - As of now, only supports slices that preserve dimensionality. 
- """ - if new_mgr_locs is None: - if isinstance(slicer, tuple): - axis0_slicer = slicer[0] - else: - axis0_slicer = slicer - new_mgr_locs = self.mgr_locs[axis0_slicer] - - new_values = self._slice(slicer) - - if self._validate_ndim and new_values.ndim != self.ndim: - raise ValueError("Only same dim slicing is allowed") - - return self.make_block_same_class(new_values, new_mgr_locs) - - @property - def shape(self): - return self.values.shape - - @property - def dtype(self): - return self.values.dtype - - @property - def ftype(self): - return "{dtype}:{ftype}".format(dtype=self.dtype, ftype=self._ftype) - - def merge(self, other): - return _merge_blocks([self, other]) - - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. - """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - return self.make_block_same_class( - values, placement=placement or slice(0, len(values), 1)) - - def iget(self, i): - return self.values[i] - - def set(self, locs, values, check=False): - """ - Modify Block in-place with new item value - - Returns - ------- - None - """ - self.values[locs] = values - - def delete(self, loc): - """ - Delete given loc(-s) from block in-place. - """ - self.values = np.delete(self.values, loc, 0) - self.mgr_locs = self.mgr_locs.delete(loc) - - def apply(self, func, mgr=None, **kwargs): - """ apply the function to my values; return a block if we are not - one - """ - with np.errstate(all='ignore'): - result = func(self.values, **kwargs) - if not isinstance(result, Block): - result = self.make_block(values=_block_shape(result, - ndim=self.ndim)) - - return result - - def fillna(self, value, limit=None, inplace=False, downcast=None, - mgr=None): - """ fillna on the block with the value. 
If we fail, then convert to - ObjectBlock and try again - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - - if not self._can_hold_na: - if inplace: - return self - else: - return self.copy() - - mask = isna(self.values) - if limit is not None: - if not is_integer(limit): - raise ValueError('Limit must be an integer') - if limit < 1: - raise ValueError('Limit must be greater than 0') - if self.ndim > 2: - raise NotImplementedError("number of dimensions for 'fillna' " - "is currently limited to 2") - mask[mask.cumsum(self.ndim - 1) > limit] = False - - # fillna, but if we cannot coerce, then try again as an ObjectBlock - try: - values, _, _, _ = self._try_coerce_args(self.values, value) - blocks = self.putmask(mask, value, inplace=inplace) - blocks = [b.make_block(values=self._try_coerce_result(b.values)) - for b in blocks] - return self._maybe_downcast(blocks, downcast) - except (TypeError, ValueError): - - # we can't process the value, but nothing to do - if not mask.any(): - return self if inplace else self.copy() - - # operate column-by-column - def f(m, v, i): - block = self.coerce_to_target_dtype(value) - - # slice out our block - if i is not None: - block = block.getitem_block(slice(i, i + 1)) - return block.fillna(value, - limit=limit, - inplace=inplace, - downcast=None) - - return self.split_and_operate(mask, f, inplace) - - def split_and_operate(self, mask, f, inplace): - """ - split the block per-column, and apply the callable f - per-column, return a new block for each. Handle - masking which will not change a block unless needed. 
- - Parameters - ---------- - mask : 2-d boolean mask - f : callable accepting (1d-mask, 1d values, indexer) - inplace : boolean - - Returns - ------- - list of blocks - """ - - if mask is None: - mask = np.ones(self.shape, dtype=bool) - new_values = self.values - - def make_a_block(nv, ref_loc): - if isinstance(nv, Block): - block = nv - elif isinstance(nv, list): - block = nv[0] - else: - # Put back the dimension that was taken from it and make - # a block out of the result. - try: - nv = _block_shape(nv, ndim=self.ndim) - except (AttributeError, NotImplementedError): - pass - block = self.make_block(values=nv, - placement=ref_loc) - return block - - # ndim == 1 - if self.ndim == 1: - if mask.any(): - nv = f(mask, new_values, None) - else: - nv = new_values if inplace else new_values.copy() - block = make_a_block(nv, self.mgr_locs) - return [block] - - # ndim > 1 - new_blocks = [] - for i, ref_loc in enumerate(self.mgr_locs): - m = mask[i] - v = new_values[i] - - # need a new block - if m.any(): - nv = f(m, v, i) - else: - nv = v if inplace else v.copy() - - block = make_a_block(nv, [ref_loc]) - new_blocks.append(block) - - return new_blocks - - def _maybe_downcast(self, blocks, downcast=None): - - # no need to downcast our float - # unless indicated - if downcast is None and self.is_float: - return blocks - elif downcast is None and (self.is_timedelta or self.is_datetime): - return blocks - - if not isinstance(blocks, list): - blocks = [blocks] - return _extend_blocks([b.downcast(downcast) for b in blocks]) - - def downcast(self, dtypes=None, mgr=None): - """ try to downcast each item to the dict of dtypes if present """ - - # turn it off completely - if dtypes is False: - return self - - values = self.values - - # single block handling - if self._is_single_block: - - # try to cast all non-floats here - if dtypes is None: - dtypes = 'infer' - - nv = maybe_downcast_to_dtype(values, dtypes) - return self.make_block(nv) - - # ndim > 1 - if dtypes is None: - return 
self - - if not (dtypes == 'infer' or isinstance(dtypes, dict)): - raise ValueError("downcast must have a dictionary or 'infer' as " - "its argument") - - # operate column-by-column - # this is expensive as it splits the blocks items-by-item - def f(m, v, i): - - if dtypes == 'infer': - dtype = 'infer' - else: - raise AssertionError("dtypes as dict is not supported yet") - - if dtype is not None: - v = maybe_downcast_to_dtype(v, dtype) - return v - - return self.split_and_operate(None, f, False) - - def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs): - return self._astype(dtype, copy=copy, errors=errors, values=values, - **kwargs) - - def _astype(self, dtype, copy=False, errors='raise', values=None, - klass=None, mgr=None, **kwargs): - """Coerce to the new type - - Parameters - ---------- - dtype : str, dtype convertible - copy : boolean, default False - copy if indicated - errors : str, {'raise', 'ignore'}, default 'ignore' - - ``raise`` : allow exceptions to be raised - - ``ignore`` : suppress exceptions. On error return original object - - Returns - ------- - Block - """ - errors_legal_values = ('raise', 'ignore') - - if errors not in errors_legal_values: - invalid_arg = ("Expected value of kwarg 'errors' to be one of {}. " - "Supplied value is '{}'".format( - list(errors_legal_values), errors)) - raise ValueError(invalid_arg) - - if (inspect.isclass(dtype) and - issubclass(dtype, (PandasExtensionDtype, ExtensionDtype))): - msg = ("Expected an instance of {}, but got the class instead. " - "Try instantiating 'dtype'.".format(dtype.__name__)) - raise TypeError(msg) - - # may need to convert to categorical - if self.is_categorical_astype(dtype): - - # deprecated 17636 - if ('categories' in kwargs or 'ordered' in kwargs): - if isinstance(dtype, CategoricalDtype): - raise TypeError( - "Cannot specify a CategoricalDtype and also " - "`categories` or `ordered`. 
Use " - "`dtype=CategoricalDtype(categories, ordered)`" - " instead.") - warnings.warn("specifying 'categories' or 'ordered' in " - ".astype() is deprecated; pass a " - "CategoricalDtype instead", - FutureWarning, stacklevel=7) - - categories = kwargs.get('categories', None) - ordered = kwargs.get('ordered', None) - if com._any_not_none(categories, ordered): - dtype = CategoricalDtype(categories, ordered) - - if is_categorical_dtype(self.values): - # GH 10696/18593: update an existing categorical efficiently - return self.make_block(self.values.astype(dtype, copy=copy)) - - return self.make_block(Categorical(self.values, dtype=dtype)) - - # convert dtypes if needed - dtype = pandas_dtype(dtype) - - # astype processing - if is_dtype_equal(self.dtype, dtype): - if copy: - return self.copy() - return self - - if klass is None: - if dtype == np.object_: - klass = ObjectBlock - try: - # force the copy here - if values is None: - - if issubclass(dtype.type, - (compat.text_type, compat.string_types)): - - # use native type formatting for datetime/tz/timedelta - if self.is_datelike: - values = self.to_native_types() - - # astype formatting - else: - values = self.get_values() - - else: - values = self.get_values(dtype=dtype) - - # _astype_nansafe works fine with 1-d only - values = astype_nansafe(values.ravel(), dtype, copy=True) - - # TODO(extension) - # should we make this attribute? 
- try: - values = values.reshape(self.shape) - except AttributeError: - pass - - newb = make_block(values, placement=self.mgr_locs, - klass=klass) - except: - if errors == 'raise': - raise - newb = self.copy() if copy else self - - if newb.is_numeric and self.is_numeric: - if newb.shape != self.shape: - raise TypeError( - "cannot set astype for copy = [{copy}] for dtype " - "({dtype} [{itemsize}]) with smaller itemsize than " - "current ({newb_dtype} [{newb_size}])".format( - copy=copy, dtype=self.dtype.name, - itemsize=self.itemsize, newb_dtype=newb.dtype.name, - newb_size=newb.itemsize)) - return newb - - def convert(self, copy=True, **kwargs): - """ attempt to coerce any object types to better types return a copy - of the block (if copy = True) by definition we are not an ObjectBlock - here! - """ - - return self.copy() if copy else self - - def _can_hold_element(self, element): - """ require the same dtype as ourselves """ - dtype = self.values.dtype.type - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, dtype) - return isinstance(element, dtype) - - def _try_cast_result(self, result, dtype=None): - """ try to cast the result to our original type, we may have - roundtripped thru object in the mean-time - """ - if dtype is None: - dtype = self.dtype - - if self.is_integer or self.is_bool or self.is_datetime: - pass - elif self.is_float and result.dtype == self.dtype: - - # protect against a bool/object showing up here - if isinstance(dtype, compat.string_types) and dtype == 'infer': - return result - if not isinstance(dtype, type): - dtype = dtype.type - if issubclass(dtype, (np.bool_, np.object_)): - if issubclass(dtype, np.bool_): - if isna(result).all(): - return result.astype(np.bool_) - else: - result = result.astype(np.object_) - result[result == 1] = True - result[result == 0] = False - return result - else: - return result.astype(np.object_) - - return result - - # may need to change the dtype here - return 
maybe_downcast_to_dtype(result, dtype) - - def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments """ - - if np.any(notna(other)) and not self._can_hold_element(other): - # coercion issues - # let higher levels handle - raise TypeError("cannot convert {} to an {}".format( - type(other).__name__, - type(self).__name__.lower().replace('Block', ''))) - - return values, False, other, False - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - return result - - def _try_coerce_and_cast_result(self, result, dtype=None): - result = self._try_coerce_result(result) - result = self._try_cast_result(result, dtype=dtype) - return result - - def to_native_types(self, slicer=None, na_rep='nan', quoting=None, - **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.get_values() - - if slicer is not None: - values = values[:, slicer] - mask = isna(values) - - if not self.is_object and not quoting: - values = values.astype(str) - else: - values = np.array(values, dtype='object') - - values[mask] = na_rep - return values - - # block actions #### - def copy(self, deep=True, mgr=None): - """ copy constructor """ - values = self.values - if deep: - values = values.copy() - return self.make_block_same_class(values) - - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - """ replace the to_replace value with value, possible to create new - blocks here this is just a call to putmask. regex is not used here. - It is used in ObjectBlocks. It is here for API - compatibility. 
- """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - original_to_replace = to_replace - - # try to replace, if we raise an error, convert to ObjectBlock and - # retry - try: - values, _, to_replace, _ = self._try_coerce_args(self.values, - to_replace) - mask = missing.mask_missing(values, to_replace) - if filter is not None: - filtered_out = ~self.mgr_locs.isin(filter) - mask[filtered_out.nonzero()[0]] = False - - blocks = self.putmask(mask, value, inplace=inplace) - if convert: - blocks = [b.convert(by_item=True, numeric=False, - copy=not inplace) for b in blocks] - return blocks - except (TypeError, ValueError): - - # try again with a compatible block - block = self.astype(object) - return block.replace( - to_replace=original_to_replace, value=value, inplace=inplace, - filter=filter, regex=regex, convert=convert) - - def _replace_single(self, *args, **kwargs): - """ no-op on a non-ObjectBlock """ - return self if kwargs['inplace'] else self.copy() - - def setitem(self, indexer, value, mgr=None): - """Set the value inplace, returning a a maybe different typed block. - - Parameters - ---------- - indexer : tuple, list-like, array-like, slice - The subset of self.values to set - value : object - The value being set - mgr : BlockPlacement, optional - - Returns - ------- - Block - - Notes - ----- - `indexer` is a direct slice/positional indexer. `value` must - be a compatible shape. 
- """ - # coerce None values, if appropriate - if value is None: - if self.is_numeric: - value = np.nan - - # coerce if block dtype can store value - values = self.values - try: - values, _, value, _ = self._try_coerce_args(values, value) - # can keep its own dtype - if hasattr(value, 'dtype') and is_dtype_equal(values.dtype, - value.dtype): - dtype = self.dtype - else: - dtype = 'infer' - - except (TypeError, ValueError): - # current dtype cannot store value, coerce to common dtype - find_dtype = False - - if hasattr(value, 'dtype'): - dtype = value.dtype - find_dtype = True - - elif is_scalar(value): - if isna(value): - # NaN promotion is handled in latter path - dtype = False - else: - dtype, _ = infer_dtype_from_scalar(value, - pandas_dtype=True) - find_dtype = True - else: - dtype = 'infer' - - if find_dtype: - dtype = find_common_type([values.dtype, dtype]) - if not is_dtype_equal(self.dtype, dtype): - b = self.astype(dtype) - return b.setitem(indexer, value, mgr=mgr) - - # value must be storeable at this moment - arr_value = np.array(value) - - # cast the values to a type that can hold nan (if necessary) - if not self._can_hold_element(value): - dtype, _ = maybe_promote(arr_value.dtype) - values = values.astype(dtype) - - transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) - values = transf(values) - - # length checking - check_setitem_lengths(indexer, value, values) - - def _is_scalar_indexer(indexer): - # return True if we are all scalar indexers - - if arr_value.ndim == 1: - if not isinstance(indexer, tuple): - indexer = tuple([indexer]) - return any(isinstance(idx, np.ndarray) and len(idx) == 0 - for idx in indexer) - return False - - def _is_empty_indexer(indexer): - # return a boolean if we have an empty indexer - - if is_list_like(indexer) and not len(indexer): - return True - if arr_value.ndim == 1: - if not isinstance(indexer, tuple): - indexer = tuple([indexer]) - return any(isinstance(idx, np.ndarray) and len(idx) == 0 - for idx in 
indexer) - return False - - # empty indexers - # 8669 (empty) - if _is_empty_indexer(indexer): - pass - - # setting a single element for each dim and with a rhs that could - # be say a list - # GH 6043 - elif _is_scalar_indexer(indexer): - values[indexer] = value - - # if we are an exact match (ex-broadcasting), - # then use the resultant dtype - elif (len(arr_value.shape) and - arr_value.shape[0] == values.shape[0] and - np.prod(arr_value.shape) == np.prod(values.shape)): - values[indexer] = value - try: - values = values.astype(arr_value.dtype) - except ValueError: - pass - - # set - else: - values[indexer] = value - - # coerce and try to infer the dtypes of the result - values = self._try_coerce_and_cast_result(values, dtype) - block = self.make_block(transf(values)) - return block - - def putmask(self, mask, new, align=True, inplace=False, axis=0, - transpose=False, mgr=None): - """ putmask the data to the block; it is possible that we may create a - new dtype of block - - return the resulting block(s) - - Parameters - ---------- - mask : the condition to respect - new : a ndarray/object - align : boolean, perform alignment on other/cond, default is True - inplace : perform inplace modification, default is False - axis : int - transpose : boolean - Set to True if self is stored with axes reversed - - Returns - ------- - a list of new blocks, the result of the putmask - """ - - new_values = self.values if inplace else self.values.copy() - - new = getattr(new, 'values', new) - mask = getattr(mask, 'values', mask) - - # if we are passed a scalar None, convert it here - if not is_list_like(new) and isna(new) and not self.is_object: - new = self.fill_value - - if self._can_hold_element(new): - _, _, new, _ = self._try_coerce_args(new_values, new) - - if transpose: - new_values = new_values.T - - # If the default repeat behavior in np.putmask would go in the - # wrong direction, then explicitly repeat and reshape new instead - if getattr(new, 'ndim', 0) >= 1: - if 
self.ndim - 1 == new.ndim and axis == 1: - new = np.repeat( - new, new_values.shape[-1]).reshape(self.shape) - new = new.astype(new_values.dtype) - - # we require exact matches between the len of the - # values we are setting (or is compat). np.putmask - # doesn't check this and will simply truncate / pad - # the output, but we want sane error messages - # - # TODO: this prob needs some better checking - # for 2D cases - if ((is_list_like(new) and - np.any(mask[mask]) and - getattr(new, 'ndim', 1) == 1)): - - if not (mask.shape[-1] == len(new) or - mask[mask].shape[-1] == len(new) or - len(new) == 1): - raise ValueError("cannot assign mismatch " - "length to masked array") - - np.putmask(new_values, mask, new) - - # maybe upcast me - elif mask.any(): - if transpose: - mask = mask.T - if isinstance(new, np.ndarray): - new = new.T - axis = new_values.ndim - axis - 1 - - # Pseudo-broadcast - if getattr(new, 'ndim', 0) >= 1: - if self.ndim - 1 == new.ndim: - new_shape = list(new.shape) - new_shape.insert(axis, 1) - new = new.reshape(tuple(new_shape)) - - # operate column-by-column - def f(m, v, i): - - if i is None: - # ndim==1 case. 
- n = new - else: - - if isinstance(new, np.ndarray): - n = np.squeeze(new[i % new.shape[0]]) - else: - n = np.array(new) - - # type of the new block - dtype, _ = maybe_promote(n.dtype) - - # we need to explicitly astype here to make a copy - n = n.astype(dtype) - - nv = _putmask_smart(v, m, n) - return nv - - new_blocks = self.split_and_operate(mask, f, inplace) - return new_blocks - - if inplace: - return [self] - - if transpose: - new_values = new_values.T - - return [self.make_block(new_values)] - - def coerce_to_target_dtype(self, other): - """ - coerce the current block to a dtype compat for other - we will return a block, possibly object, and not raise - - we can also safely try to coerce to the same dtype - and will receive the same block - """ - - # if we cannot then coerce to object - dtype, _ = infer_dtype_from(other, pandas_dtype=True) - - if is_dtype_equal(self.dtype, dtype): - return self - - if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype): - # we don't upcast to bool - return self.astype(object) - - elif ((self.is_float or self.is_complex) and - (is_integer_dtype(dtype) or is_float_dtype(dtype))): - # don't coerce float/complex to int - return self - - elif (self.is_datetime or - is_datetime64_dtype(dtype) or - is_datetime64tz_dtype(dtype)): - - # not a datetime - if not ((is_datetime64_dtype(dtype) or - is_datetime64tz_dtype(dtype)) and self.is_datetime): - return self.astype(object) - - # don't upcast timezone with different timezone or no timezone - mytz = getattr(self.dtype, 'tz', None) - othertz = getattr(dtype, 'tz', None) - - if str(mytz) != str(othertz): - return self.astype(object) - - raise AssertionError("possible recursion in " - "coerce_to_target_dtype: {} {}".format( - self, other)) - - elif (self.is_timedelta or is_timedelta64_dtype(dtype)): - - # not a timedelta - if not (is_timedelta64_dtype(dtype) and self.is_timedelta): - return self.astype(object) - - raise AssertionError("possible recursion in " - 
"coerce_to_target_dtype: {} {}".format( - self, other)) - - try: - return self.astype(dtype) - except (ValueError, TypeError): - pass - - return self.astype(object) - - def interpolate(self, method='pad', axis=0, index=None, values=None, - inplace=False, limit=None, limit_direction='forward', - limit_area=None, fill_value=None, coerce=False, - downcast=None, mgr=None, **kwargs): - - inplace = validate_bool_kwarg(inplace, 'inplace') - - def check_int_bool(self, inplace): - # Only FloatBlocks will contain NaNs. - # timedelta subclasses IntBlock - if (self.is_bool or self.is_integer) and not self.is_timedelta: - if inplace: - return self - else: - return self.copy() - - # a fill na type method - try: - m = missing.clean_fill_method(method) - except: - m = None - - if m is not None: - r = check_int_bool(self, inplace) - if r is not None: - return r - return self._interpolate_with_fill(method=m, axis=axis, - inplace=inplace, limit=limit, - fill_value=fill_value, - coerce=coerce, - downcast=downcast, mgr=mgr) - # try an interp method - try: - m = missing.clean_interp_method(method, **kwargs) - except: - m = None - - if m is not None: - r = check_int_bool(self, inplace) - if r is not None: - return r - return self._interpolate(method=m, index=index, values=values, - axis=axis, limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - fill_value=fill_value, inplace=inplace, - downcast=downcast, mgr=mgr, **kwargs) - - raise ValueError("invalid method '{0}' to interpolate.".format(method)) - - def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, - limit=None, fill_value=None, coerce=False, - downcast=None, mgr=None): - """ fillna but using the interpolate machinery """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - - # if we are coercing, then don't force the conversion - # if the block can't hold the type - if coerce: - if not self._can_hold_na: - if inplace: - return [self] - else: - return [self.copy()] - - values = self.values 
if inplace else self.values.copy() - values, _, fill_value, _ = self._try_coerce_args(values, fill_value) - values = missing.interpolate_2d(values, method=method, axis=axis, - limit=limit, fill_value=fill_value, - dtype=self.dtype) - values = self._try_coerce_result(values) - - blocks = [self.make_block_same_class(values, ndim=self.ndim)] - return self._maybe_downcast(blocks, downcast) - - def _interpolate(self, method=None, index=None, values=None, - fill_value=None, axis=0, limit=None, - limit_direction='forward', limit_area=None, - inplace=False, downcast=None, mgr=None, **kwargs): - """ interpolate using scipy wrappers """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - data = self.values if inplace else self.values.copy() - - # only deal with floats - if not self.is_float: - if not self.is_integer: - return self - data = data.astype(np.float64) - - if fill_value is None: - fill_value = self.fill_value - - if method in ('krogh', 'piecewise_polynomial', 'pchip'): - if not index.is_monotonic: - raise ValueError("{0} interpolation requires that the " - "index be monotonic.".format(method)) - # process 1-d slices in the axis direction - - def func(x): - - # process a 1-d slice, returning it - # should the axis argument be handled below in apply_along_axis? - # i.e. 
not an arg to missing.interpolate_1d - return missing.interpolate_1d(index, x, method=method, limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - fill_value=fill_value, - bounds_error=False, **kwargs) - - # interp each column independently - interp_values = np.apply_along_axis(func, axis, data) - - blocks = [self.make_block_same_class(interp_values)] - return self._maybe_downcast(blocks, downcast) - - def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): - """ - Take values according to indexer and return them as a block.bb - - """ - - # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock - # so need to preserve types - # sparse is treated like an ndarray, but needs .get_values() shaping - - values = self.values - if self.is_sparse: - values = self.get_values() - - if fill_tuple is None: - fill_value = self.fill_value - new_values = algos.take_nd(values, indexer, axis=axis, - allow_fill=False) - else: - fill_value = fill_tuple[0] - new_values = algos.take_nd(values, indexer, axis=axis, - allow_fill=True, fill_value=fill_value) - - if new_mgr_locs is None: - if axis == 0: - slc = libinternals.indexer_as_slice(indexer) - if slc is not None: - new_mgr_locs = self.mgr_locs[slc] - else: - new_mgr_locs = self.mgr_locs[indexer] - else: - new_mgr_locs = self.mgr_locs - - if not is_dtype_equal(new_values.dtype, self.dtype): - return self.make_block(new_values, new_mgr_locs) - else: - return self.make_block_same_class(new_values, new_mgr_locs) - - def diff(self, n, axis=1, mgr=None): - """ return block for the diff of the values """ - new_values = algos.diff(self.values, n, axis=axis) - return [self.make_block(values=new_values)] - - def shift(self, periods, axis=0, mgr=None): - """ shift the block by periods, possibly upcast """ - - # convert integer to float if necessary. 
need to do a lot more than - # that, handle boolean etc also - new_values, fill_value = maybe_upcast(self.values) - - # make sure array sent to np.roll is c_contiguous - f_ordered = new_values.flags.f_contiguous - if f_ordered: - new_values = new_values.T - axis = new_values.ndim - axis - 1 - - if np.prod(new_values.shape): - new_values = np.roll(new_values, ensure_platform_int(periods), - axis=axis) - - axis_indexer = [slice(None)] * self.ndim - if periods > 0: - axis_indexer[axis] = slice(None, periods) - else: - axis_indexer[axis] = slice(periods, None) - new_values[tuple(axis_indexer)] = fill_value - - # restore original order - if f_ordered: - new_values = new_values.T - - return [self.make_block(new_values)] - - def eval(self, func, other, errors='raise', try_cast=False, mgr=None): - """ - evaluate the block; return result block from the result - - Parameters - ---------- - func : how to combine self, other - other : a ndarray/object - errors : str, {'raise', 'ignore'}, default 'raise' - - ``raise`` : allow exceptions to be raised - - ``ignore`` : suppress exceptions. 
On error return original object - - try_cast : try casting the results to the input type - - Returns - ------- - a new block, the result of the func - """ - orig_other = other - values = self.values - - other = getattr(other, 'values', other) - - # make sure that we can broadcast - is_transposed = False - if hasattr(other, 'ndim') and hasattr(values, 'ndim'): - if values.ndim != other.ndim: - is_transposed = True - else: - if values.shape == other.shape[::-1]: - is_transposed = True - elif values.shape[0] == other.shape[-1]: - is_transposed = True - else: - # this is a broadcast error heree - raise ValueError( - "cannot broadcast shape [{t_shape}] with " - "block values [{oth_shape}]".format( - t_shape=values.T.shape, oth_shape=other.shape)) - - transf = (lambda x: x.T) if is_transposed else (lambda x: x) - - # coerce/transpose the args if needed - try: - values, values_mask, other, other_mask = self._try_coerce_args( - transf(values), other) - except TypeError: - block = self.coerce_to_target_dtype(orig_other) - return block.eval(func, orig_other, - errors=errors, - try_cast=try_cast, mgr=mgr) - - # get the result, may need to transpose the other - def get_result(other): - - # avoid numpy warning of comparisons again None - if other is None: - result = not func.__name__ == 'eq' - - # avoid numpy warning of elementwise comparisons to object - elif is_numeric_v_string_like(values, other): - result = False - - # avoid numpy warning of elementwise comparisons - elif func.__name__ == 'eq': - if is_list_like(other) and not isinstance(other, np.ndarray): - other = np.asarray(other) - - # if we can broadcast, then ok - if values.shape[-1] != other.shape[-1]: - return False - result = func(values, other) - else: - result = func(values, other) - - # mask if needed - if isinstance(values_mask, np.ndarray) and values_mask.any(): - result = result.astype('float64', copy=False) - result[values_mask] = np.nan - if other_mask is True: - result = result.astype('float64', 
copy=False) - result[:] = np.nan - elif isinstance(other_mask, np.ndarray) and other_mask.any(): - result = result.astype('float64', copy=False) - result[other_mask.ravel()] = np.nan - - return result - - # error handler if we have an issue operating with the function - def handle_error(): - - if errors == 'raise': - # The 'detail' variable is defined in outer scope. - raise TypeError( - 'Could not operate {other!r} with block values ' - '{detail!s}'.format(other=other, detail=detail)) # noqa - else: - # return the values - result = np.empty(values.shape, dtype='O') - result.fill(np.nan) - return result - - # get the result - try: - with np.errstate(all='ignore'): - result = get_result(other) - - # if we have an invalid shape/broadcast error - # GH4576, so raise instead of allowing to pass through - except ValueError as detail: - raise - except Exception as detail: - result = handle_error() - - # technically a broadcast error in numpy can 'work' by returning a - # boolean False - if not isinstance(result, np.ndarray): - if not isinstance(result, np.ndarray): - - # differentiate between an invalid ndarray-ndarray comparison - # and an invalid type comparison - if isinstance(values, np.ndarray) and is_list_like(other): - raise ValueError( - 'Invalid broadcasting comparison [{other!r}] with ' - 'block values'.format(other=other)) - - raise TypeError('Could not compare [{other!r}] ' - 'with block values'.format(other=other)) - - # transpose if needed - result = transf(result) - - # try to cast if requested - if try_cast: - result = self._try_cast_result(result) - - result = _block_shape(result, ndim=self.ndim) - return [self.make_block(result)] - - def where(self, other, cond, align=True, errors='raise', - try_cast=False, axis=0, transpose=False, mgr=None): - """ - evaluate the block; return result block(s) from the result - - Parameters - ---------- - other : a ndarray/object - cond : the condition to respect - align : boolean, perform alignment on other/cond - errors 
: str, {'raise', 'ignore'}, default 'raise' - - ``raise`` : allow exceptions to be raised - - ``ignore`` : suppress exceptions. On error return original object - - axis : int - transpose : boolean - Set to True if self is stored with axes reversed - - Returns - ------- - a new block(s), the result of the func - """ - import pandas.core.computation.expressions as expressions - assert errors in ['raise', 'ignore'] - - values = self.values - orig_other = other - if transpose: - values = values.T - - other = getattr(other, '_values', getattr(other, 'values', other)) - cond = getattr(cond, 'values', cond) - - # If the default broadcasting would go in the wrong direction, then - # explicitly reshape other instead - if getattr(other, 'ndim', 0) >= 1: - if values.ndim - 1 == other.ndim and axis == 1: - other = other.reshape(tuple(other.shape + (1, ))) - elif transpose and values.ndim == self.ndim - 1: - cond = cond.T - - if not hasattr(cond, 'shape'): - raise ValueError("where must have a condition that is ndarray " - "like") - - # our where function - def func(cond, values, other): - if cond.ravel().all(): - return values - - values, values_mask, other, other_mask = self._try_coerce_args( - values, other) - - try: - return self._try_coerce_result(expressions.where( - cond, values, other)) - except Exception as detail: - if errors == 'raise': - raise TypeError( - 'Could not operate [{other!r}] with block values ' - '[{detail!s}]'.format(other=other, detail=detail)) - else: - # return the values - result = np.empty(values.shape, dtype='float64') - result.fill(np.nan) - return result - - # see if we can operate on the entire block, or need item-by-item - # or if we are a single block (ndim == 1) - try: - result = func(cond, values, other) - except TypeError: - - # we cannot coerce, return a compat dtype - # we are explicitly ignoring errors - block = self.coerce_to_target_dtype(other) - blocks = block.where(orig_other, cond, align=align, - errors=errors, - try_cast=try_cast, 
axis=axis, - transpose=transpose) - return self._maybe_downcast(blocks, 'infer') - - if self._can_hold_na or self.ndim == 1: - - if transpose: - result = result.T - - # try to cast if requested - if try_cast: - result = self._try_cast_result(result) - - return self.make_block(result) - - # might need to separate out blocks - axis = cond.ndim - 1 - cond = cond.swapaxes(axis, 0) - mask = np.array([cond[i].all() for i in range(cond.shape[0])], - dtype=bool) - - result_blocks = [] - for m in [mask, ~mask]: - if m.any(): - r = self._try_cast_result(result.take(m.nonzero()[0], - axis=axis)) - result_blocks.append( - self.make_block(r.T, placement=self.mgr_locs[m])) - - return result_blocks - - def equals(self, other): - if self.dtype != other.dtype or self.shape != other.shape: - return False - return array_equivalent(self.values, other.values) - - def _unstack(self, unstacker_func, new_columns): - """Return a list of unstacked blocks of self - - Parameters - ---------- - unstacker_func : callable - Partially applied unstacker. - new_columns : Index - All columns of the unstacked BlockManager. - - Returns - ------- - blocks : list of Block - New blocks of unstacked values. - mask : array_like of bool - The mask of columns of `blocks` we should keep. 
- """ - unstacker = unstacker_func(self.values.T) - new_items = unstacker.get_new_columns() - new_placement = new_columns.get_indexer(new_items) - new_values, mask = unstacker.get_new_values() - - mask = mask.any(0) - new_values = new_values.T[mask] - new_placement = new_placement[mask] - - blocks = [make_block(new_values, placement=new_placement)] - return blocks, mask - - def quantile(self, qs, interpolation='linear', axis=0, mgr=None): - """ - compute the quantiles of the - - Parameters - ---------- - qs: a scalar or list of the quantiles to be computed - interpolation: type of interpolation, default 'linear' - axis: axis to compute, default 0 - - Returns - ------- - tuple of (axis, block) - - """ - kw = {'interpolation': interpolation} - values = self.get_values() - values, _, _, _ = self._try_coerce_args(values, values) - - def _nanpercentile1D(values, mask, q, **kw): - values = values[~mask] - - if len(values) == 0: - if is_scalar(q): - return self._na_value - else: - return np.array([self._na_value] * len(q), - dtype=values.dtype) - - return np.percentile(values, q, **kw) - - def _nanpercentile(values, q, axis, **kw): - - mask = isna(self.values) - if not is_scalar(mask) and mask.any(): - if self.ndim == 1: - return _nanpercentile1D(values, mask, q, **kw) - else: - # for nonconsolidatable blocks mask is 1D, but values 2D - if mask.ndim < values.ndim: - mask = mask.reshape(values.shape) - if axis == 0: - values = values.T - mask = mask.T - result = [_nanpercentile1D(val, m, q, **kw) for (val, m) - in zip(list(values), list(mask))] - result = np.array(result, dtype=values.dtype, copy=False).T - return result - else: - return np.percentile(values, q, axis=axis, **kw) - - from pandas import Float64Index - is_empty = values.shape[axis] == 0 - if is_list_like(qs): - ax = Float64Index(qs) - - if is_empty: - if self.ndim == 1: - result = self._na_value - else: - # create the array of na_values - # 2d len(values) * len(qs) - result = 
np.repeat(np.array([self._na_value] * len(qs)), - len(values)).reshape(len(values), - len(qs)) - else: - - try: - result = _nanpercentile(values, np.array(qs) * 100, - axis=axis, **kw) - except ValueError: - - # older numpies don't handle an array for q - result = [_nanpercentile(values, q * 100, - axis=axis, **kw) for q in qs] - - result = np.array(result, copy=False) - if self.ndim > 1: - result = result.T - - else: - - if self.ndim == 1: - ax = Float64Index([qs]) - else: - ax = mgr.axes[0] - - if is_empty: - if self.ndim == 1: - result = self._na_value - else: - result = np.array([self._na_value] * len(self)) - else: - result = _nanpercentile(values, qs * 100, axis=axis, **kw) - - ndim = getattr(result, 'ndim', None) or 0 - result = self._try_coerce_result(result) - if is_scalar(result): - return ax, self.make_block_scalar(result) - return ax, make_block(result, - placement=np.arange(len(result)), - ndim=ndim) - - -class ScalarBlock(Block): - """ - a scalar compat Block - """ - __slots__ = ['_mgr_locs', 'values', 'ndim'] - - def __init__(self, values): - self.ndim = 0 - self.mgr_locs = [0] - self.values = values - - @property - def dtype(self): - return type(self.values) - - @property - def shape(self): - return tuple([0]) - - def __len__(self): - return 0 - - -class NonConsolidatableMixIn(object): - """ hold methods for the nonconsolidatable blocks """ - _can_consolidate = False - _verify_integrity = False - _validate_ndim = False - - def __init__(self, values, placement, ndim=None): - """Initialize a non-consolidatable block. - - 'ndim' may be inferred from 'placement'. - - This will call continue to call __init__ for the other base - classes mixed in with this Mixin. 
- """ - # Placement must be converted to BlockPlacement so that we can check - # its length - if not isinstance(placement, BlockPlacement): - placement = BlockPlacement(placement) - - # Maybe infer ndim from placement - if ndim is None: - if len(placement) != 1: - ndim = 1 - else: - ndim = 2 - super(NonConsolidatableMixIn, self).__init__(values, placement, - ndim=ndim) - - @property - def shape(self): - if self.ndim == 1: - return (len(self.values)), - return (len(self.mgr_locs), len(self.values)) - - def get_values(self, dtype=None): - """ need to to_dense myself (and always return a ndim sized object) """ - values = self.values.to_dense() - if values.ndim == self.ndim - 1: - values = values.reshape((1,) + values.shape) - return values - - def iget(self, col): - - if self.ndim == 2 and isinstance(col, tuple): - col, loc = col - if not com.is_null_slice(col) and col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values[loc] - else: - if col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values - - def should_store(self, value): - return isinstance(value, self._holder) - - def set(self, locs, values, check=False): - assert locs.tolist() == [0] - self.values = values - - def putmask(self, mask, new, align=True, inplace=False, axis=0, - transpose=False, mgr=None): - """ - putmask the data to the block; we must be a single block and not - generate other blocks - - return the resulting block - - Parameters - ---------- - mask : the condition to respect - new : a ndarray/object - align : boolean, perform alignment on other/cond, default is True - inplace : perform inplace modification, default is False - - Returns - ------- - a new block, the result of the putmask - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - - # use block's copy logic. 
- # .values may be an Index which does shallow copy by default - new_values = self.values if inplace else self.copy().values - new_values, _, new, _ = self._try_coerce_args(new_values, new) - - if isinstance(new, np.ndarray) and len(new) == len(mask): - new = new[mask] - - mask = _safe_reshape(mask, new_values.shape) - - new_values[mask] = new - new_values = self._try_coerce_result(new_values) - return [self.make_block(values=new_values)] - - def _slice(self, slicer): - """ return a slice of my values (but densify first) """ - return self.get_values()[slicer] - - def _try_cast_result(self, result, dtype=None): - return result - - def _unstack(self, unstacker_func, new_columns): - """Return a list of unstacked blocks of self - - Parameters - ---------- - unstacker_func : callable - Partially applied unstacker. - new_columns : Index - All columns of the unstacked BlockManager. - - Returns - ------- - blocks : list of Block - New blocks of unstacked values. - mask : array_like of bool - The mask of columns of `blocks` we should keep. - """ - # NonConsolidatable blocks can have a single item only, so we return - # one block per item - unstacker = unstacker_func(self.values.T) - new_items = unstacker.get_new_columns() - new_placement = new_columns.get_indexer(new_items) - new_values, mask = unstacker.get_new_values() - - mask = mask.any(0) - new_values = new_values.T[mask] - new_placement = new_placement[mask] - - blocks = [self.make_block_same_class(vals, [place]) - for vals, place in zip(new_values, new_placement)] - return blocks, mask - - -class ExtensionBlock(NonConsolidatableMixIn, Block): - """Block for holding extension types. - - Notes - ----- - This holds all 3rd-party extension array types. It's also the immediate - parent class for our internal extension types' blocks, CategoricalBlock. - - ExtensionArrays are limited to 1-D. 
- """ - is_extension = True - - def __init__(self, values, placement, ndim=None): - values = self._maybe_coerce_values(values) - super(ExtensionBlock, self).__init__(values, placement, ndim) - - def _maybe_coerce_values(self, values): - """Unbox to an extension array. - - This will unbox an ExtensionArray stored in an Index or Series. - ExtensionArrays pass through. No dtype coercion is done. - - Parameters - ---------- - values : Index, Series, ExtensionArray - - Returns - ------- - ExtensionArray - """ - if isinstance(values, (ABCIndexClass, ABCSeries)): - values = values._values - return values - - @property - def _holder(self): - # For extension blocks, the holder is values-dependent. - return type(self.values) - - @property - def fill_value(self): - # Used in reindex_indexer - return self.values.dtype.na_value - - @property - def _can_hold_na(self): - # The default ExtensionArray._can_hold_na is True - return self._holder._can_hold_na - - @property - def is_view(self): - """Extension arrays are never treated as views.""" - return False - - def setitem(self, indexer, value, mgr=None): - """Set the value inplace, returning a same-typed block. - - This differs from Block.setitem by not allowing setitem to change - the dtype of the Block. - - Parameters - ---------- - indexer : tuple, list-like, array-like, slice - The subset of self.values to set - value : object - The value being set - mgr : BlockPlacement, optional - - Returns - ------- - Block - - Notes - ----- - `indexer` is a direct slice/positional indexer. `value` must - be a compatible shape. - """ - if isinstance(indexer, tuple): - # we are always 1-D - indexer = indexer[0] - - check_setitem_lengths(indexer, value, self.values) - self.values[indexer] = value - return self - - def get_values(self, dtype=None): - # ExtensionArrays must be iterable, so this works. 
- values = np.asarray(self.values) - if values.ndim == self.ndim - 1: - values = values.reshape((1,) + values.shape) - return values - - def to_dense(self): - return np.asarray(self.values) - - def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): - """ - Take values according to indexer and return them as a block. - """ - if fill_tuple is None: - fill_value = None - else: - fill_value = fill_tuple[0] - - # axis doesn't matter; we are really a single-dim object - # but are passed the axis depending on the calling routing - # if its REALLY axis 0, then this will be a reindex and not a take - new_values = self.values.take(indexer, fill_value=fill_value, - allow_fill=True) - - # if we are a 1-dim object, then always place at 0 - if self.ndim == 1: - new_mgr_locs = [0] - else: - if new_mgr_locs is None: - new_mgr_locs = self.mgr_locs - - return self.make_block_same_class(new_values, new_mgr_locs) - - def _can_hold_element(self, element): - # XXX: We may need to think about pushing this onto the array. - # We're doing the same as CategoricalBlock here. - return True - - def _slice(self, slicer): - """ return a slice of my values """ - - # slice the category - # return same dims as we currently have - - if isinstance(slicer, tuple) and len(slicer) == 2: - if not com.is_null_slice(slicer[0]): - raise AssertionError("invalid slicing for a 1-ndim " - "categorical") - slicer = slicer[1] - - return self.values[slicer] - - def formatting_values(self): - return self.values._formatting_values() - - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. 
- """ - values = self._holder._concat_same_type( - [blk.values for blk in to_concat]) - placement = placement or slice(0, len(values), 1) - return self.make_block_same_class(values, ndim=self.ndim, - placement=placement) - - def fillna(self, value, limit=None, inplace=False, downcast=None, - mgr=None): - values = self.values if inplace else self.values.copy() - values = values.fillna(value=value, limit=limit) - return [self.make_block_same_class(values=values, - placement=self.mgr_locs, - ndim=self.ndim)] - - def interpolate(self, method='pad', axis=0, inplace=False, limit=None, - fill_value=None, **kwargs): - - values = self.values if inplace else self.values.copy() - return self.make_block_same_class( - values=values.fillna(value=fill_value, method=method, - limit=limit), - placement=self.mgr_locs) - - -class NumericBlock(Block): - __slots__ = () - is_numeric = True - _can_hold_na = True - - -class FloatOrComplexBlock(NumericBlock): - __slots__ = () - - def equals(self, other): - if self.dtype != other.dtype or self.shape != other.shape: - return False - left, right = self.values, other.values - return ((left == right) | (np.isnan(left) & np.isnan(right))).all() - - -class FloatBlock(FloatOrComplexBlock): - __slots__ = () - is_float = True - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return (issubclass(tipo.type, (np.floating, np.integer)) and - not issubclass(tipo.type, (np.datetime64, np.timedelta64))) - return ( - isinstance( - element, (float, int, np.floating, np.int_, compat.long)) - and not isinstance(element, (bool, np.bool_, datetime, timedelta, - np.datetime64, np.timedelta64))) - - def to_native_types(self, slicer=None, na_rep='', float_format=None, - decimal='.', quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.values - if slicer is not None: - values = values[:, slicer] - - # see gh-13418: no special formatting is desired at the - 
# output (important for appropriate 'quoting' behaviour), - # so do not pass it through the FloatArrayFormatter - if float_format is None and decimal == '.': - mask = isna(values) - - if not quoting: - values = values.astype(str) - else: - values = np.array(values, dtype='object') - - values[mask] = na_rep - return values - - from pandas.io.formats.format import FloatArrayFormatter - formatter = FloatArrayFormatter(values, na_rep=na_rep, - float_format=float_format, - decimal=decimal, quoting=quoting, - fixed_width=False) - return formatter.get_result_as_array() - - def should_store(self, value): - # when inserting a column should not coerce integers to floats - # unnecessarily - return (issubclass(value.dtype.type, np.floating) and - value.dtype == self.dtype) - - -class ComplexBlock(FloatOrComplexBlock): - __slots__ = () - is_complex = True - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, - (np.floating, np.integer, np.complexfloating)) - return ( - isinstance( - element, - (float, int, complex, np.float_, np.int_, compat.long)) - and not isinstance(element, (bool, np.bool_))) - - def should_store(self, value): - return issubclass(value.dtype.type, np.complexfloating) - - -class IntBlock(NumericBlock): - __slots__ = () - is_integer = True - _can_hold_na = False - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return (issubclass(tipo.type, np.integer) and - not issubclass(tipo.type, (np.datetime64, - np.timedelta64)) and - self.dtype.itemsize >= tipo.itemsize) - return is_integer(element) - - def should_store(self, value): - return is_integer_dtype(value) and value.dtype == self.dtype - - -class DatetimeLikeBlockMixin(object): - """Mixin class for DatetimeBlock and DatetimeTZBlock.""" - - @property - def _holder(self): - return DatetimeIndex - - @property - def _na_value(self): - return tslibs.NaT - - @property - def 
fill_value(self): - return tslibs.iNaT - - def get_values(self, dtype=None): - """ - return object dtype as boxed values, such as Timestamps/Timedelta - """ - if is_object_dtype(dtype): - return lib.map_infer(self.values.ravel(), - self._box_func).reshape(self.values.shape) - return self.values - - -class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): - __slots__ = () - is_timedelta = True - _can_hold_na = True - is_numeric = False - - def __init__(self, values, placement, ndim=None): - if values.dtype != _TD_DTYPE: - values = conversion.ensure_timedelta64ns(values) - - super(TimeDeltaBlock, self).__init__(values, - placement=placement, ndim=ndim) - - @property - def _holder(self): - return TimedeltaIndex - - @property - def _box_func(self): - return lambda x: Timedelta(x, unit='ns') - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, np.timedelta64) - return is_integer(element) or isinstance( - element, (timedelta, np.timedelta64)) - - def fillna(self, value, **kwargs): - - # allow filling with integers to be - # interpreted as seconds - if is_integer(value) and not isinstance(value, np.timedelta64): - value = Timedelta(value, unit='s') - return super(TimeDeltaBlock, self).fillna(value, **kwargs) - - def _try_coerce_args(self, values, other): - """ - Coerce values and other to int64, with null values converted to - iNaT. 
values is always ndarray-like, other may not be - - Parameters - ---------- - values : ndarray-like - other : ndarray-like or scalar - - Returns - ------- - base-type values, values mask, base-type other, other mask - """ - - values_mask = isna(values) - values = values.view('i8') - other_mask = False - - if isinstance(other, bool): - raise TypeError - elif is_null_datelike_scalar(other): - other = tslibs.iNaT - other_mask = True - elif isinstance(other, Timedelta): - other_mask = isna(other) - other = other.value - elif isinstance(other, timedelta): - other = Timedelta(other).value - elif isinstance(other, np.timedelta64): - other_mask = isna(other) - other = Timedelta(other).value - elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): - other_mask = isna(other) - other = other.astype('i8', copy=False).view('i8') - else: - # coercion issues - # let higher levels handle - raise TypeError - - return values, values_mask, other, other_mask - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args / try_operate """ - if isinstance(result, np.ndarray): - mask = isna(result) - if result.dtype.kind in ['i', 'f', 'O']: - result = result.astype('m8[ns]') - result[mask] = tslibs.iNaT - elif isinstance(result, (np.integer, np.float)): - result = self._box_func(result) - return result - - def should_store(self, value): - return issubclass(value.dtype.type, np.timedelta64) - - def to_native_types(self, slicer=None, na_rep=None, quoting=None, - **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.values - if slicer is not None: - values = values[:, slicer] - mask = isna(values) - - rvalues = np.empty(values.shape, dtype=object) - if na_rep is None: - na_rep = 'NaT' - rvalues[mask] = na_rep - imask = (~mask).ravel() - - # FIXME: - # should use the formats.format.Timedelta64Formatter here - # to figure what format to pass to the Timedelta - # e.g. 
to not show the decimals say - rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') - for val in values.ravel()[imask]], - dtype=object) - return rvalues - - -class BoolBlock(NumericBlock): - __slots__ = () - is_bool = True - _can_hold_na = False - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, np.bool_) - return isinstance(element, (bool, np.bool_)) - - def should_store(self, value): - return issubclass(value.dtype.type, np.bool_) - - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - inplace = validate_bool_kwarg(inplace, 'inplace') - to_replace_values = np.atleast_1d(to_replace) - if not np.can_cast(to_replace_values, bool): - return self - return super(BoolBlock, self).replace(to_replace, value, - inplace=inplace, filter=filter, - regex=regex, convert=convert, - mgr=mgr) - - -class ObjectBlock(Block): - __slots__ = () - is_object = True - _can_hold_na = True - - def __init__(self, values, placement=None, ndim=2): - if issubclass(values.dtype.type, compat.string_types): - values = np.array(values, dtype=object) - - super(ObjectBlock, self).__init__(values, ndim=ndim, - placement=placement) - - @property - def is_bool(self): - """ we can be a bool if we have only bool values but are of type - object - """ - return lib.is_bool_array(self.values.ravel()) - - # TODO: Refactor when convert_objects is removed since there will be 1 path - def convert(self, *args, **kwargs): - """ attempt to coerce any object types to better types return a copy of - the block (if copy = True) by definition we ARE an ObjectBlock!!!!! - - can return multiple blocks! 
- """ - - if args: - raise NotImplementedError - by_item = True if 'by_item' not in kwargs else kwargs['by_item'] - - new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] - new_style = False - for kw in new_inputs: - new_style |= kw in kwargs - - if new_style: - fn = soft_convert_objects - fn_inputs = new_inputs - else: - fn = maybe_convert_objects - fn_inputs = ['convert_dates', 'convert_numeric', - 'convert_timedeltas'] - fn_inputs += ['copy'] - - fn_kwargs = {} - for key in fn_inputs: - if key in kwargs: - fn_kwargs[key] = kwargs[key] - - # operate column-by-column - def f(m, v, i): - shape = v.shape - values = fn(v.ravel(), **fn_kwargs) - try: - values = values.reshape(shape) - values = _block_shape(values, ndim=self.ndim) - except (AttributeError, NotImplementedError): - pass - - return values - - if by_item and not self._is_single_block: - blocks = self.split_and_operate(None, f, False) - else: - values = f(None, self.values.ravel(), None) - blocks = [make_block(values, ndim=self.ndim, - placement=self.mgr_locs)] - - return blocks - - def set(self, locs, values, check=False): - """ - Modify Block in-place with new item value - - Returns - ------- - None - """ - - # GH6026 - if check: - try: - if (self.values[locs] == values).all(): - return - except: - pass - try: - self.values[locs] = values - except (ValueError): - - # broadcasting error - # see GH6171 - new_shape = list(values.shape) - new_shape[0] = len(self.items) - self.values = np.empty(tuple(new_shape), dtype=self.dtype) - self.values.fill(np.nan) - self.values[locs] = values - - def _maybe_downcast(self, blocks, downcast=None): - - if downcast is not None: - return blocks - - # split and convert the blocks - return _extend_blocks([b.convert(datetime=True, numeric=False) - for b in blocks]) - - def _can_hold_element(self, element): - return True - - def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments """ - - if isinstance(other, ABCDatetimeIndex): - # to 
store DatetimeTZBlock as object - other = other.astype(object).values - - return values, False, other, False - - def should_store(self, value): - return not (issubclass(value.dtype.type, - (np.integer, np.floating, np.complexfloating, - np.datetime64, np.bool_)) or - # TODO(ExtensionArray): remove is_extension_type - # when all extension arrays have been ported. - is_extension_type(value) or - is_extension_array_dtype(value)) - - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - to_rep_is_list = is_list_like(to_replace) - value_is_list = is_list_like(value) - both_lists = to_rep_is_list and value_is_list - either_list = to_rep_is_list or value_is_list - - result_blocks = [] - blocks = [self] - - if not either_list and is_re(to_replace): - return self._replace_single(to_replace, value, inplace=inplace, - filter=filter, regex=True, - convert=convert, mgr=mgr) - elif not (either_list or regex): - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - elif both_lists: - for to_rep, v in zip(to_replace, value): - result_blocks = [] - for b in blocks: - result = b._replace_single(to_rep, v, inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - result_blocks = _extend_blocks(result, result_blocks) - blocks = result_blocks - return result_blocks - - elif to_rep_is_list and regex: - for to_rep in to_replace: - result_blocks = [] - for b in blocks: - result = b._replace_single(to_rep, value, inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - result_blocks = _extend_blocks(result, result_blocks) - blocks = result_blocks - return result_blocks - - return self._replace_single(to_replace, value, inplace=inplace, - filter=filter, convert=convert, - regex=regex, mgr=mgr) - - def _replace_single(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - - 
inplace = validate_bool_kwarg(inplace, 'inplace') - - # to_replace is regex compilable - to_rep_re = regex and is_re_compilable(to_replace) - - # regex is regex compilable - regex_re = is_re_compilable(regex) - - # only one will survive - if to_rep_re and regex_re: - raise AssertionError('only one of to_replace and regex can be ' - 'regex compilable') - - # if regex was passed as something that can be a regex (rather than a - # boolean) - if regex_re: - to_replace = regex - - regex = regex_re or to_rep_re - - # try to get the pattern attribute (compiled re) or it's a string - try: - pattern = to_replace.pattern - except AttributeError: - pattern = to_replace - - # if the pattern is not empty and to_replace is either a string or a - # regex - if regex and pattern: - rx = re.compile(to_replace) - else: - # if the thing to replace is not a string or compiled regex call - # the superclass method -> to_replace is some kind of object - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex, - mgr=mgr) - - new_values = self.values if inplace else self.values.copy() - - # deal with replacing values with objects (strings) that match but - # whose replacement is not a string (numeric, nan, object) - if isna(value) or not isinstance(value, compat.string_types): - - def re_replacer(s): - try: - return value if rx.search(s) is not None else s - except TypeError: - return s - else: - # value is guaranteed to be a string here, s can be either a string - # or null if it's null it gets returned - def re_replacer(s): - try: - return rx.sub(value, s) - except TypeError: - return s - - f = np.vectorize(re_replacer, otypes=[self.dtype]) - - if filter is None: - filt = slice(None) - else: - filt = self.mgr_locs.isin(filter).nonzero()[0] - - new_values[filt] = f(new_values[filt]) - - # convert - block = self.make_block(new_values) - if convert: - block = block.convert(by_item=True, numeric=False) - - return block - - -class 
CategoricalBlock(ExtensionBlock): - __slots__ = () - is_categorical = True - _verify_integrity = True - _can_hold_na = True - _concatenator = staticmethod(_concat._concat_categorical) - - def __init__(self, values, placement, ndim=None): - from pandas.core.arrays.categorical import _maybe_to_categorical - - # coerce to categorical if we can - super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), - placement=placement, - ndim=ndim) - - @property - def _holder(self): - return Categorical - - @property - def array_dtype(self): - """ the dtype to return if I want to construct this block as an - array - """ - return np.object_ - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - - # GH12564: CategoricalBlock is 1-dim only - # while returned results could be any dim - if ((not is_categorical_dtype(result)) and - isinstance(result, np.ndarray)): - result = _block_shape(result, ndim=self.ndim) - - return result - - def shift(self, periods, axis=0, mgr=None): - return self.make_block_same_class(values=self.values.shift(periods), - placement=self.mgr_locs) - - def to_dense(self): - # Categorical.get_values returns a DatetimeIndex for datetime - # categories, so we can't simply use `np.asarray(self.values)` like - # other types. - return self.values.get_values() - - def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.values - if slicer is not None: - # Categorical is always one dimension - values = values[slicer] - mask = isna(values) - values = np.array(values, dtype='object') - values[mask] = na_rep - - # we are expected to return a 2-d ndarray - return values.reshape(1, len(values)) - - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. - - Note that this CategoricalBlock._concat_same_type *may* not - return a CategoricalBlock. 
When the categories in `to_concat` - differ, this will return an object ndarray. - - If / when we decide we don't like that behavior: - - 1. Change Categorical._concat_same_type to use union_categoricals - 2. Delete this method. - """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - # not using self.make_block_same_class as values can be object dtype - return make_block( - values, placement=placement or slice(0, len(values), 1), - ndim=self.ndim) - - -class DatetimeBlock(DatetimeLikeBlockMixin, Block): - __slots__ = () - is_datetime = True - _can_hold_na = True - - def __init__(self, values, placement, ndim=None): - values = self._maybe_coerce_values(values) - super(DatetimeBlock, self).__init__(values, - placement=placement, ndim=ndim) - - def _maybe_coerce_values(self, values): - """Input validation for values passed to __init__. Ensure that - we have datetime64ns, coercing if necessary. - - Parameters - ---------- - values : array-like - Must be convertible to datetime64 - - Returns - ------- - values : ndarray[datetime64ns] - - Overridden by DatetimeTZBlock. 
- """ - if values.dtype != _NS_DTYPE: - values = conversion.ensure_datetime64ns(values) - return values - - def _astype(self, dtype, mgr=None, **kwargs): - """ - these automatically copy, so copy=True has no effect - raise on an except if raise == True - """ - - # if we are passed a datetime64[ns, tz] - if is_datetime64tz_dtype(dtype): - dtype = DatetimeTZDtype(dtype) - - values = self.values - if getattr(values, 'tz', None) is None: - values = DatetimeIndex(values).tz_localize('UTC') - values = values.tz_convert(dtype.tz) - return self.make_block(values) - - # delegate - return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) - - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - # TODO: this still uses asarray, instead of dtype.type - element = np.array(element) - return element.dtype == _NS_DTYPE or element.dtype == np.int64 - return (is_integer(element) or isinstance(element, datetime) or - isna(element)) - - def _try_coerce_args(self, values, other): - """ - Coerce values and other to dtype 'i8'. NaN and NaT convert to - the smallest i8, and will correctly round-trip to NaT if converted - back in _try_coerce_result. 
values is always ndarray-like, other - may not be - - Parameters - ---------- - values : ndarray-like - other : ndarray-like or scalar - - Returns - ------- - base-type values, values mask, base-type other, other mask - """ - - values_mask = isna(values) - values = values.view('i8') - other_mask = False - - if isinstance(other, bool): - raise TypeError - elif is_null_datelike_scalar(other): - other = tslibs.iNaT - other_mask = True - elif isinstance(other, (datetime, np.datetime64, date)): - other = self._box_func(other) - if getattr(other, 'tz') is not None: - raise TypeError("cannot coerce a Timestamp with a tz on a " - "naive Block") - other_mask = isna(other) - other = other.asm8.view('i8') - elif hasattr(other, 'dtype') and is_datetime64_dtype(other): - other_mask = isna(other) - other = other.astype('i8', copy=False).view('i8') - else: - # coercion issues - # let higher levels handle - raise TypeError - - return values, values_mask, other, other_mask - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): - if result.dtype.kind in ['i', 'f', 'O']: - try: - result = result.astype('M8[ns]') - except ValueError: - pass - elif isinstance(result, (np.integer, np.float, np.datetime64)): - result = self._box_func(result) - return result - - @property - def _box_func(self): - return tslibs.Timestamp - - def to_native_types(self, slicer=None, na_rep=None, date_format=None, - quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ - - values = self.values - if slicer is not None: - values = values[..., slicer] - - from pandas.io.formats.format import _get_format_datetime64_from_values - format = _get_format_datetime64_from_values(values, date_format) - - result = tslib.format_array_from_datetime( - values.view('i8').ravel(), tz=getattr(self.values, 'tz', None), - format=format, na_rep=na_rep).reshape(values.shape) - return np.atleast_2d(result) - - def should_store(self, 
value): - return (issubclass(value.dtype.type, np.datetime64) and - not is_datetimetz(value)) - - def set(self, locs, values, check=False): - """ - Modify Block in-place with new item value - - Returns - ------- - None - """ - if values.dtype != _NS_DTYPE: - # Workaround for numpy 1.6 bug - values = conversion.ensure_datetime64ns(values) - - self.values[locs] = values - - -class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): - """ implement a datetime64 block with a tz attribute """ - __slots__ = () - _concatenator = staticmethod(_concat._concat_datetime) - is_datetimetz = True - - def __init__(self, values, placement, ndim=2, dtype=None): - # XXX: This will end up calling _maybe_coerce_values twice - # when dtype is not None. It's relatively cheap (just an isinstance) - # but it'd nice to avoid. - # - # If we can remove dtype from __init__, and push that conversion - # push onto the callers, then we can remove this entire __init__ - # and just use DatetimeBlock's. - if dtype is not None: - values = self._maybe_coerce_values(values, dtype=dtype) - super(DatetimeTZBlock, self).__init__(values, placement=placement, - ndim=ndim) - - def _maybe_coerce_values(self, values, dtype=None): - """Input validation for values passed to __init__. Ensure that - we have datetime64TZ, coercing if necessary. 
- - Parametetrs - ----------- - values : array-like - Must be convertible to datetime64 - dtype : string or DatetimeTZDtype, optional - Does a shallow copy to this tz - - Returns - ------- - values : ndarray[datetime64ns] - """ - if not isinstance(values, self._holder): - values = self._holder(values) - - if dtype is not None: - if isinstance(dtype, compat.string_types): - dtype = DatetimeTZDtype.construct_from_string(dtype) - values = values._shallow_copy(tz=dtype.tz) - - if values.tz is None: - raise ValueError("cannot create a DatetimeTZBlock without a tz") - - return values - - @property - def is_view(self): - """ return a boolean if I am possibly a view """ - # check the ndarray values of the DatetimeIndex values - return self.values.values.base is not None - - def copy(self, deep=True, mgr=None): - """ copy constructor """ - values = self.values - if deep: - values = values.copy(deep=True) - return self.make_block_same_class(values) - - def external_values(self): - """ we internally represent the data as a DatetimeIndex, but for - external compat with ndarray, export as a ndarray of Timestamps - """ - return self.values.astype('datetime64[ns]').values - - def get_values(self, dtype=None): - # return object dtype as Timestamps with the zones - if is_object_dtype(dtype): - return lib.map_infer( - self.values.ravel(), self._box_func).reshape(self.values.shape) - return self.values - - def _slice(self, slicer): - """ return a slice of my values """ - if isinstance(slicer, tuple): - col, loc = slicer - if not com.is_null_slice(col) and col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values[loc] - return self.values[slicer] - - def _try_coerce_args(self, values, other): - """ - localize and return i8 for the values - - Parameters - ---------- - values : ndarray-like - other : ndarray-like or scalar - - Returns - ------- - base-type values, values mask, base-type other, other mask - """ - values_mask = _block_shape(isna(values), 
ndim=self.ndim) - # asi8 is a view, needs copy - values = _block_shape(values.asi8, ndim=self.ndim) - other_mask = False - - if isinstance(other, ABCSeries): - other = self._holder(other) - other_mask = isna(other) - - if isinstance(other, bool): - raise TypeError - elif (is_null_datelike_scalar(other) or - (is_scalar(other) and isna(other))): - other = tslibs.iNaT - other_mask = True - elif isinstance(other, self._holder): - if other.tz != self.values.tz: - raise ValueError("incompatible or non tz-aware value") - other_mask = _block_shape(isna(other), ndim=self.ndim) - other = _block_shape(other.asi8, ndim=self.ndim) - elif isinstance(other, (np.datetime64, datetime, date)): - other = tslibs.Timestamp(other) - tz = getattr(other, 'tz', None) - - # test we can have an equal time zone - if tz is None or str(tz) != str(self.values.tz): - raise ValueError("incompatible or non tz-aware value") - other_mask = isna(other) - other = other.value - else: - raise TypeError - - return values, values_mask, other, other_mask - - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): - if result.dtype.kind in ['i', 'f', 'O']: - result = result.astype('M8[ns]') - elif isinstance(result, (np.integer, np.float, np.datetime64)): - result = tslibs.Timestamp(result, tz=self.values.tz) - if isinstance(result, np.ndarray): - # allow passing of > 1dim if its trivial - if result.ndim > 1: - result = result.reshape(np.prod(result.shape)) - result = self.values._shallow_copy(result) - - return result - - @property - def _box_func(self): - return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) - - def shift(self, periods, axis=0, mgr=None): - """ shift the block by periods """ - - # think about moving this to the DatetimeIndex. 
This is a non-freq - # (number of periods) shift ### - - N = len(self) - indexer = np.zeros(N, dtype=int) - if periods > 0: - indexer[periods:] = np.arange(N - periods) - else: - indexer[:periods] = np.arange(-periods, N) - - new_values = self.values.asi8.take(indexer) - - if periods > 0: - new_values[:periods] = tslibs.iNaT - else: - new_values[periods:] = tslibs.iNaT - - new_values = self.values._shallow_copy(new_values) - return [self.make_block_same_class(new_values, - placement=self.mgr_locs)] - - def diff(self, n, axis=0, mgr=None): - """1st discrete difference - - Parameters - ---------- - n : int, number of periods to diff - axis : int, axis to diff upon. default 0 - mgr : default None - - Return - ------ - A list with a new TimeDeltaBlock. - - Note - ---- - The arguments here are mimicking shift so they are called correctly - by apply. - """ - if axis == 0: - # Cannot currently calculate diff across multiple blocks since this - # function is invoked via apply - raise NotImplementedError - new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8 - - # Reshape the new_values like how algos.diff does for timedelta data - new_values = new_values.reshape(1, len(new_values)) - new_values = new_values.astype('timedelta64[ns]') - return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] - - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. 
- """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - # not using self.make_block_same_class as values can be non-tz dtype - return make_block( - values, placement=placement or slice(0, len(values), 1)) - - -class SparseBlock(NonConsolidatableMixIn, Block): - """ implement as a list of sparse arrays of the same dtype """ - __slots__ = () - is_sparse = True - is_numeric = True - _box_to_block_values = False - _can_hold_na = True - _ftype = 'sparse' - _concatenator = staticmethod(_concat._concat_sparse) - - def __init__(self, values, placement, ndim=None): - # Ensure that we have the underlying SparseArray here... - if isinstance(values, ABCSeries): - values = values.values - assert isinstance(values, SparseArray) - super(SparseBlock, self).__init__(values, placement, ndim=ndim) - - @property - def _holder(self): - return SparseArray - - @property - def shape(self): - return (len(self.mgr_locs), self.sp_index.length) - - @property - def fill_value(self): - # return np.nan - return self.values.fill_value - - @fill_value.setter - def fill_value(self, v): - self.values.fill_value = v - - def to_dense(self): - return self.values.to_dense().view() - - @property - def sp_values(self): - return self.values.sp_values - - @sp_values.setter - def sp_values(self, v): - # reset the sparse values - self.values = SparseArray(v, sparse_index=self.sp_index, - kind=self.kind, dtype=v.dtype, - fill_value=self.values.fill_value, - copy=False) - - @property - def sp_index(self): - return self.values.sp_index - - @property - def kind(self): - return self.values.kind - - def _astype(self, dtype, copy=False, errors='raise', values=None, - klass=None, mgr=None, **kwargs): - if values is None: - values = self.values - values = values.astype(dtype, copy=copy) - return self.make_block_same_class(values=values, - placement=self.mgr_locs) - - def __len__(self): - try: - return self.sp_index.length - except: - return 0 - - def copy(self, deep=True, 
mgr=None): - return self.make_block_same_class(values=self.values, - sparse_index=self.sp_index, - kind=self.kind, copy=deep, - placement=self.mgr_locs) - - def make_block_same_class(self, values, placement, sparse_index=None, - kind=None, dtype=None, fill_value=None, - copy=False, ndim=None): - """ return a new block """ - if dtype is None: - dtype = values.dtype - if fill_value is None and not isinstance(values, SparseArray): - fill_value = self.values.fill_value - - # if not isinstance(values, SparseArray) and values.ndim != self.ndim: - # raise ValueError("ndim mismatch") - - if values.ndim == 2: - nitems = values.shape[0] - - if nitems == 0: - # kludgy, but SparseBlocks cannot handle slices, where the - # output is 0-item, so let's convert it to a dense block: it - # won't take space since there's 0 items, plus it will preserve - # the dtype. - return self.make_block(np.empty(values.shape, dtype=dtype), - placement) - elif nitems > 1: - raise ValueError("Only 1-item 2d sparse blocks are supported") - else: - values = values.reshape(values.shape[1]) - - new_values = SparseArray(values, sparse_index=sparse_index, - kind=kind or self.kind, dtype=dtype, - fill_value=fill_value, copy=copy) - return self.make_block(new_values, - placement=placement) - - def interpolate(self, method='pad', axis=0, inplace=False, limit=None, - fill_value=None, **kwargs): - - values = missing.interpolate_2d(self.values.to_dense(), method, axis, - limit, fill_value) - return self.make_block_same_class(values=values, - placement=self.mgr_locs) - - def fillna(self, value, limit=None, inplace=False, downcast=None, - mgr=None): - # we may need to upcast our fill to match our dtype - if limit is not None: - raise NotImplementedError("specifying a limit for 'fillna' has " - "not been implemented yet") - values = self.values if inplace else self.values.copy() - values = values.fillna(value, downcast=downcast) - return [self.make_block_same_class(values=values, - placement=self.mgr_locs)] - - 
def shift(self, periods, axis=0, mgr=None): - """ shift the block by periods """ - N = len(self.values.T) - indexer = np.zeros(N, dtype=int) - if periods > 0: - indexer[periods:] = np.arange(N - periods) - else: - indexer[:periods] = np.arange(-periods, N) - new_values = self.values.to_dense().take(indexer) - # convert integer to float if necessary. need to do a lot more than - # that, handle boolean etc also - new_values, fill_value = maybe_upcast(new_values) - if periods > 0: - new_values[:periods] = fill_value - else: - new_values[periods:] = fill_value - return [self.make_block_same_class(new_values, - placement=self.mgr_locs)] - - def sparse_reindex(self, new_index): - """ sparse reindex and return a new block - current reindex only works for float64 dtype! """ - values = self.values - values = values.sp_index.to_int_index().reindex( - values.sp_values.astype('float64'), values.fill_value, new_index) - return self.make_block_same_class(values, sparse_index=new_index, - placement=self.mgr_locs) - - -def get_block_type(values, dtype=None): - """ - Find the appropriate Block subclass to use for the given values and dtype. 
- - Parameters - ---------- - values : ndarray-like - dtype : numpy or pandas dtype - - Returns - ------- - cls : class, subclass of Block - """ - dtype = dtype or values.dtype - vtype = dtype.type - - if is_sparse(values): - cls = SparseBlock - elif issubclass(vtype, np.floating): - cls = FloatBlock - elif issubclass(vtype, np.timedelta64): - assert issubclass(vtype, np.integer) - cls = TimeDeltaBlock - elif issubclass(vtype, np.complexfloating): - cls = ComplexBlock - elif is_categorical(values): - cls = CategoricalBlock - elif is_extension_array_dtype(values): - cls = ExtensionBlock - elif issubclass(vtype, np.datetime64): - assert not is_datetimetz(values) - cls = DatetimeBlock - elif is_datetimetz(values): - cls = DatetimeTZBlock - elif issubclass(vtype, np.integer): - cls = IntBlock - elif dtype == np.bool_: - cls = BoolBlock - else: - cls = ObjectBlock - return cls - - -def make_block(values, placement, klass=None, ndim=None, dtype=None, - fastpath=None): - if fastpath is not None: - # GH#19265 pyarrow is passing this - warnings.warn("fastpath argument is deprecated, will be removed " - "in a future release.", DeprecationWarning) - if klass is None: - dtype = dtype or values.dtype - klass = get_block_type(values, dtype) - - elif klass is DatetimeTZBlock and not is_datetimetz(values): - return klass(values, ndim=ndim, - placement=placement, dtype=dtype) - - return klass(values, ndim=ndim, placement=placement) +from .blocks import ( + Block, + _extend_blocks, _merge_blocks, _safe_reshape, + make_block, get_block_type) +from .blocks import ( # noqa:F401 + _block2d_to_blocknd, _factor_indexer, _block_shape, # io.pytables + FloatBlock, IntBlock, ComplexBlock, BoolBlock, ObjectBlock, + TimeDeltaBlock, DatetimeBlock, DatetimeTZBlock, + CategoricalBlock, ExtensionBlock, SparseBlock, ScalarBlock) # TODO: flexible with index=None and/or items=None @@ -5082,70 +1925,6 @@ def _consolidate(blocks): return new_blocks -def _merge_blocks(blocks, dtype=None, 
_can_consolidate=True): - - if len(blocks) == 1: - return blocks[0] - - if _can_consolidate: - - if dtype is None: - if len({b.dtype for b in blocks}) != 1: - raise AssertionError("_merge_blocks are invalid!") - dtype = blocks[0].dtype - - # FIXME: optimization potential in case all mgrs contain slices and - # combination of those slices is a slice, too. - new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) - new_values = _vstack([b.values for b in blocks], dtype) - - argsort = np.argsort(new_mgr_locs) - new_values = new_values[argsort] - new_mgr_locs = new_mgr_locs[argsort] - - return make_block(new_values, placement=new_mgr_locs) - - # no merge - return blocks - - -def _extend_blocks(result, blocks=None): - """ return a new extended blocks, givin the result """ - if blocks is None: - blocks = [] - if isinstance(result, list): - for r in result: - if isinstance(r, list): - blocks.extend(r) - else: - blocks.append(r) - elif isinstance(result, BlockManager): - blocks.extend(result.blocks) - else: - blocks.append(result) - return blocks - - -def _block_shape(values, ndim=1, shape=None): - """ guarantee the shape of the values to be at least 1 d """ - if values.ndim < ndim: - if shape is None: - shape = values.shape - values = values.reshape(tuple((1, ) + shape)) - return values - - -def _vstack(to_stack, dtype): - - # work around NumPy 1.6 bug - if dtype == _NS_DTYPE or dtype == _TD_DTYPE: - new_values = np.vstack([x.view('i8') for x in to_stack]) - return new_values.view(dtype) - - else: - return np.vstack(to_stack) - - def _maybe_compare(a, b, op): is_a_array = isinstance(a, np.ndarray) @@ -5181,41 +1960,6 @@ def _concat_indexes(indexes): return indexes[0].append(indexes[1:]) -def _block2d_to_blocknd(values, placement, shape, labels, ref_items): - """ pivot to the labels shape """ - panel_shape = (len(placement),) + shape - - # TODO: lexsort depth needs to be 2!! 
- - # Create observation selection vector using major and minor - # labels, for converting to panel format. - selector = _factor_indexer(shape[1:], labels) - mask = np.zeros(np.prod(shape), dtype=bool) - mask.put(selector, True) - - if mask.all(): - pvalues = np.empty(panel_shape, dtype=values.dtype) - else: - dtype, fill_value = maybe_promote(values.dtype) - pvalues = np.empty(panel_shape, dtype=dtype) - pvalues.fill(fill_value) - - for i in range(len(placement)): - pvalues[i].flat[mask] = values[:, i] - - return make_block(pvalues, placement=placement) - - -def _factor_indexer(shape, labels): - """ - given a tuple of shape and a list of Categorical labels, return the - expanded label indexer - """ - mult = np.array(shape)[::-1].cumprod()[::-1] - return ensure_platform_int( - np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) - - def _get_blkno_placements(blknos, blk_count, group=True): """ @@ -5268,28 +2012,6 @@ def rrenamer(x): _transform_index(right, rrenamer)) -def _safe_reshape(arr, new_shape): - """ - If possible, reshape `arr` to have shape `new_shape`, - with a couple of exceptions (see gh-13012): - - 1) If `arr` is a ExtensionArray or Index, `arr` will be - returned as is. - 2) If `arr` is a Series, the `_values` attribute will - be reshaped and returned. - - Parameters - ---------- - arr : array-like, object to be reshaped - new_shape : int or tuple of ints, the new shape - """ - if isinstance(arr, ABCSeries): - arr = arr._values - if not isinstance(arr, ABCExtensionArray): - arr = arr.reshape(new_shape) - return arr - - def _transform_index(index, func, level=None): """ Apply function to all values found in index. @@ -5310,92 +2032,6 @@ def _transform_index(index, func, level=None): return Index(items, name=index.name, tupleize_cols=False) -def _putmask_smart(v, m, n): - """ - Return a new ndarray, try to preserve dtype if possible. 
- - Parameters - ---------- - v : `values`, updated in-place (array like) - m : `mask`, applies to both sides (array like) - n : `new values` either scalar or an array like aligned with `values` - - Returns - ------- - values : ndarray with updated values - this *may* be a copy of the original - - See Also - -------- - ndarray.putmask - """ - - # we cannot use np.asarray() here as we cannot have conversions - # that numpy does when numeric are mixed with strings - - # n should be the length of the mask or a scalar here - if not is_list_like(n): - n = np.repeat(n, len(m)) - elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar - n = np.repeat(np.array(n, ndmin=1), len(m)) - - # see if we are only masking values that if putted - # will work in the current dtype - try: - nn = n[m] - - # make sure that we have a nullable type - # if we have nulls - if not _isna_compat(v, nn[0]): - raise ValueError - - # we ignore ComplexWarning here - with catch_warnings(record=True): - nn_at = nn.astype(v.dtype) - - # avoid invalid dtype comparisons - # between numbers & strings - - # only compare integers/floats - # don't compare integers to datetimelikes - if (not is_numeric_v_string_like(nn, nn_at) and - (is_float_dtype(nn.dtype) or - is_integer_dtype(nn.dtype) and - is_float_dtype(nn_at.dtype) or - is_integer_dtype(nn_at.dtype))): - - comp = (nn == nn_at) - if is_list_like(comp) and comp.all(): - nv = v.copy() - nv[m] = nn_at - return nv - except (ValueError, IndexError, TypeError): - pass - - n = np.asarray(n) - - def _putmask_preserve(nv, n): - try: - nv[m] = n[m] - except (IndexError, ValueError): - nv[m] = n - return nv - - # preserves dtype if possible - if v.dtype.kind == n.dtype.kind: - return _putmask_preserve(v, n) - - # change the dtype if needed - dtype, _ = maybe_promote(n.dtype) - - if is_extension_type(v.dtype) and is_object_dtype(dtype): - v = v.get_values(dtype) - else: - v = v.astype(dtype) - - return _putmask_preserve(v, n) - - def 
concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): """ Concatenate block managers into one. diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py new file mode 100644 index 0000000000000..ffa2267dd6877 --- /dev/null +++ b/pandas/core/internals/blocks.py @@ -0,0 +1,3417 @@ +# -*- coding: utf-8 -*- +import warnings +import inspect +import re +from datetime import datetime, timedelta, date + +import numpy as np + +from pandas._libs import lib, tslib, tslibs, internals as libinternals +from pandas._libs.tslibs import conversion, Timedelta + +from pandas import compat +from pandas.compat import range, zip + +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, DatetimeTZDtype, + PandasExtensionDtype, + CategoricalDtype) +from pandas.core.dtypes.common import ( + _TD_DTYPE, _NS_DTYPE, + ensure_platform_int, + is_integer, + is_dtype_equal, + is_timedelta64_dtype, + is_datetime64_dtype, is_datetimetz, is_sparse, + is_categorical, is_categorical_dtype, + is_integer_dtype, + is_datetime64tz_dtype, + is_bool_dtype, + is_object_dtype, + is_float_dtype, + is_numeric_v_string_like, is_extension_type, + is_extension_array_dtype, + is_list_like, + is_re, + is_re_compilable, + pandas_dtype) +from pandas.core.dtypes.cast import ( + maybe_downcast_to_dtype, + maybe_upcast, + maybe_promote, + infer_dtype_from, + infer_dtype_from_scalar, + soft_convert_objects, + maybe_convert_objects, + astype_nansafe, + find_common_type, + maybe_infer_dtype_type) +from pandas.core.dtypes.missing import ( + isna, notna, array_equivalent, + _isna_compat, + is_null_datelike_scalar) +import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.generic import ( + ABCSeries, + ABCDatetimeIndex, + ABCExtensionArray, + ABCIndexClass) + +import pandas.core.common as com +import pandas.core.algorithms as algos +import pandas.core.missing as missing +from pandas.core.base import PandasObject + +from 
pandas.core.arrays import Categorical +from pandas.core.sparse.array import SparseArray + +from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex +from pandas.core.indexing import check_setitem_lengths + +from pandas.io.formats.printing import pprint_thing + + +class Block(PandasObject): + """ + Canonical n-dimensional unit of homogeneous dtype contained in a pandas + data structure + + Index-ignorant; let the container take care of that + """ + __slots__ = ['_mgr_locs', 'values', 'ndim'] + is_numeric = False + is_float = False + is_integer = False + is_complex = False + is_datetime = False + is_datetimetz = False + is_timedelta = False + is_bool = False + is_object = False + is_categorical = False + is_sparse = False + is_extension = False + _box_to_block_values = True + _can_hold_na = False + _can_consolidate = True + _verify_integrity = True + _validate_ndim = True + _ftype = 'dense' + _concatenator = staticmethod(np.concatenate) + + def __init__(self, values, placement, ndim=None): + self.ndim = self._check_ndim(values, ndim) + self.mgr_locs = placement + self.values = values + + if (self._validate_ndim and self.ndim and + len(self.mgr_locs) != len(self.values)): + raise ValueError( + 'Wrong number of items passed {val}, placement implies ' + '{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs))) + + def _check_ndim(self, values, ndim): + """ndim inference and validation. + + Infers ndim from 'values' if not provided to __init__. + Validates that values.ndim and ndim are consistent if and only if + the class variable '_validate_ndim' is True. + + Parameters + ---------- + values : array-like + ndim : int or None + + Returns + ------- + ndim : int + + Raises + ------ + ValueError : the number of dimensions do not match + """ + if ndim is None: + ndim = values.ndim + + if self._validate_ndim and values.ndim != ndim: + msg = ("Wrong number of dimensions. 
values.ndim != ndim " + "[{} != {}]") + raise ValueError(msg.format(values.ndim, ndim)) + + return ndim + + @property + def _holder(self): + """The array-like that can hold the underlying values. + + None for 'Block', overridden by subclasses that don't + use an ndarray. + """ + return None + + @property + def _consolidate_key(self): + return (self._can_consolidate, self.dtype.name) + + @property + def _is_single_block(self): + return self.ndim == 1 + + @property + def is_view(self): + """ return a boolean if I am possibly a view """ + return self.values.base is not None + + @property + def is_datelike(self): + """ return True if I am a non-datelike """ + return self.is_datetime or self.is_timedelta + + def is_categorical_astype(self, dtype): + """ + validate that we have a astypeable to categorical, + returns a boolean if we are a categorical + """ + if dtype is Categorical or dtype is CategoricalDtype: + # this is a pd.Categorical, but is not + # a valid type for astypeing + raise TypeError("invalid type {0} for astype".format(dtype)) + + elif is_categorical_dtype(dtype): + return True + + return False + + def external_values(self, dtype=None): + """ return an outside world format, currently just the ndarray """ + return self.values + + def internal_values(self, dtype=None): + """ return an internal format, currently just the ndarray + this should be the pure internal API format + """ + return self.values + + def formatting_values(self): + """Return the internal values used by the DataFrame/SeriesFormatter""" + return self.internal_values() + + def get_values(self, dtype=None): + """ + return an internal format, currently just the ndarray + this is often overridden to handle to_dense like operations + """ + if is_object_dtype(dtype): + return self.values.astype(object) + return self.values + + def to_dense(self): + return self.values.view() + + @property + def _na_value(self): + return np.nan + + @property + def fill_value(self): + return np.nan + + @property + 
def mgr_locs(self): + return self._mgr_locs + + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs): + if not isinstance(new_mgr_locs, libinternals.BlockPlacement): + new_mgr_locs = libinternals.BlockPlacement(new_mgr_locs) + + self._mgr_locs = new_mgr_locs + + @property + def array_dtype(self): + """ the dtype to return if I want to construct this block as an + array + """ + return self.dtype + + def make_block(self, values, placement=None, ndim=None): + """ + Create a new block, with type inference propagate any values that are + not specified + """ + if placement is None: + placement = self.mgr_locs + if ndim is None: + ndim = self.ndim + + return make_block(values, placement=placement, ndim=ndim) + + def make_block_scalar(self, values): + """ + Create a ScalarBlock + """ + return ScalarBlock(values) + + def make_block_same_class(self, values, placement=None, ndim=None, + dtype=None): + """ Wrap given values in a block of same type as self. """ + if dtype is not None: + # issue 19431 fastparquet is passing this + warnings.warn("dtype argument is deprecated, will be removed " + "in a future release.", DeprecationWarning) + if placement is None: + placement = self.mgr_locs + return make_block(values, placement=placement, ndim=ndim, + klass=self.__class__, dtype=dtype) + + def __unicode__(self): + + # don't want to print out all of the items here + name = pprint_thing(self.__class__.__name__) + if self._is_single_block: + + result = '{name}: {len} dtype: {dtype}'.format( + name=name, len=len(self), dtype=self.dtype) + + else: + + shape = ' x '.join(pprint_thing(s) for s in self.shape) + result = '{name}: {index}, {shape}, dtype: {dtype}'.format( + name=name, index=pprint_thing(self.mgr_locs.indexer), + shape=shape, dtype=self.dtype) + + return result + + def __len__(self): + return len(self.values) + + def __getstate__(self): + return self.mgr_locs.indexer, self.values + + def __setstate__(self, state): + self.mgr_locs = libinternals.BlockPlacement(state[0]) + 
self.values = state[1] + self.ndim = self.values.ndim + + def _slice(self, slicer): + """ return a slice of my values """ + return self.values[slicer] + + def reshape_nd(self, labels, shape, ref_items, mgr=None): + """ + Parameters + ---------- + labels : list of new axis labels + shape : new shape + ref_items : new ref_items + + return a new block that is transformed to a nd block + """ + return _block2d_to_blocknd(values=self.get_values().T, + placement=self.mgr_locs, shape=shape, + labels=labels, ref_items=ref_items) + + def getitem_block(self, slicer, new_mgr_locs=None): + """ + Perform __getitem__-like, return result as block. + + As of now, only supports slices that preserve dimensionality. + """ + if new_mgr_locs is None: + if isinstance(slicer, tuple): + axis0_slicer = slicer[0] + else: + axis0_slicer = slicer + new_mgr_locs = self.mgr_locs[axis0_slicer] + + new_values = self._slice(slicer) + + if self._validate_ndim and new_values.ndim != self.ndim: + raise ValueError("Only same dim slicing is allowed") + + return self.make_block_same_class(new_values, new_mgr_locs) + + @property + def shape(self): + return self.values.shape + + @property + def dtype(self): + return self.values.dtype + + @property + def ftype(self): + return "{dtype}:{ftype}".format(dtype=self.dtype, ftype=self._ftype) + + def merge(self, other): + return _merge_blocks([self, other]) + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. + """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + return self.make_block_same_class( + values, placement=placement or slice(0, len(values), 1)) + + def iget(self, i): + return self.values[i] + + def set(self, locs, values, check=False): + """ + Modify Block in-place with new item value + + Returns + ------- + None + """ + self.values[locs] = values + + def delete(self, loc): + """ + Delete given loc(-s) from block in-place. 
+ """ + self.values = np.delete(self.values, loc, 0) + self.mgr_locs = self.mgr_locs.delete(loc) + + def apply(self, func, mgr=None, **kwargs): + """ apply the function to my values; return a block if we are not + one + """ + with np.errstate(all='ignore'): + result = func(self.values, **kwargs) + if not isinstance(result, Block): + result = self.make_block(values=_block_shape(result, + ndim=self.ndim)) + + return result + + def fillna(self, value, limit=None, inplace=False, downcast=None, + mgr=None): + """ fillna on the block with the value. If we fail, then convert to + ObjectBlock and try again + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + + if not self._can_hold_na: + if inplace: + return self + else: + return self.copy() + + mask = isna(self.values) + if limit is not None: + if not is_integer(limit): + raise ValueError('Limit must be an integer') + if limit < 1: + raise ValueError('Limit must be greater than 0') + if self.ndim > 2: + raise NotImplementedError("number of dimensions for 'fillna' " + "is currently limited to 2") + mask[mask.cumsum(self.ndim - 1) > limit] = False + + # fillna, but if we cannot coerce, then try again as an ObjectBlock + try: + values, _, _, _ = self._try_coerce_args(self.values, value) + blocks = self.putmask(mask, value, inplace=inplace) + blocks = [b.make_block(values=self._try_coerce_result(b.values)) + for b in blocks] + return self._maybe_downcast(blocks, downcast) + except (TypeError, ValueError): + + # we can't process the value, but nothing to do + if not mask.any(): + return self if inplace else self.copy() + + # operate column-by-column + def f(m, v, i): + block = self.coerce_to_target_dtype(value) + + # slice out our block + if i is not None: + block = block.getitem_block(slice(i, i + 1)) + return block.fillna(value, + limit=limit, + inplace=inplace, + downcast=None) + + return self.split_and_operate(mask, f, inplace) + + def split_and_operate(self, mask, f, inplace): + """ + split the block per-column, 
and apply the callable f + per-column, return a new block for each. Handle + masking which will not change a block unless needed. + + Parameters + ---------- + mask : 2-d boolean mask + f : callable accepting (1d-mask, 1d values, indexer) + inplace : boolean + + Returns + ------- + list of blocks + """ + + if mask is None: + mask = np.ones(self.shape, dtype=bool) + new_values = self.values + + def make_a_block(nv, ref_loc): + if isinstance(nv, Block): + block = nv + elif isinstance(nv, list): + block = nv[0] + else: + # Put back the dimension that was taken from it and make + # a block out of the result. + try: + nv = _block_shape(nv, ndim=self.ndim) + except (AttributeError, NotImplementedError): + pass + block = self.make_block(values=nv, + placement=ref_loc) + return block + + # ndim == 1 + if self.ndim == 1: + if mask.any(): + nv = f(mask, new_values, None) + else: + nv = new_values if inplace else new_values.copy() + block = make_a_block(nv, self.mgr_locs) + return [block] + + # ndim > 1 + new_blocks = [] + for i, ref_loc in enumerate(self.mgr_locs): + m = mask[i] + v = new_values[i] + + # need a new block + if m.any(): + nv = f(m, v, i) + else: + nv = v if inplace else v.copy() + + block = make_a_block(nv, [ref_loc]) + new_blocks.append(block) + + return new_blocks + + def _maybe_downcast(self, blocks, downcast=None): + + # no need to downcast our float + # unless indicated + if downcast is None and self.is_float: + return blocks + elif downcast is None and (self.is_timedelta or self.is_datetime): + return blocks + + if not isinstance(blocks, list): + blocks = [blocks] + return _extend_blocks([b.downcast(downcast) for b in blocks]) + + def downcast(self, dtypes=None, mgr=None): + """ try to downcast each item to the dict of dtypes if present """ + + # turn it off completely + if dtypes is False: + return self + + values = self.values + + # single block handling + if self._is_single_block: + + # try to cast all non-floats here + if dtypes is None: + dtypes = 
'infer' + + nv = maybe_downcast_to_dtype(values, dtypes) + return self.make_block(nv) + + # ndim > 1 + if dtypes is None: + return self + + if not (dtypes == 'infer' or isinstance(dtypes, dict)): + raise ValueError("downcast must have a dictionary or 'infer' as " + "its argument") + + # operate column-by-column + # this is expensive as it splits the blocks items-by-item + def f(m, v, i): + + if dtypes == 'infer': + dtype = 'infer' + else: + raise AssertionError("dtypes as dict is not supported yet") + + if dtype is not None: + v = maybe_downcast_to_dtype(v, dtype) + return v + + return self.split_and_operate(None, f, False) + + def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs): + return self._astype(dtype, copy=copy, errors=errors, values=values, + **kwargs) + + def _astype(self, dtype, copy=False, errors='raise', values=None, + klass=None, mgr=None, **kwargs): + """Coerce to the new type + + Parameters + ---------- + dtype : str, dtype convertible + copy : boolean, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'ignore' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + Returns + ------- + Block + """ + errors_legal_values = ('raise', 'ignore') + + if errors not in errors_legal_values: + invalid_arg = ("Expected value of kwarg 'errors' to be one of {}. " + "Supplied value is '{}'".format( + list(errors_legal_values), errors)) + raise ValueError(invalid_arg) + + if (inspect.isclass(dtype) and + issubclass(dtype, (PandasExtensionDtype, ExtensionDtype))): + msg = ("Expected an instance of {}, but got the class instead. 
" + "Try instantiating 'dtype'.".format(dtype.__name__)) + raise TypeError(msg) + + # may need to convert to categorical + if self.is_categorical_astype(dtype): + + # deprecated 17636 + if ('categories' in kwargs or 'ordered' in kwargs): + if isinstance(dtype, CategoricalDtype): + raise TypeError( + "Cannot specify a CategoricalDtype and also " + "`categories` or `ordered`. Use " + "`dtype=CategoricalDtype(categories, ordered)`" + " instead.") + warnings.warn("specifying 'categories' or 'ordered' in " + ".astype() is deprecated; pass a " + "CategoricalDtype instead", + FutureWarning, stacklevel=7) + + categories = kwargs.get('categories', None) + ordered = kwargs.get('ordered', None) + if com._any_not_none(categories, ordered): + dtype = CategoricalDtype(categories, ordered) + + if is_categorical_dtype(self.values): + # GH 10696/18593: update an existing categorical efficiently + return self.make_block(self.values.astype(dtype, copy=copy)) + + return self.make_block(Categorical(self.values, dtype=dtype)) + + # convert dtypes if needed + dtype = pandas_dtype(dtype) + + # astype processing + if is_dtype_equal(self.dtype, dtype): + if copy: + return self.copy() + return self + + if klass is None: + if dtype == np.object_: + klass = ObjectBlock + try: + # force the copy here + if values is None: + + if issubclass(dtype.type, + (compat.text_type, compat.string_types)): + + # use native type formatting for datetime/tz/timedelta + if self.is_datelike: + values = self.to_native_types() + + # astype formatting + else: + values = self.get_values() + + else: + values = self.get_values(dtype=dtype) + + # _astype_nansafe works fine with 1-d only + values = astype_nansafe(values.ravel(), dtype, copy=True) + + # TODO(extension) + # should we make this attribute? 
+ try: + values = values.reshape(self.shape) + except AttributeError: + pass + + newb = make_block(values, placement=self.mgr_locs, + klass=klass) + except: + if errors == 'raise': + raise + newb = self.copy() if copy else self + + if newb.is_numeric and self.is_numeric: + if newb.shape != self.shape: + raise TypeError( + "cannot set astype for copy = [{copy}] for dtype " + "({dtype} [{itemsize}]) with smaller itemsize than " + "current ({newb_dtype} [{newb_size}])".format( + copy=copy, dtype=self.dtype.name, + itemsize=self.itemsize, newb_dtype=newb.dtype.name, + newb_size=newb.itemsize)) + return newb + + def convert(self, copy=True, **kwargs): + """ attempt to coerce any object types to better types return a copy + of the block (if copy = True) by definition we are not an ObjectBlock + here! + """ + + return self.copy() if copy else self + + def _can_hold_element(self, element): + """ require the same dtype as ourselves """ + dtype = self.values.dtype.type + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, dtype) + return isinstance(element, dtype) + + def _try_cast_result(self, result, dtype=None): + """ try to cast the result to our original type, we may have + roundtripped thru object in the mean-time + """ + if dtype is None: + dtype = self.dtype + + if self.is_integer or self.is_bool or self.is_datetime: + pass + elif self.is_float and result.dtype == self.dtype: + + # protect against a bool/object showing up here + if isinstance(dtype, compat.string_types) and dtype == 'infer': + return result + if not isinstance(dtype, type): + dtype = dtype.type + if issubclass(dtype, (np.bool_, np.object_)): + if issubclass(dtype, np.bool_): + if isna(result).all(): + return result.astype(np.bool_) + else: + result = result.astype(np.object_) + result[result == 1] = True + result[result == 0] = False + return result + else: + return result.astype(np.object_) + + return result + + # may need to change the dtype here + return 
maybe_downcast_to_dtype(result, dtype) + + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments """ + + if np.any(notna(other)) and not self._can_hold_element(other): + # coercion issues + # let higher levels handle + raise TypeError("cannot convert {} to an {}".format( + type(other).__name__, + type(self).__name__.lower().replace('Block', ''))) + + return values, False, other, False + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + return result + + def _try_coerce_and_cast_result(self, result, dtype=None): + result = self._try_coerce_result(result) + result = self._try_cast_result(result, dtype=dtype) + return result + + def to_native_types(self, slicer=None, na_rep='nan', quoting=None, + **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.get_values() + + if slicer is not None: + values = values[:, slicer] + mask = isna(values) + + if not self.is_object and not quoting: + values = values.astype(str) + else: + values = np.array(values, dtype='object') + + values[mask] = na_rep + return values + + # block actions #### + def copy(self, deep=True, mgr=None): + """ copy constructor """ + values = self.values + if deep: + values = values.copy() + return self.make_block_same_class(values) + + def replace(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True, mgr=None): + """ replace the to_replace value with value, possible to create new + blocks here this is just a call to putmask. regex is not used here. + It is used in ObjectBlocks. It is here for API + compatibility. 
+ """ + + inplace = validate_bool_kwarg(inplace, 'inplace') + original_to_replace = to_replace + + # try to replace, if we raise an error, convert to ObjectBlock and + # retry + try: + values, _, to_replace, _ = self._try_coerce_args(self.values, + to_replace) + mask = missing.mask_missing(values, to_replace) + if filter is not None: + filtered_out = ~self.mgr_locs.isin(filter) + mask[filtered_out.nonzero()[0]] = False + + blocks = self.putmask(mask, value, inplace=inplace) + if convert: + blocks = [b.convert(by_item=True, numeric=False, + copy=not inplace) for b in blocks] + return blocks + except (TypeError, ValueError): + + # try again with a compatible block + block = self.astype(object) + return block.replace( + to_replace=original_to_replace, value=value, inplace=inplace, + filter=filter, regex=regex, convert=convert) + + def _replace_single(self, *args, **kwargs): + """ no-op on a non-ObjectBlock """ + return self if kwargs['inplace'] else self.copy() + + def setitem(self, indexer, value, mgr=None): + """Set the value inplace, returning a a maybe different typed block. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice + The subset of self.values to set + value : object + The value being set + mgr : BlockPlacement, optional + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. 
+ """ + # coerce None values, if appropriate + if value is None: + if self.is_numeric: + value = np.nan + + # coerce if block dtype can store value + values = self.values + try: + values, _, value, _ = self._try_coerce_args(values, value) + # can keep its own dtype + if hasattr(value, 'dtype') and is_dtype_equal(values.dtype, + value.dtype): + dtype = self.dtype + else: + dtype = 'infer' + + except (TypeError, ValueError): + # current dtype cannot store value, coerce to common dtype + find_dtype = False + + if hasattr(value, 'dtype'): + dtype = value.dtype + find_dtype = True + + elif lib.is_scalar(value): + if isna(value): + # NaN promotion is handled in latter path + dtype = False + else: + dtype, _ = infer_dtype_from_scalar(value, + pandas_dtype=True) + find_dtype = True + else: + dtype = 'infer' + + if find_dtype: + dtype = find_common_type([values.dtype, dtype]) + if not is_dtype_equal(self.dtype, dtype): + b = self.astype(dtype) + return b.setitem(indexer, value, mgr=mgr) + + # value must be storeable at this moment + arr_value = np.array(value) + + # cast the values to a type that can hold nan (if necessary) + if not self._can_hold_element(value): + dtype, _ = maybe_promote(arr_value.dtype) + values = values.astype(dtype) + + transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) + values = transf(values) + + # length checking + check_setitem_lengths(indexer, value, values) + + def _is_scalar_indexer(indexer): + # return True if we are all scalar indexers + + if arr_value.ndim == 1: + if not isinstance(indexer, tuple): + indexer = tuple([indexer]) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 + for idx in indexer) + return False + + def _is_empty_indexer(indexer): + # return a boolean if we have an empty indexer + + if is_list_like(indexer) and not len(indexer): + return True + if arr_value.ndim == 1: + if not isinstance(indexer, tuple): + indexer = tuple([indexer]) + return any(isinstance(idx, np.ndarray) and len(idx) == 0 + for idx in 
indexer) + return False + + # empty indexers + # 8669 (empty) + if _is_empty_indexer(indexer): + pass + + # setting a single element for each dim and with a rhs that could + # be say a list + # GH 6043 + elif _is_scalar_indexer(indexer): + values[indexer] = value + + # if we are an exact match (ex-broadcasting), + # then use the resultant dtype + elif (len(arr_value.shape) and + arr_value.shape[0] == values.shape[0] and + np.prod(arr_value.shape) == np.prod(values.shape)): + values[indexer] = value + try: + values = values.astype(arr_value.dtype) + except ValueError: + pass + + # set + else: + values[indexer] = value + + # coerce and try to infer the dtypes of the result + values = self._try_coerce_and_cast_result(values, dtype) + block = self.make_block(transf(values)) + return block + + def putmask(self, mask, new, align=True, inplace=False, axis=0, + transpose=False, mgr=None): + """ putmask the data to the block; it is possible that we may create a + new dtype of block + + return the resulting block(s) + + Parameters + ---------- + mask : the condition to respect + new : a ndarray/object + align : boolean, perform alignment on other/cond, default is True + inplace : perform inplace modification, default is False + axis : int + transpose : boolean + Set to True if self is stored with axes reversed + + Returns + ------- + a list of new blocks, the result of the putmask + """ + + new_values = self.values if inplace else self.values.copy() + + new = getattr(new, 'values', new) + mask = getattr(mask, 'values', mask) + + # if we are passed a scalar None, convert it here + if not is_list_like(new) and isna(new) and not self.is_object: + new = self.fill_value + + if self._can_hold_element(new): + _, _, new, _ = self._try_coerce_args(new_values, new) + + if transpose: + new_values = new_values.T + + # If the default repeat behavior in np.putmask would go in the + # wrong direction, then explicitly repeat and reshape new instead + if getattr(new, 'ndim', 0) >= 1: + if 
self.ndim - 1 == new.ndim and axis == 1: + new = np.repeat( + new, new_values.shape[-1]).reshape(self.shape) + new = new.astype(new_values.dtype) + + # we require exact matches between the len of the + # values we are setting (or is compat). np.putmask + # doesn't check this and will simply truncate / pad + # the output, but we want sane error messages + # + # TODO: this prob needs some better checking + # for 2D cases + if ((is_list_like(new) and + np.any(mask[mask]) and + getattr(new, 'ndim', 1) == 1)): + + if not (mask.shape[-1] == len(new) or + mask[mask].shape[-1] == len(new) or + len(new) == 1): + raise ValueError("cannot assign mismatch " + "length to masked array") + + np.putmask(new_values, mask, new) + + # maybe upcast me + elif mask.any(): + if transpose: + mask = mask.T + if isinstance(new, np.ndarray): + new = new.T + axis = new_values.ndim - axis - 1 + + # Pseudo-broadcast + if getattr(new, 'ndim', 0) >= 1: + if self.ndim - 1 == new.ndim: + new_shape = list(new.shape) + new_shape.insert(axis, 1) + new = new.reshape(tuple(new_shape)) + + # operate column-by-column + def f(m, v, i): + + if i is None: + # ndim==1 case. 
+ n = new + else: + + if isinstance(new, np.ndarray): + n = np.squeeze(new[i % new.shape[0]]) + else: + n = np.array(new) + + # type of the new block + dtype, _ = maybe_promote(n.dtype) + + # we need to explicitly astype here to make a copy + n = n.astype(dtype) + + nv = _putmask_smart(v, m, n) + return nv + + new_blocks = self.split_and_operate(mask, f, inplace) + return new_blocks + + if inplace: + return [self] + + if transpose: + new_values = new_values.T + + return [self.make_block(new_values)] + + def coerce_to_target_dtype(self, other): + """ + coerce the current block to a dtype compat for other + we will return a block, possibly object, and not raise + + we can also safely try to coerce to the same dtype + and will receive the same block + """ + + # if we cannot then coerce to object + dtype, _ = infer_dtype_from(other, pandas_dtype=True) + + if is_dtype_equal(self.dtype, dtype): + return self + + if self.is_bool or is_object_dtype(dtype) or is_bool_dtype(dtype): + # we don't upcast to bool + return self.astype(object) + + elif ((self.is_float or self.is_complex) and + (is_integer_dtype(dtype) or is_float_dtype(dtype))): + # don't coerce float/complex to int + return self + + elif (self.is_datetime or + is_datetime64_dtype(dtype) or + is_datetime64tz_dtype(dtype)): + + # not a datetime + if not ((is_datetime64_dtype(dtype) or + is_datetime64tz_dtype(dtype)) and self.is_datetime): + return self.astype(object) + + # don't upcast timezone with different timezone or no timezone + mytz = getattr(self.dtype, 'tz', None) + othertz = getattr(dtype, 'tz', None) + + if str(mytz) != str(othertz): + return self.astype(object) + + raise AssertionError("possible recursion in " + "coerce_to_target_dtype: {} {}".format( + self, other)) + + elif (self.is_timedelta or is_timedelta64_dtype(dtype)): + + # not a timedelta + if not (is_timedelta64_dtype(dtype) and self.is_timedelta): + return self.astype(object) + + raise AssertionError("possible recursion in " + 
"coerce_to_target_dtype: {} {}".format( + self, other)) + + try: + return self.astype(dtype) + except (ValueError, TypeError): + pass + + return self.astype(object) + + def interpolate(self, method='pad', axis=0, index=None, values=None, + inplace=False, limit=None, limit_direction='forward', + limit_area=None, fill_value=None, coerce=False, + downcast=None, mgr=None, **kwargs): + + inplace = validate_bool_kwarg(inplace, 'inplace') + + def check_int_bool(self, inplace): + # Only FloatBlocks will contain NaNs. + # timedelta subclasses IntBlock + if (self.is_bool or self.is_integer) and not self.is_timedelta: + if inplace: + return self + else: + return self.copy() + + # a fill na type method + try: + m = missing.clean_fill_method(method) + except: + m = None + + if m is not None: + r = check_int_bool(self, inplace) + if r is not None: + return r + return self._interpolate_with_fill(method=m, axis=axis, + inplace=inplace, limit=limit, + fill_value=fill_value, + coerce=coerce, + downcast=downcast, mgr=mgr) + # try an interp method + try: + m = missing.clean_interp_method(method, **kwargs) + except: + m = None + + if m is not None: + r = check_int_bool(self, inplace) + if r is not None: + return r + return self._interpolate(method=m, index=index, values=values, + axis=axis, limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + fill_value=fill_value, inplace=inplace, + downcast=downcast, mgr=mgr, **kwargs) + + raise ValueError("invalid method '{0}' to interpolate.".format(method)) + + def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, + limit=None, fill_value=None, coerce=False, + downcast=None, mgr=None): + """ fillna but using the interpolate machinery """ + + inplace = validate_bool_kwarg(inplace, 'inplace') + + # if we are coercing, then don't force the conversion + # if the block can't hold the type + if coerce: + if not self._can_hold_na: + if inplace: + return [self] + else: + return [self.copy()] + + values = self.values 
if inplace else self.values.copy() + values, _, fill_value, _ = self._try_coerce_args(values, fill_value) + values = missing.interpolate_2d(values, method=method, axis=axis, + limit=limit, fill_value=fill_value, + dtype=self.dtype) + values = self._try_coerce_result(values) + + blocks = [self.make_block_same_class(values, ndim=self.ndim)] + return self._maybe_downcast(blocks, downcast) + + def _interpolate(self, method=None, index=None, values=None, + fill_value=None, axis=0, limit=None, + limit_direction='forward', limit_area=None, + inplace=False, downcast=None, mgr=None, **kwargs): + """ interpolate using scipy wrappers """ + + inplace = validate_bool_kwarg(inplace, 'inplace') + data = self.values if inplace else self.values.copy() + + # only deal with floats + if not self.is_float: + if not self.is_integer: + return self + data = data.astype(np.float64) + + if fill_value is None: + fill_value = self.fill_value + + if method in ('krogh', 'piecewise_polynomial', 'pchip'): + if not index.is_monotonic: + raise ValueError("{0} interpolation requires that the " + "index be monotonic.".format(method)) + # process 1-d slices in the axis direction + + def func(x): + + # process a 1-d slice, returning it + # should the axis argument be handled below in apply_along_axis? + # i.e. 
not an arg to missing.interpolate_1d + return missing.interpolate_1d(index, x, method=method, limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + fill_value=fill_value, + bounds_error=False, **kwargs) + + # interp each column independently + interp_values = np.apply_along_axis(func, axis, data) + + blocks = [self.make_block_same_class(interp_values)] + return self._maybe_downcast(blocks, downcast) + + def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): + """ + Take values according to indexer and return them as a block.bb + + """ + + # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock + # so need to preserve types + # sparse is treated like an ndarray, but needs .get_values() shaping + + values = self.values + if self.is_sparse: + values = self.get_values() + + if fill_tuple is None: + fill_value = self.fill_value + new_values = algos.take_nd(values, indexer, axis=axis, + allow_fill=False) + else: + fill_value = fill_tuple[0] + new_values = algos.take_nd(values, indexer, axis=axis, + allow_fill=True, fill_value=fill_value) + + if new_mgr_locs is None: + if axis == 0: + slc = libinternals.indexer_as_slice(indexer) + if slc is not None: + new_mgr_locs = self.mgr_locs[slc] + else: + new_mgr_locs = self.mgr_locs[indexer] + else: + new_mgr_locs = self.mgr_locs + + if not is_dtype_equal(new_values.dtype, self.dtype): + return self.make_block(new_values, new_mgr_locs) + else: + return self.make_block_same_class(new_values, new_mgr_locs) + + def diff(self, n, axis=1, mgr=None): + """ return block for the diff of the values """ + new_values = algos.diff(self.values, n, axis=axis) + return [self.make_block(values=new_values)] + + def shift(self, periods, axis=0, mgr=None): + """ shift the block by periods, possibly upcast """ + + # convert integer to float if necessary. 
need to do a lot more than + # that, handle boolean etc also + new_values, fill_value = maybe_upcast(self.values) + + # make sure array sent to np.roll is c_contiguous + f_ordered = new_values.flags.f_contiguous + if f_ordered: + new_values = new_values.T + axis = new_values.ndim - axis - 1 + + if np.prod(new_values.shape): + new_values = np.roll(new_values, ensure_platform_int(periods), + axis=axis) + + axis_indexer = [slice(None)] * self.ndim + if periods > 0: + axis_indexer[axis] = slice(None, periods) + else: + axis_indexer[axis] = slice(periods, None) + new_values[tuple(axis_indexer)] = fill_value + + # restore original order + if f_ordered: + new_values = new_values.T + + return [self.make_block(new_values)] + + def eval(self, func, other, errors='raise', try_cast=False, mgr=None): + """ + evaluate the block; return result block from the result + + Parameters + ---------- + func : how to combine self, other + other : a ndarray/object + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. 
On error return original object + + try_cast : try casting the results to the input type + + Returns + ------- + a new block, the result of the func + """ + orig_other = other + values = self.values + + other = getattr(other, 'values', other) + + # make sure that we can broadcast + is_transposed = False + if hasattr(other, 'ndim') and hasattr(values, 'ndim'): + if values.ndim != other.ndim: + is_transposed = True + else: + if values.shape == other.shape[::-1]: + is_transposed = True + elif values.shape[0] == other.shape[-1]: + is_transposed = True + else: + # this is a broadcast error heree + raise ValueError( + "cannot broadcast shape [{t_shape}] with " + "block values [{oth_shape}]".format( + t_shape=values.T.shape, oth_shape=other.shape)) + + transf = (lambda x: x.T) if is_transposed else (lambda x: x) + + # coerce/transpose the args if needed + try: + values, values_mask, other, other_mask = self._try_coerce_args( + transf(values), other) + except TypeError: + block = self.coerce_to_target_dtype(orig_other) + return block.eval(func, orig_other, + errors=errors, + try_cast=try_cast, mgr=mgr) + + # get the result, may need to transpose the other + def get_result(other): + + # avoid numpy warning of comparisons again None + if other is None: + result = not func.__name__ == 'eq' + + # avoid numpy warning of elementwise comparisons to object + elif is_numeric_v_string_like(values, other): + result = False + + # avoid numpy warning of elementwise comparisons + elif func.__name__ == 'eq': + if is_list_like(other) and not isinstance(other, np.ndarray): + other = np.asarray(other) + + # if we can broadcast, then ok + if values.shape[-1] != other.shape[-1]: + return False + result = func(values, other) + else: + result = func(values, other) + + # mask if needed + if isinstance(values_mask, np.ndarray) and values_mask.any(): + result = result.astype('float64', copy=False) + result[values_mask] = np.nan + if other_mask is True: + result = result.astype('float64', 
copy=False) + result[:] = np.nan + elif isinstance(other_mask, np.ndarray) and other_mask.any(): + result = result.astype('float64', copy=False) + result[other_mask.ravel()] = np.nan + + return result + + # error handler if we have an issue operating with the function + def handle_error(): + + if errors == 'raise': + # The 'detail' variable is defined in outer scope. + raise TypeError( + 'Could not operate {other!r} with block values ' + '{detail!s}'.format(other=other, detail=detail)) # noqa + else: + # return the values + result = np.empty(values.shape, dtype='O') + result.fill(np.nan) + return result + + # get the result + try: + with np.errstate(all='ignore'): + result = get_result(other) + + # if we have an invalid shape/broadcast error + # GH4576, so raise instead of allowing to pass through + except ValueError as detail: + raise + except Exception as detail: + result = handle_error() + + # technically a broadcast error in numpy can 'work' by returning a + # boolean False + if not isinstance(result, np.ndarray): + if not isinstance(result, np.ndarray): + + # differentiate between an invalid ndarray-ndarray comparison + # and an invalid type comparison + if isinstance(values, np.ndarray) and is_list_like(other): + raise ValueError( + 'Invalid broadcasting comparison [{other!r}] with ' + 'block values'.format(other=other)) + + raise TypeError('Could not compare [{other!r}] ' + 'with block values'.format(other=other)) + + # transpose if needed + result = transf(result) + + # try to cast if requested + if try_cast: + result = self._try_cast_result(result) + + result = _block_shape(result, ndim=self.ndim) + return [self.make_block(result)] + + def where(self, other, cond, align=True, errors='raise', + try_cast=False, axis=0, transpose=False, mgr=None): + """ + evaluate the block; return result block(s) from the result + + Parameters + ---------- + other : a ndarray/object + cond : the condition to respect + align : boolean, perform alignment on other/cond + errors 
: str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + axis : int + transpose : boolean + Set to True if self is stored with axes reversed + + Returns + ------- + a new block(s), the result of the func + """ + import pandas.core.computation.expressions as expressions + assert errors in ['raise', 'ignore'] + + values = self.values + orig_other = other + if transpose: + values = values.T + + other = getattr(other, '_values', getattr(other, 'values', other)) + cond = getattr(cond, 'values', cond) + + # If the default broadcasting would go in the wrong direction, then + # explicitly reshape other instead + if getattr(other, 'ndim', 0) >= 1: + if values.ndim - 1 == other.ndim and axis == 1: + other = other.reshape(tuple(other.shape + (1, ))) + elif transpose and values.ndim == self.ndim - 1: + cond = cond.T + + if not hasattr(cond, 'shape'): + raise ValueError("where must have a condition that is ndarray " + "like") + + # our where function + def func(cond, values, other): + if cond.ravel().all(): + return values + + values, values_mask, other, other_mask = self._try_coerce_args( + values, other) + + try: + return self._try_coerce_result(expressions.where( + cond, values, other)) + except Exception as detail: + if errors == 'raise': + raise TypeError( + 'Could not operate [{other!r}] with block values ' + '[{detail!s}]'.format(other=other, detail=detail)) + else: + # return the values + result = np.empty(values.shape, dtype='float64') + result.fill(np.nan) + return result + + # see if we can operate on the entire block, or need item-by-item + # or if we are a single block (ndim == 1) + try: + result = func(cond, values, other) + except TypeError: + + # we cannot coerce, return a compat dtype + # we are explicitly ignoring errors + block = self.coerce_to_target_dtype(other) + blocks = block.where(orig_other, cond, align=align, + errors=errors, + try_cast=try_cast, 
axis=axis, + transpose=transpose) + return self._maybe_downcast(blocks, 'infer') + + if self._can_hold_na or self.ndim == 1: + + if transpose: + result = result.T + + # try to cast if requested + if try_cast: + result = self._try_cast_result(result) + + return self.make_block(result) + + # might need to separate out blocks + axis = cond.ndim - 1 + cond = cond.swapaxes(axis, 0) + mask = np.array([cond[i].all() for i in range(cond.shape[0])], + dtype=bool) + + result_blocks = [] + for m in [mask, ~mask]: + if m.any(): + r = self._try_cast_result(result.take(m.nonzero()[0], + axis=axis)) + result_blocks.append( + self.make_block(r.T, placement=self.mgr_locs[m])) + + return result_blocks + + def equals(self, other): + if self.dtype != other.dtype or self.shape != other.shape: + return False + return array_equivalent(self.values, other.values) + + def _unstack(self, unstacker_func, new_columns): + """Return a list of unstacked blocks of self + + Parameters + ---------- + unstacker_func : callable + Partially applied unstacker. + new_columns : Index + All columns of the unstacked BlockManager. + + Returns + ------- + blocks : list of Block + New blocks of unstacked values. + mask : array_like of bool + The mask of columns of `blocks` we should keep. 
+ """ + unstacker = unstacker_func(self.values.T) + new_items = unstacker.get_new_columns() + new_placement = new_columns.get_indexer(new_items) + new_values, mask = unstacker.get_new_values() + + mask = mask.any(0) + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + blocks = [make_block(new_values, placement=new_placement)] + return blocks, mask + + def quantile(self, qs, interpolation='linear', axis=0, mgr=None): + """ + compute the quantiles of the + + Parameters + ---------- + qs: a scalar or list of the quantiles to be computed + interpolation: type of interpolation, default 'linear' + axis: axis to compute, default 0 + + Returns + ------- + tuple of (axis, block) + + """ + kw = {'interpolation': interpolation} + values = self.get_values() + values, _, _, _ = self._try_coerce_args(values, values) + + def _nanpercentile1D(values, mask, q, **kw): + values = values[~mask] + + if len(values) == 0: + if lib.is_scalar(q): + return self._na_value + else: + return np.array([self._na_value] * len(q), + dtype=values.dtype) + + return np.percentile(values, q, **kw) + + def _nanpercentile(values, q, axis, **kw): + + mask = isna(self.values) + if not lib.is_scalar(mask) and mask.any(): + if self.ndim == 1: + return _nanpercentile1D(values, mask, q, **kw) + else: + # for nonconsolidatable blocks mask is 1D, but values 2D + if mask.ndim < values.ndim: + mask = mask.reshape(values.shape) + if axis == 0: + values = values.T + mask = mask.T + result = [_nanpercentile1D(val, m, q, **kw) for (val, m) + in zip(list(values), list(mask))] + result = np.array(result, dtype=values.dtype, copy=False).T + return result + else: + return np.percentile(values, q, axis=axis, **kw) + + from pandas import Float64Index + is_empty = values.shape[axis] == 0 + if is_list_like(qs): + ax = Float64Index(qs) + + if is_empty: + if self.ndim == 1: + result = self._na_value + else: + # create the array of na_values + # 2d len(values) * len(qs) + result = 
np.repeat(np.array([self._na_value] * len(qs)),
                                       len(values)).reshape(len(values),
                                                            len(qs))
            else:

                try:
                    result = _nanpercentile(values, np.array(qs) * 100,
                                            axis=axis, **kw)
                except ValueError:

                    # older numpies don't handle an array for q
                    result = [_nanpercentile(values, q * 100,
                                             axis=axis, **kw) for q in qs]

                result = np.array(result, copy=False)
                if self.ndim > 1:
                    result = result.T

        else:

            if self.ndim == 1:
                ax = Float64Index([qs])
            else:
                ax = mgr.axes[0]

            if is_empty:
                if self.ndim == 1:
                    result = self._na_value
                else:
                    result = np.array([self._na_value] * len(self))
            else:
                result = _nanpercentile(values, qs * 100, axis=axis, **kw)

        # re-box to block-native values (e.g. i8 -> Timestamp) before wrapping
        ndim = getattr(result, 'ndim', None) or 0
        result = self._try_coerce_result(result)
        if lib.is_scalar(result):
            return ax, self.make_block_scalar(result)
        return ax, make_block(result,
                              placement=np.arange(len(result)),
                              ndim=ndim)


class ScalarBlock(Block):
    """
    A scalar-compatible Block: wraps a single scalar value with the
    minimal Block interface (zero-dimensional, zero length).
    """
    __slots__ = ['_mgr_locs', 'values', 'ndim']

    def __init__(self, values):
        # a scalar has no dimensions; placement is a fixed single slot
        self.ndim = 0
        self.mgr_locs = [0]
        self.values = values

    @property
    def dtype(self):
        # the Python type of the scalar stands in for a numpy dtype
        return type(self.values)

    @property
    def shape(self):
        return tuple([0])

    def __len__(self):
        return 0


class NonConsolidatableMixIn(object):
    """ hold methods for the nonconsolidatable blocks """
    _can_consolidate = False
    _verify_integrity = False
    _validate_ndim = False

    def __init__(self, values, placement, ndim=None):
        """Initialize a non-consolidatable block.

        'ndim' may be inferred from 'placement'.

        This will continue to call __init__ on the other base
        classes mixed in with this Mixin.
+ """ + # Placement must be converted to BlockPlacement so that we can check + # its length + if not isinstance(placement, libinternals.BlockPlacement): + placement = libinternals.BlockPlacement(placement) + + # Maybe infer ndim from placement + if ndim is None: + if len(placement) != 1: + ndim = 1 + else: + ndim = 2 + super(NonConsolidatableMixIn, self).__init__(values, placement, + ndim=ndim) + + @property + def shape(self): + if self.ndim == 1: + return (len(self.values)), + return (len(self.mgr_locs), len(self.values)) + + def get_values(self, dtype=None): + """ need to to_dense myself (and always return a ndim sized object) """ + values = self.values.to_dense() + if values.ndim == self.ndim - 1: + values = values.reshape((1,) + values.shape) + return values + + def iget(self, col): + + if self.ndim == 2 and isinstance(col, tuple): + col, loc = col + if not com.is_null_slice(col) and col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values[loc] + else: + if col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values + + def should_store(self, value): + return isinstance(value, self._holder) + + def set(self, locs, values, check=False): + assert locs.tolist() == [0] + self.values = values + + def putmask(self, mask, new, align=True, inplace=False, axis=0, + transpose=False, mgr=None): + """ + putmask the data to the block; we must be a single block and not + generate other blocks + + return the resulting block + + Parameters + ---------- + mask : the condition to respect + new : a ndarray/object + align : boolean, perform alignment on other/cond, default is True + inplace : perform inplace modification, default is False + + Returns + ------- + a new block, the result of the putmask + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + + # use block's copy logic. 
+ # .values may be an Index which does shallow copy by default + new_values = self.values if inplace else self.copy().values + new_values, _, new, _ = self._try_coerce_args(new_values, new) + + if isinstance(new, np.ndarray) and len(new) == len(mask): + new = new[mask] + + mask = _safe_reshape(mask, new_values.shape) + + new_values[mask] = new + new_values = self._try_coerce_result(new_values) + return [self.make_block(values=new_values)] + + def _slice(self, slicer): + """ return a slice of my values (but densify first) """ + return self.get_values()[slicer] + + def _try_cast_result(self, result, dtype=None): + return result + + def _unstack(self, unstacker_func, new_columns): + """Return a list of unstacked blocks of self + + Parameters + ---------- + unstacker_func : callable + Partially applied unstacker. + new_columns : Index + All columns of the unstacked BlockManager. + + Returns + ------- + blocks : list of Block + New blocks of unstacked values. + mask : array_like of bool + The mask of columns of `blocks` we should keep. + """ + # NonConsolidatable blocks can have a single item only, so we return + # one block per item + unstacker = unstacker_func(self.values.T) + new_items = unstacker.get_new_columns() + new_placement = new_columns.get_indexer(new_items) + new_values, mask = unstacker.get_new_values() + + mask = mask.any(0) + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + blocks = [self.make_block_same_class(vals, [place]) + for vals, place in zip(new_values, new_placement)] + return blocks, mask + + +class ExtensionBlock(NonConsolidatableMixIn, Block): + """Block for holding extension types. + + Notes + ----- + This holds all 3rd-party extension array types. It's also the immediate + parent class for our internal extension types' blocks, CategoricalBlock. + + ExtensionArrays are limited to 1-D. 
+ """ + is_extension = True + + def __init__(self, values, placement, ndim=None): + values = self._maybe_coerce_values(values) + super(ExtensionBlock, self).__init__(values, placement, ndim) + + def _maybe_coerce_values(self, values): + """Unbox to an extension array. + + This will unbox an ExtensionArray stored in an Index or Series. + ExtensionArrays pass through. No dtype coercion is done. + + Parameters + ---------- + values : Index, Series, ExtensionArray + + Returns + ------- + ExtensionArray + """ + if isinstance(values, (ABCIndexClass, ABCSeries)): + values = values._values + return values + + @property + def _holder(self): + # For extension blocks, the holder is values-dependent. + return type(self.values) + + @property + def fill_value(self): + # Used in reindex_indexer + return self.values.dtype.na_value + + @property + def _can_hold_na(self): + # The default ExtensionArray._can_hold_na is True + return self._holder._can_hold_na + + @property + def is_view(self): + """Extension arrays are never treated as views.""" + return False + + def setitem(self, indexer, value, mgr=None): + """Set the value inplace, returning a same-typed block. + + This differs from Block.setitem by not allowing setitem to change + the dtype of the Block. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice + The subset of self.values to set + value : object + The value being set + mgr : BlockPlacement, optional + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. + """ + if isinstance(indexer, tuple): + # we are always 1-D + indexer = indexer[0] + + check_setitem_lengths(indexer, value, self.values) + self.values[indexer] = value + return self + + def get_values(self, dtype=None): + # ExtensionArrays must be iterable, so this works. 
+ values = np.asarray(self.values) + if values.ndim == self.ndim - 1: + values = values.reshape((1,) + values.shape) + return values + + def to_dense(self): + return np.asarray(self.values) + + def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): + """ + Take values according to indexer and return them as a block. + """ + if fill_tuple is None: + fill_value = None + else: + fill_value = fill_tuple[0] + + # axis doesn't matter; we are really a single-dim object + # but are passed the axis depending on the calling routing + # if its REALLY axis 0, then this will be a reindex and not a take + new_values = self.values.take(indexer, fill_value=fill_value, + allow_fill=True) + + # if we are a 1-dim object, then always place at 0 + if self.ndim == 1: + new_mgr_locs = [0] + else: + if new_mgr_locs is None: + new_mgr_locs = self.mgr_locs + + return self.make_block_same_class(new_values, new_mgr_locs) + + def _can_hold_element(self, element): + # XXX: We may need to think about pushing this onto the array. + # We're doing the same as CategoricalBlock here. + return True + + def _slice(self, slicer): + """ return a slice of my values """ + + # slice the category + # return same dims as we currently have + + if isinstance(slicer, tuple) and len(slicer) == 2: + if not com.is_null_slice(slicer[0]): + raise AssertionError("invalid slicing for a 1-ndim " + "categorical") + slicer = slicer[1] + + return self.values[slicer] + + def formatting_values(self): + return self.values._formatting_values() + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. 
+ """ + values = self._holder._concat_same_type( + [blk.values for blk in to_concat]) + placement = placement or slice(0, len(values), 1) + return self.make_block_same_class(values, ndim=self.ndim, + placement=placement) + + def fillna(self, value, limit=None, inplace=False, downcast=None, + mgr=None): + values = self.values if inplace else self.values.copy() + values = values.fillna(value=value, limit=limit) + return [self.make_block_same_class(values=values, + placement=self.mgr_locs, + ndim=self.ndim)] + + def interpolate(self, method='pad', axis=0, inplace=False, limit=None, + fill_value=None, **kwargs): + + values = self.values if inplace else self.values.copy() + return self.make_block_same_class( + values=values.fillna(value=fill_value, method=method, + limit=limit), + placement=self.mgr_locs) + + +class NumericBlock(Block): + __slots__ = () + is_numeric = True + _can_hold_na = True + + +class FloatOrComplexBlock(NumericBlock): + __slots__ = () + + def equals(self, other): + if self.dtype != other.dtype or self.shape != other.shape: + return False + left, right = self.values, other.values + return ((left == right) | (np.isnan(left) & np.isnan(right))).all() + + +class FloatBlock(FloatOrComplexBlock): + __slots__ = () + is_float = True + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return (issubclass(tipo.type, (np.floating, np.integer)) and + not issubclass(tipo.type, (np.datetime64, np.timedelta64))) + return ( + isinstance( + element, (float, int, np.floating, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_, datetime, timedelta, + np.datetime64, np.timedelta64))) + + def to_native_types(self, slicer=None, na_rep='', float_format=None, + decimal='.', quoting=None, **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer is not None: + values = values[:, slicer] + + # see gh-13418: no special formatting is desired at the + 
        # output (important for appropriate 'quoting' behaviour),
        # so do not pass it through the FloatArrayFormatter
        if float_format is None and decimal == '.':
            mask = isna(values)

            if not quoting:
                values = values.astype(str)
            else:
                values = np.array(values, dtype='object')

            values[mask] = na_rep
            return values

        from pandas.io.formats.format import FloatArrayFormatter
        formatter = FloatArrayFormatter(values, na_rep=na_rep,
                                        float_format=float_format,
                                        decimal=decimal, quoting=quoting,
                                        fixed_width=False)
        return formatter.get_result_as_array()

    def should_store(self, value):
        # when inserting a column should not coerce integers to floats
        # unnecessarily
        return (issubclass(value.dtype.type, np.floating) and
                value.dtype == self.dtype)


class ComplexBlock(FloatOrComplexBlock):
    __slots__ = ()
    is_complex = True

    def _can_hold_element(self, element):
        tipo = maybe_infer_dtype_type(element)
        if tipo is not None:
            return issubclass(tipo.type,
                              (np.floating, np.integer, np.complexfloating))
        return (
            isinstance(
                element,
                (float, int, complex, np.float_, np.int_, compat.long))
            and not isinstance(element, (bool, np.bool_)))

    def should_store(self, value):
        return issubclass(value.dtype.type, np.complexfloating)


class IntBlock(NumericBlock):
    __slots__ = ()
    is_integer = True
    _can_hold_na = False

    def _can_hold_element(self, element):
        tipo = maybe_infer_dtype_type(element)
        if tipo is not None:
            # integer dtypes only, and only if they fit without truncation
            return (issubclass(tipo.type, np.integer) and
                    not issubclass(tipo.type, (np.datetime64,
                                               np.timedelta64)) and
                    self.dtype.itemsize >= tipo.itemsize)
        return is_integer(element)

    def should_store(self, value):
        return is_integer_dtype(value) and value.dtype == self.dtype


class DatetimeLikeBlockMixin(object):
    """Mixin class for DatetimeBlock and DatetimeTZBlock."""

    @property
    def _holder(self):
        return DatetimeIndex

    @property
    def _na_value(self):
        return tslibs.NaT

    @property
    def fill_value(self):
        return tslibs.iNaT

    def get_values(self, dtype=None):
        """
        return object dtype as boxed values, such as Timestamps/Timedelta
        """
        if is_object_dtype(dtype):
            return lib.map_infer(self.values.ravel(),
                                 self._box_func).reshape(self.values.shape)
        return self.values


class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock):
    __slots__ = ()
    is_timedelta = True
    _can_hold_na = True
    is_numeric = False

    def __init__(self, values, placement, ndim=None):
        # normalize to timedelta64[ns] storage
        if values.dtype != _TD_DTYPE:
            values = conversion.ensure_timedelta64ns(values)

        super(TimeDeltaBlock, self).__init__(values,
                                             placement=placement, ndim=ndim)

    @property
    def _holder(self):
        return TimedeltaIndex

    @property
    def _box_func(self):
        return lambda x: Timedelta(x, unit='ns')

    def _can_hold_element(self, element):
        tipo = maybe_infer_dtype_type(element)
        if tipo is not None:
            return issubclass(tipo.type, np.timedelta64)
        return is_integer(element) or isinstance(
            element, (timedelta, np.timedelta64))

    def fillna(self, value, **kwargs):

        # allow filling with integers to be
        # interpreted as seconds
        if is_integer(value) and not isinstance(value, np.timedelta64):
            value = Timedelta(value, unit='s')
        return super(TimeDeltaBlock, self).fillna(value, **kwargs)

    def _try_coerce_args(self, values, other):
        """
        Coerce values and other to int64, with null values converted to
        iNaT. values is always ndarray-like, other may not be

        Parameters
        ----------
        values : ndarray-like
        other : ndarray-like or scalar

        Returns
        -------
        base-type values, values mask, base-type other, other mask
        """

        values_mask = isna(values)
        values = values.view('i8')
        other_mask = False

        if isinstance(other, bool):
            # bool is an int subclass; reject it explicitly
            raise TypeError
        elif is_null_datelike_scalar(other):
            other = tslibs.iNaT
            other_mask = True
        elif isinstance(other, Timedelta):
            other_mask = isna(other)
            other = other.value
        elif isinstance(other, timedelta):
            other = Timedelta(other).value
        elif isinstance(other, np.timedelta64):
            other_mask = isna(other)
            other = Timedelta(other).value
        elif hasattr(other, 'dtype') and is_timedelta64_dtype(other):
            other_mask = isna(other)
            other = other.astype('i8', copy=False).view('i8')
        else:
            # coercion issues
            # let higher levels handle
            raise TypeError

        return values, values_mask, other, other_mask

    def _try_coerce_result(self, result):
        """ reverse of try_coerce_args / try_operate """
        if isinstance(result, np.ndarray):
            mask = isna(result)
            if result.dtype.kind in ['i', 'f', 'O']:
                result = result.astype('m8[ns]')
            result[mask] = tslibs.iNaT
        elif isinstance(result, (np.integer, np.float)):
            result = self._box_func(result)
        return result

    def should_store(self, value):
        return issubclass(value.dtype.type, np.timedelta64)

    def to_native_types(self, slicer=None, na_rep=None, quoting=None,
                        **kwargs):
        """ convert to our native types format, slicing if desired """

        values = self.values
        if slicer is not None:
            values = values[:, slicer]
        mask = isna(values)

        rvalues = np.empty(values.shape, dtype=object)
        if na_rep is None:
            na_rep = 'NaT'
        rvalues[mask] = na_rep
        imask = (~mask).ravel()

        # FIXME:
        # should use the formats.format.Timedelta64Formatter here
        # to figure what format to pass to the Timedelta
        # e.g.
        # to not show the decimals say
        rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
                                        for val in values.ravel()[imask]],
                                       dtype=object)
        return rvalues


class BoolBlock(NumericBlock):
    __slots__ = ()
    is_bool = True
    _can_hold_na = False

    def _can_hold_element(self, element):
        tipo = maybe_infer_dtype_type(element)
        if tipo is not None:
            return issubclass(tipo.type, np.bool_)
        return isinstance(element, (bool, np.bool_))

    def should_store(self, value):
        return issubclass(value.dtype.type, np.bool_)

    def replace(self, to_replace, value, inplace=False, filter=None,
                regex=False, convert=True, mgr=None):
        inplace = validate_bool_kwarg(inplace, 'inplace')
        # if to_replace cannot even be cast to bool, nothing in this
        # block can match it -> no-op
        to_replace_values = np.atleast_1d(to_replace)
        if not np.can_cast(to_replace_values, bool):
            return self
        return super(BoolBlock, self).replace(to_replace, value,
                                              inplace=inplace, filter=filter,
                                              regex=regex, convert=convert,
                                              mgr=mgr)


class ObjectBlock(Block):
    __slots__ = ()
    is_object = True
    _can_hold_na = True

    def __init__(self, values, placement=None, ndim=2):
        # force string arrays into object dtype storage
        if issubclass(values.dtype.type, compat.string_types):
            values = np.array(values, dtype=object)

        super(ObjectBlock, self).__init__(values, ndim=ndim,
                                          placement=placement)

    @property
    def is_bool(self):
        """ we can be a bool if we have only bool values but are of type
        object
        """
        return lib.is_bool_array(self.values.ravel())

    # TODO: Refactor when convert_objects is removed since there will be 1 path
    def convert(self, *args, **kwargs):
        """ attempt to coerce any object types to better types return a copy of
        the block (if copy = True) by definition we ARE an ObjectBlock!!!!!

        can return multiple blocks!
+ """ + + if args: + raise NotImplementedError + by_item = True if 'by_item' not in kwargs else kwargs['by_item'] + + new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] + new_style = False + for kw in new_inputs: + new_style |= kw in kwargs + + if new_style: + fn = soft_convert_objects + fn_inputs = new_inputs + else: + fn = maybe_convert_objects + fn_inputs = ['convert_dates', 'convert_numeric', + 'convert_timedeltas'] + fn_inputs += ['copy'] + + fn_kwargs = {} + for key in fn_inputs: + if key in kwargs: + fn_kwargs[key] = kwargs[key] + + # operate column-by-column + def f(m, v, i): + shape = v.shape + values = fn(v.ravel(), **fn_kwargs) + try: + values = values.reshape(shape) + values = _block_shape(values, ndim=self.ndim) + except (AttributeError, NotImplementedError): + pass + + return values + + if by_item and not self._is_single_block: + blocks = self.split_and_operate(None, f, False) + else: + values = f(None, self.values.ravel(), None) + blocks = [make_block(values, ndim=self.ndim, + placement=self.mgr_locs)] + + return blocks + + def set(self, locs, values, check=False): + """ + Modify Block in-place with new item value + + Returns + ------- + None + """ + + # GH6026 + if check: + try: + if (self.values[locs] == values).all(): + return + except: + pass + try: + self.values[locs] = values + except (ValueError): + + # broadcasting error + # see GH6171 + new_shape = list(values.shape) + new_shape[0] = len(self.items) + self.values = np.empty(tuple(new_shape), dtype=self.dtype) + self.values.fill(np.nan) + self.values[locs] = values + + def _maybe_downcast(self, blocks, downcast=None): + + if downcast is not None: + return blocks + + # split and convert the blocks + return _extend_blocks([b.convert(datetime=True, numeric=False) + for b in blocks]) + + def _can_hold_element(self, element): + return True + + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments """ + + if isinstance(other, ABCDatetimeIndex): + # to 
        # store DatetimeTZBlock as object
        other = other.astype(object).values

        return values, False, other, False

    def should_store(self, value):
        # object blocks store anything that is not a specialized dtype
        return not (issubclass(value.dtype.type,
                               (np.integer, np.floating, np.complexfloating,
                                np.datetime64, np.bool_)) or
                    # TODO(ExtensionArray): remove is_extension_type
                    # when all extension arrays have been ported.
                    is_extension_type(value) or
                    is_extension_array_dtype(value))

    def replace(self, to_replace, value, inplace=False, filter=None,
                regex=False, convert=True, mgr=None):
        to_rep_is_list = is_list_like(to_replace)
        value_is_list = is_list_like(value)
        both_lists = to_rep_is_list and value_is_list
        either_list = to_rep_is_list or value_is_list

        result_blocks = []
        blocks = [self]

        if not either_list and is_re(to_replace):
            # a single compiled regex: force regex path
            return self._replace_single(to_replace, value, inplace=inplace,
                                        filter=filter, regex=True,
                                        convert=convert, mgr=mgr)
        elif not (either_list or regex):
            # plain scalar replace: defer to the base implementation
            return super(ObjectBlock, self).replace(to_replace, value,
                                                    inplace=inplace,
                                                    filter=filter, regex=regex,
                                                    convert=convert, mgr=mgr)
        elif both_lists:
            # pairwise replacement; each pass may split blocks further
            for to_rep, v in zip(to_replace, value):
                result_blocks = []
                for b in blocks:
                    result = b._replace_single(to_rep, v, inplace=inplace,
                                               filter=filter, regex=regex,
                                               convert=convert, mgr=mgr)
                    result_blocks = _extend_blocks(result, result_blocks)
                blocks = result_blocks
            return result_blocks

        elif to_rep_is_list and regex:
            # many patterns -> one value
            for to_rep in to_replace:
                result_blocks = []
                for b in blocks:
                    result = b._replace_single(to_rep, value, inplace=inplace,
                                               filter=filter, regex=regex,
                                               convert=convert, mgr=mgr)
                    result_blocks = _extend_blocks(result, result_blocks)
                blocks = result_blocks
            return result_blocks

        return self._replace_single(to_replace, value, inplace=inplace,
                                    filter=filter, convert=convert,
                                    regex=regex, mgr=mgr)

    def _replace_single(self, to_replace, value, inplace=False, filter=None,
                        regex=False, convert=True, mgr=None):
        inplace = validate_bool_kwarg(inplace, 'inplace')

        # to_replace is regex compilable
        to_rep_re = regex and is_re_compilable(to_replace)

        # regex is regex compilable
        regex_re = is_re_compilable(regex)

        # only one will survive
        if to_rep_re and regex_re:
            raise AssertionError('only one of to_replace and regex can be '
                                 'regex compilable')

        # if regex was passed as something that can be a regex (rather than a
        # boolean)
        if regex_re:
            to_replace = regex

        regex = regex_re or to_rep_re

        # try to get the pattern attribute (compiled re) or it's a string
        try:
            pattern = to_replace.pattern
        except AttributeError:
            pattern = to_replace

        # if the pattern is not empty and to_replace is either a string or a
        # regex
        if regex and pattern:
            rx = re.compile(to_replace)
        else:
            # if the thing to replace is not a string or compiled regex call
            # the superclass method -> to_replace is some kind of object
            return super(ObjectBlock, self).replace(to_replace, value,
                                                    inplace=inplace,
                                                    filter=filter, regex=regex,
                                                    mgr=mgr)

        new_values = self.values if inplace else self.values.copy()

        # deal with replacing values with objects (strings) that match but
        # whose replacement is not a string (numeric, nan, object)
        if isna(value) or not isinstance(value, compat.string_types):

            def re_replacer(s):
                # whole-value replacement on match; non-string inputs
                # raise TypeError in rx.search and pass through unchanged
                try:
                    return value if rx.search(s) is not None else s
                except TypeError:
                    return s
        else:
            # value is guaranteed to be a string here, s can be either a string
            # or null if it's null it gets returned
            def re_replacer(s):
                try:
                    return rx.sub(value, s)
                except TypeError:
                    return s

        f = np.vectorize(re_replacer, otypes=[self.dtype])

        if filter is None:
            filt = slice(None)
        else:
            filt = self.mgr_locs.isin(filter).nonzero()[0]

        new_values[filt] = f(new_values[filt])

        # convert
        block = self.make_block(new_values)
        if convert:
            block = block.convert(by_item=True, numeric=False)

        return block


class
CategoricalBlock(ExtensionBlock): + __slots__ = () + is_categorical = True + _verify_integrity = True + _can_hold_na = True + _concatenator = staticmethod(_concat._concat_categorical) + + def __init__(self, values, placement, ndim=None): + from pandas.core.arrays.categorical import _maybe_to_categorical + + # coerce to categorical if we can + super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), + placement=placement, + ndim=ndim) + + @property + def _holder(self): + return Categorical + + @property + def array_dtype(self): + """ the dtype to return if I want to construct this block as an + array + """ + return np.object_ + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + + # GH12564: CategoricalBlock is 1-dim only + # while returned results could be any dim + if ((not is_categorical_dtype(result)) and + isinstance(result, np.ndarray)): + result = _block_shape(result, ndim=self.ndim) + + return result + + def shift(self, periods, axis=0, mgr=None): + return self.make_block_same_class(values=self.values.shift(periods), + placement=self.mgr_locs) + + def to_dense(self): + # Categorical.get_values returns a DatetimeIndex for datetime + # categories, so we can't simply use `np.asarray(self.values)` like + # other types. + return self.values.get_values() + + def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer is not None: + # Categorical is always one dimension + values = values[slicer] + mask = isna(values) + values = np.array(values, dtype='object') + values[mask] = na_rep + + # we are expected to return a 2-d ndarray + return values.reshape(1, len(values)) + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. + + Note that this CategoricalBlock._concat_same_type *may* not + return a CategoricalBlock. 
When the categories in `to_concat` + differ, this will return an object ndarray. + + If / when we decide we don't like that behavior: + + 1. Change Categorical._concat_same_type to use union_categoricals + 2. Delete this method. + """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + # not using self.make_block_same_class as values can be object dtype + return make_block( + values, placement=placement or slice(0, len(values), 1), + ndim=self.ndim) + + +class DatetimeBlock(DatetimeLikeBlockMixin, Block): + __slots__ = () + is_datetime = True + _can_hold_na = True + + def __init__(self, values, placement, ndim=None): + values = self._maybe_coerce_values(values) + super(DatetimeBlock, self).__init__(values, + placement=placement, ndim=ndim) + + def _maybe_coerce_values(self, values): + """Input validation for values passed to __init__. Ensure that + we have datetime64ns, coercing if necessary. + + Parameters + ---------- + values : array-like + Must be convertible to datetime64 + + Returns + ------- + values : ndarray[datetime64ns] + + Overridden by DatetimeTZBlock. 
+ """ + if values.dtype != _NS_DTYPE: + values = conversion.ensure_datetime64ns(values) + return values + + def _astype(self, dtype, mgr=None, **kwargs): + """ + these automatically copy, so copy=True has no effect + raise on an except if raise == True + """ + + # if we are passed a datetime64[ns, tz] + if is_datetime64tz_dtype(dtype): + dtype = DatetimeTZDtype(dtype) + + values = self.values + if getattr(values, 'tz', None) is None: + values = DatetimeIndex(values).tz_localize('UTC') + values = values.tz_convert(dtype.tz) + return self.make_block(values) + + # delegate + return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) + + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + # TODO: this still uses asarray, instead of dtype.type + element = np.array(element) + return element.dtype == _NS_DTYPE or element.dtype == np.int64 + return (is_integer(element) or isinstance(element, datetime) or + isna(element)) + + def _try_coerce_args(self, values, other): + """ + Coerce values and other to dtype 'i8'. NaN and NaT convert to + the smallest i8, and will correctly round-trip to NaT if converted + back in _try_coerce_result. 
values is always ndarray-like, other + may not be + + Parameters + ---------- + values : ndarray-like + other : ndarray-like or scalar + + Returns + ------- + base-type values, values mask, base-type other, other mask + """ + + values_mask = isna(values) + values = values.view('i8') + other_mask = False + + if isinstance(other, bool): + raise TypeError + elif is_null_datelike_scalar(other): + other = tslibs.iNaT + other_mask = True + elif isinstance(other, (datetime, np.datetime64, date)): + other = self._box_func(other) + if getattr(other, 'tz') is not None: + raise TypeError("cannot coerce a Timestamp with a tz on a " + "naive Block") + other_mask = isna(other) + other = other.asm8.view('i8') + elif hasattr(other, 'dtype') and is_datetime64_dtype(other): + other_mask = isna(other) + other = other.astype('i8', copy=False).view('i8') + else: + # coercion issues + # let higher levels handle + raise TypeError + + return values, values_mask, other, other_mask + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + if isinstance(result, np.ndarray): + if result.dtype.kind in ['i', 'f', 'O']: + try: + result = result.astype('M8[ns]') + except ValueError: + pass + elif isinstance(result, (np.integer, np.float, np.datetime64)): + result = self._box_func(result) + return result + + @property + def _box_func(self): + return tslibs.Timestamp + + def to_native_types(self, slicer=None, na_rep=None, date_format=None, + quoting=None, **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer is not None: + values = values[..., slicer] + + from pandas.io.formats.format import _get_format_datetime64_from_values + format = _get_format_datetime64_from_values(values, date_format) + + result = tslib.format_array_from_datetime( + values.view('i8').ravel(), tz=getattr(self.values, 'tz', None), + format=format, na_rep=na_rep).reshape(values.shape) + return np.atleast_2d(result) + + def should_store(self, 
value): + return (issubclass(value.dtype.type, np.datetime64) and + not is_datetimetz(value)) + + def set(self, locs, values, check=False): + """ + Modify Block in-place with new item value + + Returns + ------- + None + """ + if values.dtype != _NS_DTYPE: + # Workaround for numpy 1.6 bug + values = conversion.ensure_datetime64ns(values) + + self.values[locs] = values + + +class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): + """ implement a datetime64 block with a tz attribute """ + __slots__ = () + _concatenator = staticmethod(_concat._concat_datetime) + is_datetimetz = True + + def __init__(self, values, placement, ndim=2, dtype=None): + # XXX: This will end up calling _maybe_coerce_values twice + # when dtype is not None. It's relatively cheap (just an isinstance) + # but it'd nice to avoid. + # + # If we can remove dtype from __init__, and push that conversion + # push onto the callers, then we can remove this entire __init__ + # and just use DatetimeBlock's. + if dtype is not None: + values = self._maybe_coerce_values(values, dtype=dtype) + super(DatetimeTZBlock, self).__init__(values, placement=placement, + ndim=ndim) + + def _maybe_coerce_values(self, values, dtype=None): + """Input validation for values passed to __init__. Ensure that + we have datetime64TZ, coercing if necessary. 
+ + Parametetrs + ----------- + values : array-like + Must be convertible to datetime64 + dtype : string or DatetimeTZDtype, optional + Does a shallow copy to this tz + + Returns + ------- + values : ndarray[datetime64ns] + """ + if not isinstance(values, self._holder): + values = self._holder(values) + + if dtype is not None: + if isinstance(dtype, compat.string_types): + dtype = DatetimeTZDtype.construct_from_string(dtype) + values = values._shallow_copy(tz=dtype.tz) + + if values.tz is None: + raise ValueError("cannot create a DatetimeTZBlock without a tz") + + return values + + @property + def is_view(self): + """ return a boolean if I am possibly a view """ + # check the ndarray values of the DatetimeIndex values + return self.values.values.base is not None + + def copy(self, deep=True, mgr=None): + """ copy constructor """ + values = self.values + if deep: + values = values.copy(deep=True) + return self.make_block_same_class(values) + + def external_values(self): + """ we internally represent the data as a DatetimeIndex, but for + external compat with ndarray, export as a ndarray of Timestamps + """ + return self.values.astype('datetime64[ns]').values + + def get_values(self, dtype=None): + # return object dtype as Timestamps with the zones + if is_object_dtype(dtype): + return lib.map_infer( + self.values.ravel(), self._box_func).reshape(self.values.shape) + return self.values + + def _slice(self, slicer): + """ return a slice of my values """ + if isinstance(slicer, tuple): + col, loc = slicer + if not com.is_null_slice(col) and col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values[loc] + return self.values[slicer] + + def _try_coerce_args(self, values, other): + """ + localize and return i8 for the values + + Parameters + ---------- + values : ndarray-like + other : ndarray-like or scalar + + Returns + ------- + base-type values, values mask, base-type other, other mask + """ + values_mask = _block_shape(isna(values), 
ndim=self.ndim) + # asi8 is a view, needs copy + values = _block_shape(values.asi8, ndim=self.ndim) + other_mask = False + + if isinstance(other, ABCSeries): + other = self._holder(other) + other_mask = isna(other) + + if isinstance(other, bool): + raise TypeError + elif (is_null_datelike_scalar(other) or + (lib.is_scalar(other) and isna(other))): + other = tslibs.iNaT + other_mask = True + elif isinstance(other, self._holder): + if other.tz != self.values.tz: + raise ValueError("incompatible or non tz-aware value") + other_mask = _block_shape(isna(other), ndim=self.ndim) + other = _block_shape(other.asi8, ndim=self.ndim) + elif isinstance(other, (np.datetime64, datetime, date)): + other = tslibs.Timestamp(other) + tz = getattr(other, 'tz', None) + + # test we can have an equal time zone + if tz is None or str(tz) != str(self.values.tz): + raise ValueError("incompatible or non tz-aware value") + other_mask = isna(other) + other = other.value + else: + raise TypeError + + return values, values_mask, other, other_mask + + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + if isinstance(result, np.ndarray): + if result.dtype.kind in ['i', 'f', 'O']: + result = result.astype('M8[ns]') + elif isinstance(result, (np.integer, np.float, np.datetime64)): + result = tslibs.Timestamp(result, tz=self.values.tz) + if isinstance(result, np.ndarray): + # allow passing of > 1dim if its trivial + if result.ndim > 1: + result = result.reshape(np.prod(result.shape)) + result = self.values._shallow_copy(result) + + return result + + @property + def _box_func(self): + return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) + + def shift(self, periods, axis=0, mgr=None): + """ shift the block by periods """ + + # think about moving this to the DatetimeIndex. 
This is a non-freq + # (number of periods) shift ### + + N = len(self) + indexer = np.zeros(N, dtype=int) + if periods > 0: + indexer[periods:] = np.arange(N - periods) + else: + indexer[:periods] = np.arange(-periods, N) + + new_values = self.values.asi8.take(indexer) + + if periods > 0: + new_values[:periods] = tslibs.iNaT + else: + new_values[periods:] = tslibs.iNaT + + new_values = self.values._shallow_copy(new_values) + return [self.make_block_same_class(new_values, + placement=self.mgr_locs)] + + def diff(self, n, axis=0, mgr=None): + """1st discrete difference + + Parameters + ---------- + n : int, number of periods to diff + axis : int, axis to diff upon. default 0 + mgr : default None + + Return + ------ + A list with a new TimeDeltaBlock. + + Note + ---- + The arguments here are mimicking shift so they are called correctly + by apply. + """ + if axis == 0: + # Cannot currently calculate diff across multiple blocks since this + # function is invoked via apply + raise NotImplementedError + new_values = (self.values - self.shift(n, axis=axis)[0].values).asi8 + + # Reshape the new_values like how algos.diff does for timedelta data + new_values = new_values.reshape(1, len(new_values)) + new_values = new_values.astype('timedelta64[ns]') + return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. 
+ """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + # not using self.make_block_same_class as values can be non-tz dtype + return make_block( + values, placement=placement or slice(0, len(values), 1)) + + +class SparseBlock(NonConsolidatableMixIn, Block): + """ implement as a list of sparse arrays of the same dtype """ + __slots__ = () + is_sparse = True + is_numeric = True + _box_to_block_values = False + _can_hold_na = True + _ftype = 'sparse' + _concatenator = staticmethod(_concat._concat_sparse) + + def __init__(self, values, placement, ndim=None): + # Ensure that we have the underlying SparseArray here... + if isinstance(values, ABCSeries): + values = values.values + assert isinstance(values, SparseArray) + super(SparseBlock, self).__init__(values, placement, ndim=ndim) + + @property + def _holder(self): + return SparseArray + + @property + def shape(self): + return (len(self.mgr_locs), self.sp_index.length) + + @property + def fill_value(self): + # return np.nan + return self.values.fill_value + + @fill_value.setter + def fill_value(self, v): + self.values.fill_value = v + + def to_dense(self): + return self.values.to_dense().view() + + @property + def sp_values(self): + return self.values.sp_values + + @sp_values.setter + def sp_values(self, v): + # reset the sparse values + self.values = SparseArray(v, sparse_index=self.sp_index, + kind=self.kind, dtype=v.dtype, + fill_value=self.values.fill_value, + copy=False) + + @property + def sp_index(self): + return self.values.sp_index + + @property + def kind(self): + return self.values.kind + + def _astype(self, dtype, copy=False, errors='raise', values=None, + klass=None, mgr=None, **kwargs): + if values is None: + values = self.values + values = values.astype(dtype, copy=copy) + return self.make_block_same_class(values=values, + placement=self.mgr_locs) + + def __len__(self): + try: + return self.sp_index.length + except: + return 0 + + def copy(self, deep=True, 
mgr=None): + return self.make_block_same_class(values=self.values, + sparse_index=self.sp_index, + kind=self.kind, copy=deep, + placement=self.mgr_locs) + + def make_block_same_class(self, values, placement, sparse_index=None, + kind=None, dtype=None, fill_value=None, + copy=False, ndim=None): + """ return a new block """ + if dtype is None: + dtype = values.dtype + if fill_value is None and not isinstance(values, SparseArray): + fill_value = self.values.fill_value + + # if not isinstance(values, SparseArray) and values.ndim != self.ndim: + # raise ValueError("ndim mismatch") + + if values.ndim == 2: + nitems = values.shape[0] + + if nitems == 0: + # kludgy, but SparseBlocks cannot handle slices, where the + # output is 0-item, so let's convert it to a dense block: it + # won't take space since there's 0 items, plus it will preserve + # the dtype. + return self.make_block(np.empty(values.shape, dtype=dtype), + placement) + elif nitems > 1: + raise ValueError("Only 1-item 2d sparse blocks are supported") + else: + values = values.reshape(values.shape[1]) + + new_values = SparseArray(values, sparse_index=sparse_index, + kind=kind or self.kind, dtype=dtype, + fill_value=fill_value, copy=copy) + return self.make_block(new_values, + placement=placement) + + def interpolate(self, method='pad', axis=0, inplace=False, limit=None, + fill_value=None, **kwargs): + + values = missing.interpolate_2d(self.values.to_dense(), method, axis, + limit, fill_value) + return self.make_block_same_class(values=values, + placement=self.mgr_locs) + + def fillna(self, value, limit=None, inplace=False, downcast=None, + mgr=None): + # we may need to upcast our fill to match our dtype + if limit is not None: + raise NotImplementedError("specifying a limit for 'fillna' has " + "not been implemented yet") + values = self.values if inplace else self.values.copy() + values = values.fillna(value, downcast=downcast) + return [self.make_block_same_class(values=values, + placement=self.mgr_locs)] + + 
def shift(self, periods, axis=0, mgr=None): + """ shift the block by periods """ + N = len(self.values.T) + indexer = np.zeros(N, dtype=int) + if periods > 0: + indexer[periods:] = np.arange(N - periods) + else: + indexer[:periods] = np.arange(-periods, N) + new_values = self.values.to_dense().take(indexer) + # convert integer to float if necessary. need to do a lot more than + # that, handle boolean etc also + new_values, fill_value = maybe_upcast(new_values) + if periods > 0: + new_values[:periods] = fill_value + else: + new_values[periods:] = fill_value + return [self.make_block_same_class(new_values, + placement=self.mgr_locs)] + + def sparse_reindex(self, new_index): + """ sparse reindex and return a new block + current reindex only works for float64 dtype! """ + values = self.values + values = values.sp_index.to_int_index().reindex( + values.sp_values.astype('float64'), values.fill_value, new_index) + return self.make_block_same_class(values, sparse_index=new_index, + placement=self.mgr_locs) + + +# ----------------------------------------------------------------- +# Constructor Helpers + +def get_block_type(values, dtype=None): + """ + Find the appropriate Block subclass to use for the given values and dtype. 
+ + Parameters + ---------- + values : ndarray-like + dtype : numpy or pandas dtype + + Returns + ------- + cls : class, subclass of Block + """ + dtype = dtype or values.dtype + vtype = dtype.type + + if is_sparse(values): + cls = SparseBlock + elif issubclass(vtype, np.floating): + cls = FloatBlock + elif issubclass(vtype, np.timedelta64): + assert issubclass(vtype, np.integer) + cls = TimeDeltaBlock + elif issubclass(vtype, np.complexfloating): + cls = ComplexBlock + elif is_categorical(values): + cls = CategoricalBlock + elif is_extension_array_dtype(values): + cls = ExtensionBlock + elif issubclass(vtype, np.datetime64): + assert not is_datetimetz(values) + cls = DatetimeBlock + elif is_datetimetz(values): + cls = DatetimeTZBlock + elif issubclass(vtype, np.integer): + cls = IntBlock + elif dtype == np.bool_: + cls = BoolBlock + else: + cls = ObjectBlock + return cls + + +def make_block(values, placement, klass=None, ndim=None, dtype=None, + fastpath=None): + if fastpath is not None: + # GH#19265 pyarrow is passing this + warnings.warn("fastpath argument is deprecated, will be removed " + "in a future release.", DeprecationWarning) + if klass is None: + dtype = dtype or values.dtype + klass = get_block_type(values, dtype) + + elif klass is DatetimeTZBlock and not is_datetimetz(values): + return klass(values, ndim=ndim, + placement=placement, dtype=dtype) + + return klass(values, ndim=ndim, placement=placement) + + +# ----------------------------------------------------------------- + +def _extend_blocks(result, blocks=None): + """ return a new extended blocks, givin the result """ + from pandas.core.internals import BlockManager + if blocks is None: + blocks = [] + if isinstance(result, list): + for r in result: + if isinstance(r, list): + blocks.extend(r) + else: + blocks.append(r) + elif isinstance(result, BlockManager): + blocks.extend(result.blocks) + else: + blocks.append(result) + return blocks + + +def _block_shape(values, ndim=1, shape=None): + """ 
guarantee the shape of the values to be at least 1 d """ + if values.ndim < ndim: + if shape is None: + shape = values.shape + values = values.reshape(tuple((1, ) + shape)) + return values + + +def _merge_blocks(blocks, dtype=None, _can_consolidate=True): + + if len(blocks) == 1: + return blocks[0] + + if _can_consolidate: + + if dtype is None: + if len({b.dtype for b in blocks}) != 1: + raise AssertionError("_merge_blocks are invalid!") + dtype = blocks[0].dtype + + # FIXME: optimization potential in case all mgrs contain slices and + # combination of those slices is a slice, too. + new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) + new_values = _vstack([b.values for b in blocks], dtype) + + argsort = np.argsort(new_mgr_locs) + new_values = new_values[argsort] + new_mgr_locs = new_mgr_locs[argsort] + + return make_block(new_values, placement=new_mgr_locs) + + # no merge + return blocks + + +def _vstack(to_stack, dtype): + + # work around NumPy 1.6 bug + if dtype == _NS_DTYPE or dtype == _TD_DTYPE: + new_values = np.vstack([x.view('i8') for x in to_stack]) + return new_values.view(dtype) + + else: + return np.vstack(to_stack) + + +def _block2d_to_blocknd(values, placement, shape, labels, ref_items): + """ pivot to the labels shape """ + panel_shape = (len(placement),) + shape + + # TODO: lexsort depth needs to be 2!! + + # Create observation selection vector using major and minor + # labels, for converting to panel format. 
+ selector = _factor_indexer(shape[1:], labels) + mask = np.zeros(np.prod(shape), dtype=bool) + mask.put(selector, True) + + if mask.all(): + pvalues = np.empty(panel_shape, dtype=values.dtype) + else: + dtype, fill_value = maybe_promote(values.dtype) + pvalues = np.empty(panel_shape, dtype=dtype) + pvalues.fill(fill_value) + + for i in range(len(placement)): + pvalues[i].flat[mask] = values[:, i] + + return make_block(pvalues, placement=placement) + + +def _safe_reshape(arr, new_shape): + """ + If possible, reshape `arr` to have shape `new_shape`, + with a couple of exceptions (see gh-13012): + + 1) If `arr` is a ExtensionArray or Index, `arr` will be + returned as is. + 2) If `arr` is a Series, the `_values` attribute will + be reshaped and returned. + + Parameters + ---------- + arr : array-like, object to be reshaped + new_shape : int or tuple of ints, the new shape + """ + if isinstance(arr, ABCSeries): + arr = arr._values + if not isinstance(arr, ABCExtensionArray): + arr = arr.reshape(new_shape) + return arr + + +def _factor_indexer(shape, labels): + """ + given a tuple of shape and a list of Categorical labels, return the + expanded label indexer + """ + mult = np.array(shape)[::-1].cumprod()[::-1] + return ensure_platform_int( + np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) + + +def _putmask_smart(v, m, n): + """ + Return a new ndarray, try to preserve dtype if possible. 
+ + Parameters + ---------- + v : `values`, updated in-place (array like) + m : `mask`, applies to both sides (array like) + n : `new values` either scalar or an array like aligned with `values` + + Returns + ------- + values : ndarray with updated values + this *may* be a copy of the original + + See Also + -------- + ndarray.putmask + """ + + # we cannot use np.asarray() here as we cannot have conversions + # that numpy does when numeric are mixed with strings + + # n should be the length of the mask or a scalar here + if not is_list_like(n): + n = np.repeat(n, len(m)) + elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar + n = np.repeat(np.array(n, ndmin=1), len(m)) + + # see if we are only masking values that if putted + # will work in the current dtype + try: + nn = n[m] + + # make sure that we have a nullable type + # if we have nulls + if not _isna_compat(v, nn[0]): + raise ValueError + + # we ignore ComplexWarning here + with warnings.catch_warnings(record=True): + nn_at = nn.astype(v.dtype) + + # avoid invalid dtype comparisons + # between numbers & strings + + # only compare integers/floats + # don't compare integers to datetimelikes + if (not is_numeric_v_string_like(nn, nn_at) and + (is_float_dtype(nn.dtype) or + is_integer_dtype(nn.dtype) and + is_float_dtype(nn_at.dtype) or + is_integer_dtype(nn_at.dtype))): + + comp = (nn == nn_at) + if is_list_like(comp) and comp.all(): + nv = v.copy() + nv[m] = nn_at + return nv + except (ValueError, IndexError, TypeError): + pass + + n = np.asarray(n) + + def _putmask_preserve(nv, n): + try: + nv[m] = n[m] + except (IndexError, ValueError): + nv[m] = n + return nv + + # preserves dtype if possible + if v.dtype.kind == n.dtype.kind: + return _putmask_preserve(v, n) + + # change the dtype if needed + dtype, _ = maybe_promote(n.dtype) + + if is_extension_type(v.dtype) and is_object_dtype(dtype): + v = v.get_values(dtype) + else: + v = v.astype(dtype) + + return _putmask_preserve(v, n) diff --git 
a/pandas/tests/extension/test_external_block.py b/pandas/tests/extension/test_external_block.py index 991da41168aa0..aa32bf6051617 100644 --- a/pandas/tests/extension/test_external_block.py +++ b/pandas/tests/extension/test_external_block.py @@ -5,7 +5,8 @@ import pandas as pd from pandas.core.internals import ( - BlockManager, SingleBlockManager, NonConsolidatableMixIn, Block) + BlockManager, SingleBlockManager) +from pandas.core.internals.blocks import Block, NonConsolidatableMixIn import pytest
Follow-up to #21903
https://api.github.com/repos/pandas-dev/pandas/pulls/22014
2018-07-21T19:03:25Z
2018-07-23T11:24:01Z
2018-07-23T11:24:01Z
2018-07-23T14:10:31Z
[CLN] [BLD] Fix many compiler warnings
diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index ff6570e2106b2..7f4a2eeafeea2 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -29,7 +29,7 @@ dtypes = [('Float64', 'float64', 'float64_t'), ctypedef struct {{name}}VectorData: {{arg}} *data - size_t n, m + Py_ssize_t n, m {{endif}} @@ -147,7 +147,7 @@ cdef class StringVector: cdef resize(self): cdef: char **orig_data - size_t i, m + Py_ssize_t i, m m = self.data.m self.data.m = max(self.data.m * 4, _INIT_VEC_CAP) @@ -172,7 +172,7 @@ cdef class StringVector: def to_array(self): cdef: ndarray ao - size_t n + Py_ssize_t n object val ao = np.empty(self.data.n, dtype=np.object) @@ -198,7 +198,7 @@ cdef class ObjectVector: cdef: PyObject **data - size_t n, m + Py_ssize_t n, m ndarray ao bint external_view_exists @@ -281,7 +281,7 @@ cdef class {{name}}HashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof({{dtype}}_t) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, {{dtype}}_t val): @@ -522,13 +522,13 @@ cdef class StringHashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof(char *) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, object val): cdef: khiter_t k - char *v + const char *v v = util.get_c_string(val) k = kh_get_str(self.table, v) @@ -541,7 +541,7 @@ cdef class StringHashTable(HashTable): cdef: khiter_t k int ret = 0 - char *v + const char *v v = util.get_c_string(val) @@ -560,10 +560,10 @@ cdef class StringHashTable(HashTable): int64_t *resbuf = <int64_t*> labels.data khiter_t k kh_str_t *table = self.table - char *v - char **vecs + const char *v + const char **vecs - vecs = <char **> malloc(n * sizeof(char *)) + 
vecs = <const char **> malloc(n * sizeof(char *)) for i in range(n): val = values[i] v = util.get_c_string(val) @@ -589,10 +589,10 @@ cdef class StringHashTable(HashTable): object val ObjectVector uniques khiter_t k - char *v - char **vecs + const char *v + const char **vecs - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char **> malloc(n * sizeof(char *)) uindexer = np.empty(n, dtype=np.int64) for i in range(n): val = values[i] @@ -627,7 +627,7 @@ cdef class StringHashTable(HashTable): Py_ssize_t i, n = len(values) int ret = 0 object val - char *v + const char *v khiter_t k int64_t[:] locs = np.empty(n, dtype=np.int64) @@ -660,12 +660,12 @@ cdef class StringHashTable(HashTable): Py_ssize_t i, n = len(values) int ret = 0 object val - char *v - char **vecs + const char *v + const char **vecs khiter_t k # these by-definition *must* be strings - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char **> malloc(n * sizeof(char *)) for i in range(n): val = values[i] @@ -693,8 +693,8 @@ cdef class StringHashTable(HashTable): Py_ssize_t idx, count = count_prior int ret = 0 object val - char *v - char **vecs + const char *v + const char **vecs khiter_t k bint use_na_value @@ -705,7 +705,7 @@ cdef class StringHashTable(HashTable): # pre-filter out missing # and assign pointers - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char **> malloc(n * sizeof(char *)) for i in range(n): val = values[i] @@ -769,7 +769,7 @@ cdef class PyObjectHashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof(PyObject *) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, object val): diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 9e56802b92bf0..663ec66a35db2 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -329,10 
+329,11 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a, * Returns -1 on error, 0 on success, and 1 (with no error set) * if obj doesn't have the needed date or datetime attributes. */ -int convert_pydatetime_to_datetimestruct(PyDateTime_Date *obj, +int convert_pydatetime_to_datetimestruct(PyDateTime_Date *dtobj, npy_datetimestruct *out) { // Assumes that obj is a valid datetime object PyObject *tmp; + PyObject *obj = (PyObject*)dtobj; /* Initialize the output to all zeros */ memset(out, 0, sizeof(npy_datetimestruct)); diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index 4347d0c8c47d4..04009c6581ac0 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -31,7 +31,7 @@ extern const npy_datetimestruct _NS_MAX_DTS; // stuff pandas needs // ---------------------------------------------------------------------------- -int convert_pydatetime_to_datetimestruct(PyDateTime_Date *obj, +int convert_pydatetime_to_datetimestruct(PyDateTime_Date *dtobj, npy_datetimestruct *out); npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index 25eede6c286dc..a18d12616a802 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -262,7 +262,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { ("\n\nmake_stream_space: nbytes = %zu. 
grow_buffer(self->stream...)\n", nbytes)) self->stream = (char *)grow_buffer((void *)self->stream, self->stream_len, - (size_t*)&self->stream_cap, nbytes * 2, + (int64_t*)&self->stream_cap, nbytes * 2, sizeof(char), &status); TRACE( ("make_stream_space: self->stream=%p, self->stream_len = %zu, " @@ -289,7 +289,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { cap = self->words_cap; self->words = (char **)grow_buffer((void *)self->words, self->words_len, - (size_t*)&self->words_cap, nbytes, + (int64_t*)&self->words_cap, nbytes, sizeof(char *), &status); TRACE( ("make_stream_space: grow_buffer(self->self->words, %zu, %zu, %zu, " @@ -320,7 +320,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { cap = self->lines_cap; self->line_start = (int64_t *)grow_buffer((void *)self->line_start, self->lines + 1, - (size_t*)&self->lines_cap, nbytes, + (int64_t*)&self->lines_cap, nbytes, sizeof(int64_t), &status); TRACE(( "make_stream_space: grow_buffer(self->line_start, %zu, %zu, %zu, %d)\n", diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 4bab32e93ab1e..8c7b92ddeaa81 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -427,7 +427,7 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, #if (PY_VERSION_HEX >= 0x03030000) if (PyUnicode_IS_COMPACT_ASCII(obj)) { Py_ssize_t len; - char *data = PyUnicode_AsUTF8AndSize(obj, &len); + char *data = (char*)PyUnicode_AsUTF8AndSize(obj, &len); *_outLen = len; return data; } diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index cd3ce5c1a8f09..4054154cd285b 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -927,7 +927,8 @@ def extract_freq(ndarray[object] values): # ----------------------------------------------------------------------- # period helpers - +@cython.wraparound(False) +@cython.boundscheck(False) cdef 
ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, int freq, object tz): cdef: diff --git a/pandas/_libs/tslibs/util.pxd b/pandas/_libs/tslibs/util.pxd index 305c4f8f908e0..efdb1570ed878 100644 --- a/pandas/_libs/tslibs/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -70,7 +70,7 @@ cdef extern from "../src/numpy_helper.h": int assign_value_1d(ndarray, Py_ssize_t, object) except -1 cnp.int64_t get_nat() object get_value_1d(ndarray, Py_ssize_t) - char *get_c_string(object) except NULL + const char *get_c_string(object) except NULL object char_to_string(char*) ctypedef fused numeric: diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx index 04bb330e595dd..427414b80dfe4 100644 --- a/pandas/io/msgpack/_unpacker.pyx +++ b/pandas/io/msgpack/_unpacker.pyx @@ -139,7 +139,7 @@ def unpackb(object packed, object object_hook=None, object list_hook=None, ret = unpack_construct(&ctx, buf, buf_len, &off) if ret == 1: obj = unpack_data(&ctx) - if off < buf_len: + if <Py_ssize_t> off < buf_len: raise ExtraData(obj, PyBytes_FromStringAndSize( buf + off, buf_len - off)) return obj @@ -367,9 +367,11 @@ cdef class Unpacker(object): self.buf_tail = tail + _buf_len cdef read_from_file(self): + # Assume self.max_buffer_size - (self.buf_tail - self.buf_head) >= 0 next_bytes = self.file_like_read( min(self.read_size, - self.max_buffer_size - (self.buf_tail - self.buf_head))) + <Py_ssize_t>(self.max_buffer_size - + (self.buf_tail - self.buf_head)))) if next_bytes: self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) @@ -417,7 +419,9 @@ cdef class Unpacker(object): def read_bytes(self, Py_ssize_t nbytes): """Read a specified number of raw bytes from the stream""" cdef size_t nread - nread = min(self.buf_tail - self.buf_head, nbytes) + + # Assume that self.buf_tail - self.buf_head >= 0 + nread = min(<Py_ssize_t>(self.buf_tail - self.buf_head), nbytes) ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) self.buf_head 
+= nread if len(ret) < nbytes and self.file_like is not None: diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index e2a1107969990..3d94dc127a1d2 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -104,7 +104,8 @@ cdef ndarray[uint8_t, ndim=1] rle_decompress( raise ValueError("unknown control byte: {byte}" .format(byte=control_byte)) - if len(result) != result_length: + # In py37 cython/clang sees `len(outbuff)` as size_t and not Py_ssize_t + if <Py_ssize_t>len(result) != <Py_ssize_t>result_length: raise ValueError("RLE: {got} != {expect}".format(got=len(result), expect=result_length)) @@ -186,12 +187,14 @@ cdef ndarray[uint8_t, ndim=1] rdc_decompress( else: raise ValueError("unknown RDC command") - if len(outbuff) != result_length: + # In py37 cython/clang sees `len(outbuff)` as size_t and not Py_ssize_t + if <Py_ssize_t>len(outbuff) != <Py_ssize_t>result_length: raise ValueError("RDC: {got} != {expect}\n" .format(got=len(outbuff), expect=result_length)) return np.asarray(outbuff) + cdef enum ColumnTypes: column_type_decimal = 1 column_type_string = 2 @@ -204,6 +207,7 @@ cdef int page_mix_types_1 = const.page_mix_types[1] cdef int page_data_type = const.page_data_type cdef int subheader_pointers_offset = const.subheader_pointers_offset + cdef class Parser(object): cdef: diff --git a/setup.py b/setup.py index 85c5970af018f..d265733738425 100755 --- a/setup.py +++ b/setup.py @@ -491,7 +491,6 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): if suffix == '.pyx': lib_depends = [srcpath(f, suffix='.pyx', subdir='_libs/src') for f in lib_depends] - lib_depends.append('pandas/_libs/util.pxd') else: lib_depends = [] @@ -507,7 +506,7 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): np_datetime_sources = ['pandas/_libs/src/datetime/np_datetime.c', 'pandas/_libs/src/datetime/np_datetime_strings.c'] -tseries_depends = np_datetime_headers + ['pandas/_libs/tslibs/np_datetime.pxd'] +tseries_depends = np_datetime_headers ext_data = {
Checked in OSX in 2.7 and 3.7, on Ubuntu on 2.7 and 3.5, this fixes just about all the warnings that are fixable at my pay-grade. The ones that are left: - Ubiquitous `NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION` --> I expect there is a compiler flag that can be set in setup.py to silence this (and only this) warning, haven't figured it out. - In Py3.7: ``` pandas/_libs/src/ujson/python/objToJSON.c:430:15: warning: initializing 'char *' with an expression of type 'const char *' discards qualifiers [-Wincompatible-pointer-types-discards-qualifiers] char *data = PyUnicode_AsUTF8AndSize(obj, &len); ^ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ pandas/_libs/src/ujson/python/objToJSON.c:436:14: warning: 'PyUnicode_EncodeUTF8' is deprecated [-Wdeprecated-declarations] newObj = PyUnicode_EncodeUTF8(PyUnicode_AS_UNICODE(obj), ^ /usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/include/python3.7m/unicodeobject.h:1324:7: note: 'PyUnicode_EncodeUTF8' has been explicitly marked deprecated here ) Py_DEPRECATED(3.3); ^ /usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/include/python3.7m/pyport.h:493:54: note: expanded from macro 'Py_DEPRECATED' #define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__)) ^ 3 warnings generated. ``` - On Linux: ``` In file included from pandas/_libs/src/datetime/np_datetime.h:21:0, from pandas/_libs/src/datetime/np_datetime_strings.c:33: /usr/include/python3.5m/datetime.h:191:25: warning: ‘PyDateTimeAPI’ defined but not used [-Wunused-variable] static PyDateTime_CAPI *PyDateTimeAPI = NULL; ``` - Also on Linux a bunch of "this may be unitialized" warnings.
https://api.github.com/repos/pandas-dev/pandas/pulls/22013
2018-07-21T19:00:32Z
2018-07-24T00:11:21Z
2018-07-24T00:11:21Z
2018-07-24T00:19:38Z
Default to_* methods to compression='infer'
diff --git a/doc/source/io.rst b/doc/source/io.rst index 9fe578524c8e0..c2c8c1c17700f 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -298,7 +298,7 @@ compression : {``'infer'``, ``'gzip'``, ``'bz2'``, ``'zip'``, ``'xz'``, ``None`` Set to ``None`` for no decompression. .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression. - + .. versionchanged:: 0.24.0 'infer' option added and set to default. thousands : str, default ``None`` Thousands separator. decimal : str, default ``'.'`` diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 42e286f487a7d..213a4e91176c5 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -177,7 +177,8 @@ Other Enhancements - :func:`read_html` copies cell data across ``colspan`` and ``rowspan``, and it treats all-``th`` table rows as headers if ``header`` kwarg is not given and there is no ``thead`` (:issue:`17054`) - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) -- :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) +- :func:`~DataFrame.to_csv`, :func:`~Series.to_csv`, :func:`~DataFrame.to_json`, and :func:`~Series.to_json` now support ``compression='infer'`` to infer compression based on filename extension (:issue:`15008`). + The default compression for ``to_csv``, ``to_json``, and ``to_pickle`` methods has been updated to ``'infer'`` (:issue:`22004`). 
- :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) - :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 834cc3d188b39..ebd35cb1a6a1a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1715,7 +1715,7 @@ def to_panel(self): def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, - mode='w', encoding=None, compression=None, quoting=None, + mode='w', encoding=None, compression='infer', quoting=None, quotechar='"', line_terminator='\n', chunksize=None, tupleize_cols=None, date_format=None, doublequote=True, escapechar=None, decimal='.'): @@ -1750,10 +1750,14 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, encoding : string, optional A string representing the encoding to use in the output file, defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, + default 'infer' If 'infer' and `path_or_buf` is path-like, then detect compression from the following extensions: '.gz', '.bz2', '.zip' or '.xz' (otherwise no compression). + + .. 
versionchanged:: 0.24.0 + 'infer' option added and set to default line_terminator : string, default ``'\n'`` The newline character or character sequence to use in the output file diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7a12ce0e1385e..f62605c342702 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1933,7 +1933,7 @@ def _repr_latex_(self): def to_json(self, path_or_buf=None, orient=None, date_format=None, double_precision=10, force_ascii=True, date_unit='ms', - default_handler=None, lines=False, compression=None, + default_handler=None, lines=False, compression='infer', index=True): """ Convert the object to a JSON string. @@ -1999,13 +1999,14 @@ def to_json(self, path_or_buf=None, orient=None, date_format=None, like. .. versionadded:: 0.19.0 - - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default None + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, + default 'infer' A string representing the compression to use in the output file, only used when the first argument is a filename. .. versionadded:: 0.21.0 - + .. versionchanged:: 0.24.0 + 'infer' option added and set to default index : boolean, default True Whether to include the index values in the JSON string. 
Not including the index (``index=False``) is only supported when diff --git a/pandas/core/series.py b/pandas/core/series.py index 8f9fe5ee516e6..21dea15772cc0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3767,7 +3767,7 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, def to_csv(self, path=None, index=True, sep=",", na_rep='', float_format=None, header=False, index_label=None, - mode='w', encoding=None, compression=None, date_format=None, + mode='w', encoding=None, compression='infer', date_format=None, decimal='.'): """ Write Series to a comma-separated values (csv) file @@ -3795,10 +3795,13 @@ def to_csv(self, path=None, index=True, sep=",", na_rep='', encoding : string, optional a string representing the encoding to use if the contents are non-ascii, for python versions prior to 3 - compression : string, optional + compression : None or string, default 'infer' A string representing the compression to use in the output file. - Allowed values are 'gzip', 'bz2', 'zip', 'xz'. This input is only - used when the first argument is a filename. + Allowed values are None, 'gzip', 'bz2', 'zip', 'xz', and 'infer'. + This input is only used when the first argument is a filename. + + .. versionchanged:: 0.24.0 + 'infer' option added and set to default date_format: string, default None Format string for datetime objects. decimal: string, default '.' 
diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 0796888554a46..6fabd2573a7b4 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -21,8 +21,13 @@ from pandas.core.dtypes.generic import ( ABCMultiIndex, ABCPeriodIndex, ABCDatetimeIndex, ABCIndexClass) -from pandas.io.common import (_get_handle, UnicodeWriter, _expand_user, - _stringify_path) +from pandas.io.common import ( + _expand_user, + _get_handle, + _infer_compression, + _stringify_path, + UnicodeWriter, +) class CSVFormatter(object): @@ -30,7 +35,7 @@ class CSVFormatter(object): def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None, cols=None, header=True, index=True, index_label=None, mode='w', nanRep=None, encoding=None, - compression=None, quoting=None, line_terminator='\n', + compression='infer', quoting=None, line_terminator='\n', chunksize=None, tupleize_cols=False, quotechar='"', date_format=None, doublequote=True, escapechar=None, decimal='.'): @@ -50,8 +55,10 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.index = index self.index_label = index_label self.mode = mode + if encoding is None: + encoding = 'ascii' if compat.PY2 else 'utf-8' self.encoding = encoding - self.compression = compression + self.compression = _infer_compression(self.path_or_buf, compression) if quoting is None: quoting = csvlib.QUOTE_MINIMAL @@ -124,16 +131,10 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.nlevels = 0 def save(self): - # create the writer & save - if self.encoding is None: - if compat.PY2: - encoding = 'ascii' - else: - encoding = 'utf-8' - else: - encoding = self.encoding - - # GH 21227 internal compression is not used when file-like passed. + """ + Create the writer & save + """ + # GH21227 internal compression is not used when file-like passed. 
if self.compression and hasattr(self.path_or_buf, 'write'): msg = ("compression has no effect when passing file-like " "object as input.") @@ -147,7 +148,7 @@ def save(self): if is_zip: # zipfile doesn't support writing string to archive. uses string # buffer to receive csv writing and dump into zip compression - # file handle. GH 21241, 21118 + # file handle. GH21241, GH21118 f = StringIO() close = False elif hasattr(self.path_or_buf, 'write'): @@ -155,7 +156,7 @@ def save(self): close = False else: f, handles = _get_handle(self.path_or_buf, self.mode, - encoding=encoding, + encoding=self.encoding, compression=self.compression) close = True @@ -165,23 +166,23 @@ def save(self): doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar) - if encoding == 'ascii': + if self.encoding == 'ascii': self.writer = csvlib.writer(f, **writer_kwargs) else: - writer_kwargs['encoding'] = encoding + writer_kwargs['encoding'] = self.encoding self.writer = UnicodeWriter(f, **writer_kwargs) self._save() finally: if is_zip: - # GH 17778 handles zip compression separately. + # GH17778 handles zip compression separately. 
buf = f.getvalue() if hasattr(self.path_or_buf, 'write'): self.path_or_buf.write(buf) else: f, handles = _get_handle(self.path_or_buf, self.mode, - encoding=encoding, + encoding=self.encoding, compression=self.compression) f.write(buf) close = True diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 629e00ebfa7d0..c5f8872f93d94 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -28,7 +28,7 @@ # interface to/from def to_json(path_or_buf, obj, orient=None, date_format='epoch', double_precision=10, force_ascii=True, date_unit='ms', - default_handler=None, lines=False, compression=None, + default_handler=None, lines=False, compression='infer', index=True): if not index and orient not in ['split', 'table']: diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index 5c9739be73393..ceaac9818354a 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -1,19 +1,20 @@ """ - Tests for the pandas.io.common functionalities +Tests for the pandas.io.common functionalities """ import mmap -import pytest import os -from os.path import isabs + +import pytest import pandas as pd -import pandas.util.testing as tm +import pandas.io.common as icom import pandas.util._test_decorators as td - -from pandas.io import common -from pandas.compat import is_platform_windows, StringIO, FileNotFoundError - -from pandas import read_csv, concat +import pandas.util.testing as tm +from pandas.compat import ( + is_platform_windows, + StringIO, + FileNotFoundError, +) class CustomFSPath(object): @@ -55,24 +56,24 @@ class TestCommonIOCapabilities(object): def test_expand_user(self): filename = '~/sometest' - expanded_name = common._expand_user(filename) + expanded_name = icom._expand_user(filename) assert expanded_name != filename - assert isabs(expanded_name) + assert os.path.isabs(expanded_name) assert os.path.expanduser(filename) == expanded_name def test_expand_user_normal_path(self): filename = 
'/somefolder/sometest' - expanded_name = common._expand_user(filename) + expanded_name = icom._expand_user(filename) assert expanded_name == filename assert os.path.expanduser(filename) == expanded_name @td.skip_if_no('pathlib') def test_stringify_path_pathlib(self): - rel_path = common._stringify_path(Path('.')) + rel_path = icom._stringify_path(Path('.')) assert rel_path == '.' - redundant_path = common._stringify_path(Path('foo//bar')) + redundant_path = icom._stringify_path(Path('foo//bar')) assert redundant_path == os.path.join('foo', 'bar') @td.skip_if_no('py.path') @@ -80,11 +81,11 @@ def test_stringify_path_localpath(self): path = os.path.join('foo', 'bar') abs_path = os.path.abspath(path) lpath = LocalPath(path) - assert common._stringify_path(lpath) == abs_path + assert icom._stringify_path(lpath) == abs_path def test_stringify_path_fspath(self): p = CustomFSPath('foo/bar.csv') - result = common._stringify_path(p) + result = icom._stringify_path(p) assert result == 'foo/bar.csv' @pytest.mark.parametrize('extension,expected', [ @@ -97,36 +98,36 @@ def test_stringify_path_fspath(self): @pytest.mark.parametrize('path_type', path_types) def test_infer_compression_from_path(self, extension, expected, path_type): path = path_type('foo/bar.csv' + extension) - compression = common._infer_compression(path, compression='infer') + compression = icom._infer_compression(path, compression='infer') assert compression == expected def test_get_filepath_or_buffer_with_path(self): filename = '~/sometest' - filepath_or_buffer, _, _, should_close = common.get_filepath_or_buffer( + filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer( filename) assert filepath_or_buffer != filename - assert isabs(filepath_or_buffer) + assert os.path.isabs(filepath_or_buffer) assert os.path.expanduser(filename) == filepath_or_buffer assert not should_close def test_get_filepath_or_buffer_with_buffer(self): input_buffer = StringIO() - filepath_or_buffer, _, _, should_close = 
common.get_filepath_or_buffer( + filepath_or_buffer, _, _, should_close = icom.get_filepath_or_buffer( input_buffer) assert filepath_or_buffer == input_buffer assert not should_close def test_iterator(self): - reader = read_csv(StringIO(self.data1), chunksize=1) - result = concat(reader, ignore_index=True) - expected = read_csv(StringIO(self.data1)) + reader = pd.read_csv(StringIO(self.data1), chunksize=1) + result = pd.concat(reader, ignore_index=True) + expected = pd.read_csv(StringIO(self.data1)) tm.assert_frame_equal(result, expected) # GH12153 - it = read_csv(StringIO(self.data1), chunksize=1) + it = pd.read_csv(StringIO(self.data1), chunksize=1) first = next(it) tm.assert_frame_equal(first, expected.iloc[[0]]) - tm.assert_frame_equal(concat(it), expected.iloc[1:]) + tm.assert_frame_equal(pd.concat(it), expected.iloc[1:]) @pytest.mark.parametrize('reader, module, error_class, fn_ext', [ (pd.read_csv, 'os', FileNotFoundError, 'csv'), @@ -246,18 +247,18 @@ def test_constructor_bad_file(self, mmap_file): msg = "[Errno 22]" err = mmap.error - tm.assert_raises_regex(err, msg, common.MMapWrapper, non_file) + tm.assert_raises_regex(err, msg, icom.MMapWrapper, non_file) target = open(mmap_file, 'r') target.close() msg = "I/O operation on closed file" tm.assert_raises_regex( - ValueError, msg, common.MMapWrapper, target) + ValueError, msg, icom.MMapWrapper, target) def test_get_attr(self, mmap_file): with open(mmap_file, 'r') as target: - wrapper = common.MMapWrapper(target) + wrapper = icom.MMapWrapper(target) attrs = dir(wrapper.mmap) attrs = [attr for attr in attrs @@ -271,7 +272,7 @@ def test_get_attr(self, mmap_file): def test_next(self, mmap_file): with open(mmap_file, 'r') as target: - wrapper = common.MMapWrapper(target) + wrapper = icom.MMapWrapper(target) lines = target.readlines() for line in lines: @@ -285,4 +286,4 @@ def test_unknown_engine(self): df = tm.makeDataFrame() df.to_csv(path) with tm.assert_raises_regex(ValueError, 'Unknown engine'): - 
read_csv(path, engine='pyt') + pd.read_csv(path, engine='pyt') diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py new file mode 100644 index 0000000000000..76788ced44e84 --- /dev/null +++ b/pandas/tests/io/test_compression.py @@ -0,0 +1,99 @@ +import os + +import pytest + +import pandas as pd +import pandas.io.common as icom +import pandas.util.testing as tm + + +@pytest.mark.parametrize('obj', [ + pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) +@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) +def test_compression_size(obj, method, compression_only): + with tm.ensure_clean() as path: + getattr(obj, method)(path, compression=compression_only) + compressed_size = os.path.getsize(path) + getattr(obj, method)(path, compression=None) + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size + + +@pytest.mark.parametrize('obj', [ + pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']), + pd.Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) +@pytest.mark.parametrize('method', ['to_csv', 'to_json']) +def test_compression_size_fh(obj, method, compression_only): + with tm.ensure_clean() as path: + f, handles = icom._get_handle(path, 'w', compression=compression_only) + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed + compressed_size = os.path.getsize(path) + with tm.ensure_clean() as path: + f, handles = icom._get_handle(path, 'w', compression=None) + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size + + +@pytest.mark.parametrize('write_method, write_kwargs, read_method', [ + ('to_csv', {'index': False}, pd.read_csv), + ('to_json', {}, pd.read_json), + 
('to_pickle', {}, pd.read_pickle), +]) +def test_dataframe_compression_defaults_to_infer( + write_method, write_kwargs, read_method, compression_only): + # GH22004 + input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=['X', 'Y', 'Z']) + extension = icom._compression_to_extension[compression_only] + with tm.ensure_clean('compressed' + extension) as path: + getattr(input, write_method)(path, **write_kwargs) + output = read_method(path, compression=compression_only) + tm.assert_frame_equal(output, input) + + +@pytest.mark.parametrize('write_method,write_kwargs,read_method,read_kwargs', [ + ('to_csv', {'index': False, 'header': True}, + pd.read_csv, {'squeeze': True}), + ('to_json', {}, pd.read_json, {'typ': 'series'}), + ('to_pickle', {}, pd.read_pickle, {}), +]) +def test_series_compression_defaults_to_infer( + write_method, write_kwargs, read_method, read_kwargs, + compression_only): + # GH22004 + input = pd.Series([0, 5, -2, 10], name='X') + extension = icom._compression_to_extension[compression_only] + with tm.ensure_clean('compressed' + extension) as path: + getattr(input, write_method)(path, **write_kwargs) + output = read_method(path, compression=compression_only, **read_kwargs) + tm.assert_series_equal(output, input, check_names=False) + + +def test_compression_warning(compression_only): + # Assert that passing a file object to to_csv while explicitly specifying a + # compression protocol triggers a RuntimeWarning, as per GH21227. + # Note that pytest has an issue that causes assert_produces_warning to fail + # in Python 2 if the warning has occurred in previous tests + # (see https://git.io/fNEBm & https://git.io/fNEBC). Hence, should this + # test fail in just Python 2 builds, it likely indicates that other tests + # are producing RuntimeWarnings, thereby triggering the pytest bug. 
+ df = pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567], + [12.32112, 123123.2, 321321.2]], + columns=['X', 'Y', 'Z']) + with tm.ensure_clean() as path: + f, handles = icom._get_handle(path, 'w', compression=compression_only) + with tm.assert_produces_warning(RuntimeWarning, + check_stacklevel=False): + with f: + df.to_csv(f, compression=compression_only) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index e1c9202189972..868525e818b62 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -1,17 +1,16 @@ # -*- coding: utf-8 -*- -import pytest -import os import collections from functools import partial import numpy as np +import pytest -from pandas import Series, DataFrame, Timestamp -import pandas.core.common as com -from pandas.core import ops -from pandas.io.common import _get_handle -import pandas.util.testing as tm +from pandas import Series, Timestamp +from pandas.core import ( + common as com, + ops, +) def test_get_callable_name(): @@ -20,7 +19,7 @@ def test_get_callable_name(): def fn(x): return x - lambda_ = lambda x: x + lambda_ = lambda x: x # noqa: E731 part1 = partial(fn) part2 = partial(part1) @@ -111,57 +110,3 @@ def test_standardize_mapping(): dd = collections.defaultdict(list) assert isinstance(com.standardize_mapping(dd), partial) - - -@pytest.mark.parametrize('obj', [ - DataFrame(100 * [[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - columns=['X', 'Y', 'Z']), - Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) -@pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) -def test_compression_size(obj, method, compression_only): - - with tm.ensure_clean() as filename: - getattr(obj, method)(filename, compression=compression_only) - compressed = os.path.getsize(filename) - getattr(obj, method)(filename, compression=None) - uncompressed = os.path.getsize(filename) - assert uncompressed > compressed - - -@pytest.mark.parametrize('obj', [ - DataFrame(100 * 
[[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - columns=['X', 'Y', 'Z']), - Series(100 * [0.123456, 0.234567, 0.567567], name='X')]) -@pytest.mark.parametrize('method', ['to_csv', 'to_json']) -def test_compression_size_fh(obj, method, compression_only): - - with tm.ensure_clean() as filename: - f, _handles = _get_handle(filename, 'w', compression=compression_only) - with f: - getattr(obj, method)(f) - assert not f.closed - assert f.closed - compressed = os.path.getsize(filename) - with tm.ensure_clean() as filename: - f, _handles = _get_handle(filename, 'w', compression=None) - with f: - getattr(obj, method)(f) - assert not f.closed - assert f.closed - uncompressed = os.path.getsize(filename) - assert uncompressed > compressed - - -# GH 21227 -def test_compression_warning(compression_only): - df = DataFrame(100 * [[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - columns=['X', 'Y', 'Z']) - with tm.ensure_clean() as filename: - f, _handles = _get_handle(filename, 'w', compression=compression_only) - with tm.assert_produces_warning(RuntimeWarning, - check_stacklevel=False): - with f: - df.to_csv(f, compression=compression_only)
- [x] closes https://github.com/pandas-dev/pandas/issues/22004 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR does the following: - Update default compression for `to_csv`, `to_json`, and `to_pickle` methods to infer. - Adds `test_compression_defaults_to_infer` to test that compression='infer' is default for the relevant to_* methods. - Fixes a bug in CSVFormatter where setting `compression='infer'` with a file object would produce a RuntimeWarning. - Adds documentation to `test_compression_warning` which can fail due to a pytest bug. - Cleans up how the encoding argument in CSVFormatter is processed. - Moves compression tests from `pandas/tests/test_common.py` to `pandas/tests/io/test_common.py`
https://api.github.com/repos/pandas-dev/pandas/pulls/22011
2018-07-21T15:14:44Z
2018-08-01T21:23:34Z
2018-08-01T21:23:34Z
2018-08-06T21:35:15Z
BUG: ValueError in Py3 applying stats over column level with skipna=False (#19720)
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index 69525aaea1d62..628e43eb8200e 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -32,7 +32,7 @@ Bug Fixes - Bug where calling :func:`DataFrameGroupBy.agg` with a list of functions including ``ohlc`` as the non-initial element would raise a ``ValueError`` (:issue:`21716`) - Bug in ``roll_quantile`` caused a memory leak when calling ``.rolling(...).quantile(q)`` with ``q`` in (0,1) (:issue:`21965`) -- +- Bug in ``DataFrame.groupby`` caused ValueError in Py3 when applying stats function over column level with skipna=False (:issue `19720`) **Conversion** diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index f2c55a56b119d..13fab0bbe0c4c 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -625,7 +625,15 @@ def _aggregate_series_pure_python(self, obj, func): splitter = get_splitter(obj, group_index, ngroups, axis=self.axis) for label, group in splitter: - res = func(group) + try: + res = func(group) + except ValueError: + if isinstance(group, Series): + # GH19720, align Py3 ValueError with Py2 TypeError + raise TypeError + else: + raise + if result is None: if (isinstance(res, (Series, Index, np.ndarray))): raise ValueError('Function does not reduce') diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index b48395efaf5c8..b68fda49eb9d9 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1091,6 +1091,18 @@ def test_sum_bool(self): bools.sum(1) bools.sum(0) + def test_sum_column_level_noskipna(self): + # GH19720 + df = DataFrame(np.ones([2, 8])) + df.iloc[0, 0] = np.nan + df.columns = MultiIndex.from_product( + [list('ab'), list('cd'), list('ef')]) + result = df.sum(axis=1, level=[0, 1], skipna=False) + expected = DataFrame( + [[np.nan, 2., 2., 2.], [2., 2., 2., 2.]], + columns=MultiIndex.from_product([list('ab'), list('cd')])) + 
tm.assert_frame_equal(result, expected) + def test_mean_corner(self): # unit test when have object data the_mean = self.mixed_frame.mean(axis=0)
- [x] closes #19720 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/22010
2018-07-21T12:26:58Z
2018-11-23T03:27:06Z
null
2018-11-23T03:27:07Z
CLN: remove F821 flake8 error in test case
diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 76a50a9ecf5e7..30a670ead3aa0 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -9,7 +9,7 @@ import numpy as np from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, Categorical, compat, concat, option_context) -from pandas.compat import u, PY2 +from pandas.compat import u from pandas import _np_version_under1p14 from pandas.core.dtypes.dtypes import DatetimeTZDtype, CategoricalDtype @@ -356,9 +356,10 @@ def test_select_dtypes_datetime_with_tz(self): expected = df3.reindex(columns=[]) assert_frame_equal(result, expected) - @pytest.mark.parametrize( - "dtype", [str, "str", np.string_, "S1", - "unicode", np.unicode_, "U1"] + ([unicode] if PY2 else [])) + @pytest.mark.parametrize("dtype", [ + str, "str", np.string_, "S1", "unicode", np.unicode_, "U1", + compat.text_type + ]) @pytest.mark.parametrize("arg", ["include", "exclude"]) def test_select_dtypes_str_raises(self, dtype, arg): df = DataFrame({"a": list("abc"),
Kill flake8 error: F821 undefined name 'unicode' `compat.text_type == unicode if PY2`
https://api.github.com/repos/pandas-dev/pandas/pulls/22009
2018-07-21T09:57:46Z
2018-07-22T12:38:00Z
2018-07-22T12:38:00Z
2018-07-22T12:38:12Z
core: try coerce result back to DatetimeBlock
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 8fe3023e9537c..f143cbd13c22c 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -436,6 +436,7 @@ Datetimelike - Fixed bug where two :class:`DateOffset` objects with different ``normalize`` attributes could evaluate as equal (:issue:`21404`) - Fixed bug where :meth:`Timestamp.resolution` incorrectly returned 1-microsecond ``timedelta`` instead of 1-nanosecond :class:`Timedelta` (:issue:`21336`,:issue:`21365`) +- Fixed bug where :class:`DataFrame` with ``dtype='datetime64[ns]'`` operating with :class:`DateOffset` could cast to ``dtype='object'`` (:issue:`21610`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py index fde3aaa14ac5d..7ef21784b2d02 100644 --- a/pandas/core/internals/__init__.py +++ b/pandas/core/internals/__init__.py @@ -63,7 +63,9 @@ ABCSeries, ABCDatetimeIndex, ABCExtensionArray, - ABCIndexClass) + ABCIndexClass, + ABCDateOffset, +) import pandas.core.common as com import pandas.core.algorithms as algos @@ -2737,7 +2739,7 @@ def _try_coerce_args(self, values, other): def _try_coerce_result(self, result): """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): + if isinstance(result, (np.ndarray, Block)): if result.dtype.kind in ['i', 'f', 'O']: try: result = result.astype('M8[ns]') @@ -2785,6 +2787,17 @@ def set(self, locs, values, check=False): self.values[locs] = values + def eval(self, func, other, try_cast=False, **kwargs): + block = super(DatetimeBlock, self).eval(func, other, try_cast=try_cast, + **kwargs)[0] + if try_cast: + if isinstance(other, (np.datetime64, date)): + block = TimeDeltaBlock(block.values, block.mgr_locs, + ndim=block.ndim) + elif isinstance(other, ABCDateOffset): + block = self._try_coerce_result(block) + return [block] + class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): """ implement a datetime64 block with a tz attribute """ 
@@ -2920,6 +2933,8 @@ def _try_coerce_result(self, result): if isinstance(result, np.ndarray): if result.dtype.kind in ['i', 'f', 'O']: result = result.astype('M8[ns]') + elif isinstance(result, Block): + result = self.make_block_same_class(result.values.flat) elif isinstance(result, (np.integer, np.float, np.datetime64)): result = tslibs.Timestamp(result, tz=self.values.tz) if isinstance(result, np.ndarray): diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index fb381a5640519..9a41360f4b7bd 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import datetime import pytest import numpy as np @@ -211,6 +212,29 @@ def test_df_sub_datetime64_not_ns(self): pd.Timedelta(days=2)]) tm.assert_frame_equal(res, expected) + def test_timestamp_df_add_dateoffset(self): + # GH 21610 + expected = pd.DataFrame([pd.Timestamp('2019')]) + result = pd.DataFrame([pd.Timestamp('2018')]) + pd.DateOffset(years=1) + tm.assert_frame_equal(expected, result) + + expected = pd.DataFrame([pd.Timestamp('2019', tz='Asia/Shanghai')]) + result = (pd.DataFrame([pd.Timestamp('2018', tz='Asia/Shanghai')]) + + pd.DateOffset(years=1)) + tm.assert_frame_equal(expected, result) + + @pytest.mark.parametrize('other', [ + pd.Timestamp('2017'), + np.datetime64('2017'), + datetime.datetime(2017, 1, 1), + datetime.date(2017, 1, 1), + ]) + def test_timestamp_df_sub_timestamp(self, other): + # GH 8554 12437 + expected = pd.DataFrame([pd.Timedelta('365d')]) + result = pd.DataFrame([pd.Timestamp('2018')]) - other + tm.assert_frame_equal(expected, result) + @pytest.mark.parametrize('data', [ [1, 2, 3], [1.1, 2.2, 3.3], diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 39418fb72bf4a..f85978e2e3a58 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1238,7 +1238,6 @@ class 
TestCanHoldElement(object): (2**63, 'complex128'), (True, 'bool'), (np.timedelta64(20, 'ns'), '<m8[ns]'), - (np.datetime64(20, 'ns'), '<M8[ns]'), ]) @pytest.mark.parametrize('op', [ operator.add, @@ -1255,7 +1254,6 @@ def test_binop_other(self, op, value, dtype): (operator.truediv, 'bool'), (operator.mod, 'i8'), (operator.mod, 'complex128'), - (operator.mod, '<M8[ns]'), (operator.mod, '<m8[ns]'), (operator.pow, 'bool')} if (op, dtype) in skip:
- [x] closes #21610 #8554 #12437 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry --- On hold until `Block.eval` is reformed.
https://api.github.com/repos/pandas-dev/pandas/pulls/22008
2018-07-21T09:13:26Z
2018-08-18T15:40:01Z
null
2018-08-18T15:40:01Z
core: fix DatetimeBlock operated with timedelta
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 1ac6d075946dd..52503f06c7e5c 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -443,7 +443,7 @@ Datetimelike Timedelta ^^^^^^^^^ -- +- Bug in :class:`DataFrame` with ``dtype='datetime64[ns]'`` when adding :class:`Timedelta` (:issue:`22005`) - - diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ffa2267dd6877..a461d963f253a 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2713,6 +2713,11 @@ def _try_coerce_args(self, values, other): "naive Block") other_mask = isna(other) other = other.asm8.view('i8') + elif isinstance(other, (timedelta, np.timedelta64)): + if not isinstance(other, Timedelta): + other = Timedelta(other) + other_mask = isna(other) + other = other.asm8.view('i8') elif hasattr(other, 'dtype') and is_datetime64_dtype(other): other_mask = isna(other) other = other.astype('i8', copy=False).view('i8') diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index fb381a5640519..60951b30ffa10 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -1,4 +1,6 @@ # -*- coding: utf-8 -*- +import datetime + import pytest import numpy as np @@ -211,6 +213,23 @@ def test_df_sub_datetime64_not_ns(self): pd.Timedelta(days=2)]) tm.assert_frame_equal(res, expected) + @pytest.mark.parametrize('other', [ + pd.Timedelta('1d'), + datetime.timedelta(days=1), + np.timedelta64(1, 'D') + ]) + def test_timestamp_df_add_timedelta(self, other): + # GH 22005 + expected = pd.DataFrame([pd.Timestamp('2018-01-02')]) + result = pd.DataFrame([pd.Timestamp('2018-01-01')]) + other + tm.assert_frame_equal(result, expected) + + result = pd.DataFrame([pd.Timestamp('2018-01-03')]) - other + tm.assert_frame_equal(result, expected) + + result = other + pd.DataFrame([pd.Timestamp('2018-01-01')]) + tm.assert_frame_equal(result, 
expected) + @pytest.mark.parametrize('data', [ [1, 2, 3], [1.1, 2.2, 3.3],
- [X] closes #22005 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/22007
2018-07-21T05:24:54Z
2018-08-18T12:58:05Z
null
2018-08-18T12:58:05Z
Fix for issue #21150, using a simple lock to prevent an issue with multiple threads accessing an Index
diff --git a/ci/azure-windows-27.yaml b/ci/azure-windows-27.yaml index bcd9ddee1715e..1bb8921df0f46 100644 --- a/ci/azure-windows-27.yaml +++ b/ci/azure-windows-27.yaml @@ -6,6 +6,7 @@ dependencies: - beautifulsoup4 - bottleneck - dateutil + - futures - gcsfs - html5lib - jinja2=2.8 diff --git a/ci/circle-27-compat.yaml b/ci/circle-27-compat.yaml index 84ec7e20fc8f1..2c2252713bdf2 100644 --- a/ci/circle-27-compat.yaml +++ b/ci/circle-27-compat.yaml @@ -5,6 +5,7 @@ channels: dependencies: - bottleneck=1.0.0 - cython=0.28.2 + - futures - jinja2=2.8 - numexpr=2.4.4 # we test that we correctly don't use an unsupported numexpr - numpy=1.9.3 diff --git a/ci/travis-27-locale.yaml b/ci/travis-27-locale.yaml index aca65f27d4187..d579ff0f297c3 100644 --- a/ci/travis-27-locale.yaml +++ b/ci/travis-27-locale.yaml @@ -5,6 +5,7 @@ channels: dependencies: - bottleneck=1.0.0 - cython=0.28.2 + - futures - lxml - matplotlib=1.4.3 - numpy=1.9.3 diff --git a/ci/travis-27.yaml b/ci/travis-27.yaml index cc0c5a3192188..3aef1caac73ac 100644 --- a/ci/travis-27.yaml +++ b/ci/travis-27.yaml @@ -8,6 +8,7 @@ dependencies: - cython=0.28.2 - fastparquet - feather-format + - futures - gcsfs - html5lib - ipython diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 3f76915655f58..6645f33c8d8b8 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -23,6 +23,18 @@ from pandas._libs import algos, hashtable as _hash from pandas._libs.tslibs import Timestamp, Timedelta, period as periodlib from pandas._libs.missing import checknull +# Python 2 vs Python 3 +try: + from thread import allocate_lock as _thread_allocate_lock +except ImportError: + try: + from _thread import allocate_lock as _thread_allocate_lock + except ImportError: + try: + from dummy_thread import allocate_lock as _thread_allocate_lock + except ImportError: + from _dummy_thread import allocate_lock as _thread_allocate_lock + cdef int64_t iNaT = util.get_nat() @@ -53,6 +65,9 @@ def get_value_box(arr: ndarray, 
loc: object) -> object: # Don't populate hash tables in monotonic indexes larger than this _SIZE_CUTOFF = 1000000 +# Used in _ensure_mapping_populated to ensure is_unique behaves correctly +# in multi-threaded code, see gh-21150 +_mapping_populated_lock = _thread_allocate_lock() cdef class IndexEngine: @@ -236,17 +251,17 @@ cdef class IndexEngine: cdef inline _ensure_mapping_populated(self): # this populates the mapping - # if its not already populated + # if it is not already populated # also satisfies the need_unique_check - if not self.is_mapping_populated: - - values = self._get_index_values() - self.mapping = self._make_hash_table(len(values)) - self._call_map_locations(values) + with _mapping_populated_lock: + if not self.is_mapping_populated: + values = self._get_index_values() + self.mapping = self._make_hash_table(len(values)) + self._call_map_locations(values) - if len(self.mapping) == len(values): - self.unique = 1 + if len(self.mapping) == len(values): + self.unique = 1 self.need_unique_check = 0 diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index a753e925b0ed8..00bbe9b90aadc 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -33,6 +33,8 @@ import pandas as pd from pandas._libs.tslib import Timestamp +from concurrent.futures import ThreadPoolExecutor + class TestIndex(Base): _holder = Index @@ -2509,6 +2511,21 @@ def test_ensure_index_from_sequences(self, data, names, expected): tm.assert_index_equal(result, expected) +class TestThreadSafety(object): + + @pytest.mark.slow + @pytest.mark.parametrize('execution_number', range(7)) + def test_isunique(self, execution_number): + """This test is executed seven times, each time it uses a pool of + two threads to run a test that is very likely to fail without the + fix for gh-21150. It is not a deterministic test, as there is + still a chance it will pass even though the bug exists. 
But + with the fix, it must always work with no issues.""" + x = pd.date_range('2001', '2020') + with ThreadPoolExecutor(2) as p: + assert all(p.map(lambda x: x.is_unique, [x] * 2)) + + @pytest.mark.parametrize('opname', ['eq', 'ne', 'le', 'lt', 'ge', 'gt', 'add', 'radd', 'sub', 'rsub', 'mul', 'rmul', 'truediv', 'rtruediv',
Hi, First time working with Cython, and after uni did very little C/C++, so feel free to be a bit more serious about reviewing this pull request. I am trying to practice more Python, and learn more about the code base of projects like pandas, and found the issue #21150, which happens a lot in Java (my main language), so decided to see how I'd go about fixing it. Used the same approach found in `strptime.pyx`, to add a lock to prevent two threads from accessing the `mapping` Hashtable in the `_ensure_mapping_populated` method. The problem occurred when two threads accessed the method around the same time, then one would create the `mapping` Hashtable instance, preventing the other from entering the `if` statement which also sets the `unique` flag to `true/1`. I didn't find any other part that could cause an obvious issue due to this lock, or another thread-safety issue. However, I can imagine that moving the logic maybe to the constructor so that we immediately know whether the index is unique or not, could fix it too, and remove the need of the lock, at the expense of changing the current design (I believe the index is not tightly coupled with the values passed... later, after certain calls, the mapping hashtable gets populated, which gives better performance I believe). Or, if core devs prefer to ask users to use a lock in their code base instead of implementing in Pandas, I'd be keen to close this PR and open a new one for the documentation with some note about it. Feel free to suggest any changes or even leave comments to educate me :-) The documentation for setting up the development environment and for contributing was super easy to follow, I hope I didn't forget anything. Thank you! Bruno - [ ] closes #21150 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/22006
2018-07-21T05:22:04Z
2018-11-23T03:35:20Z
null
2018-11-23T03:35:20Z
CLN: Remove Unneeded BlockManager methods
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 55f2e06a1a976..97cc7f96cb24f 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -32,6 +32,8 @@ cdef class BlockPlacement: def __init__(self, val): cdef slice slc + self._as_slice = None + self._as_array = None self._has_slice = False self._has_array = False @@ -144,6 +146,7 @@ cdef class BlockPlacement: other_int = <Py_ssize_t>other if other_int == 0: + # BlockPlacement is treated as immutable return self start, stop, step, l = slice_get_indices_ex(s) @@ -155,33 +158,21 @@ cdef class BlockPlacement: raise ValueError("iadd causes length change") if stop < 0: - self._as_slice = slice(start, None, step) + val = slice(start, None, step) else: - self._as_slice = slice(start, stop, step) + val = slice(start, stop, step) - self._has_array = False - self._as_array = None + return BlockPlacement(val) else: newarr = self.as_array + other if (newarr < 0).any(): raise ValueError("iadd causes length change") - self._as_array = newarr - self._has_array = True - self._has_slice = False - self._as_slice = None - - return self - - cdef BlockPlacement copy(self): - cdef slice s = self._ensure_has_slice() - if s is not None: - return BlockPlacement(s) - else: - return BlockPlacement(self._as_array) + val = newarr + return BlockPlacement(val) def add(self, other): - return self.copy().iadd(other) + return self.iadd(other) def sub(self, other): return self.add(-other) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 8ad569003a43a..e7b7cb463a27b 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -82,7 +82,6 @@ class BlockManager(PandasObject): get_slice(slice_like, axis) get(label) iget(loc) - get_scalar(label_tup) take(indexer, axis) reindex_axis(new_labels, axis) @@ -993,21 +992,6 @@ def iget(self, i, fastpath=True): ndim=1)], self.axes[1]) - def get_scalar(self, tup): - """ - Retrieve single item - """ - 
full_loc = [ax.get_loc(x) for ax, x in zip(self.axes, tup)] - blk = self.blocks[self._blknos[full_loc[0]]] - values = blk.values - - # FIXME: this may return non-upcasted types? - if values.ndim == 1: - return values[full_loc[1]] - - full_loc[0] = self._blklocs[full_loc[0]] - return values[tuple(full_loc)] - def delete(self, item): """ Delete selected item (items if non-unique) in-place. @@ -1382,9 +1366,9 @@ def take(self, indexer, axis=1, verify=True, convert=True): axis=axis, allow_dups=True) def merge(self, other, lsuffix='', rsuffix=''): - if not self._is_indexed_like(other): - raise AssertionError('Must have same axes to merge managers') - + # We assume at this point that the axes of self and other match. + # This is only called from Panel.join, which reindexes prior + # to calling to ensure this assumption holds. l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, right=other.items, rsuffix=rsuffix) new_items = _concat_indexes([l, r]) @@ -1402,19 +1386,6 @@ def merge(self, other, lsuffix='', rsuffix=''): return self.__class__(_consolidate(new_blocks), new_axes) - def _is_indexed_like(self, other): - """ - Check all axes except items - """ - if self.ndim != other.ndim: - raise AssertionError( - 'Number of dimensions must agree got {ndim} and ' - '{oth_ndim}'.format(ndim=self.ndim, oth_ndim=other.ndim)) - for ax, oax in zip(self.axes[1:], other.axes[1:]): - if not ax.equals(oax): - return False - return True - def equals(self, other): self_axes, other_axes = self.axes, other.axes if len(self_axes) != len(other_axes): diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 39418fb72bf4a..0b06775326ab1 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -329,17 +329,6 @@ def test_is_mixed_dtype(self): assert create_mgr('a,b:f8; c,d: f4').is_mixed_type assert create_mgr('a,b:f8; c,d: object').is_mixed_type - def test_is_indexed_like(self): - mgr1 = 
create_mgr('a,b: f8') - mgr2 = create_mgr('a:i8; b:bool') - mgr3 = create_mgr('a,b,c: f8') - assert mgr1._is_indexed_like(mgr1) - assert mgr1._is_indexed_like(mgr2) - assert mgr1._is_indexed_like(mgr3) - - assert not mgr1._is_indexed_like(mgr1.get_slice( - slice(-1), axis=1)) - def test_duplicate_ref_loc_failure(self): tmp_mgr = create_mgr('a:bool; a: f8') @@ -396,15 +385,6 @@ def test_categorical_block_pickle(self): smgr2 = tm.round_trip_pickle(smgr) assert_series_equal(Series(smgr), Series(smgr2)) - def test_get_scalar(self, mgr): - for item in mgr.items: - for i, index in enumerate(mgr.axes[1]): - res = mgr.get_scalar((item, index)) - exp = mgr.get(item, fastpath=False)[i] - assert res == exp - exp = mgr.get(item).internal_values()[i] - assert res == exp - def test_get(self): cols = Index(list('abc')) values = np.random.rand(3, 3)
BlockManager.get_scalar is never used outside of tests, BlockManager._is_indexed_like is only ever called from `merge`, which is only called once in Panel, before which the indexed_like check is already done. Also BlockPlacements.iadd was more complicated than it needed to be, so tore out bits of that.
https://api.github.com/repos/pandas-dev/pandas/pulls/22002
2018-07-20T19:09:19Z
2018-07-26T12:41:33Z
2018-07-26T12:41:33Z
2018-07-26T16:20:56Z
CLN: De-privatize core.common funcs, remove unused
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 137fd5aafe5bd..8cb384f50d371 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -390,7 +390,7 @@ Removal of prior version deprecations/changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - The ``LongPanel`` and ``WidePanel`` classes have been removed (:issue:`10892`) -- +- Several private functions were removed from the (non-public) module ``pandas.core.common`` (:issue:`22001`) - - diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 78c9113ce60de..49705cb6d9ad2 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -262,7 +262,7 @@ def match(to_match, values, na_sentinel=-1): ------- match : ndarray of integers """ - values = com._asarray_tuplesafe(values) + values = com.asarray_tuplesafe(values) htable, _, values, dtype, ndtype = _get_hashtable_algo(values) to_match, _, _ = _ensure_data(to_match, dtype) table = htable(min(len(to_match), 1000000)) @@ -412,7 +412,7 @@ def isin(comps, values): # handle categoricals return comps._values.isin(values) - comps = com._values_from_object(comps) + comps = com.values_from_object(comps) comps, dtype, _ = _ensure_data(comps) values, _, _ = _ensure_data(values, dtype=dtype) diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 0d73b2c60d76d..4584e4694cdc5 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -43,6 +43,7 @@ import pandas.core.algorithms as algorithms +from pandas.io.formats import console from pandas.io.formats.terminal import get_terminal_size from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs from pandas.core.config import get_option @@ -1887,7 +1888,7 @@ def _repr_categories_info(self): length=len(self.categories), dtype=dtype) width, height = get_terminal_size() max_width = get_option("display.width") or width - if com.in_ipython_frontend(): + if 
console.in_ipython_frontend(): # 0 = no breaks max_width = 0 levstring = "" diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 78e6d1f222160..29f97b344f267 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -120,7 +120,7 @@ def wrapper(self, other): self._assert_tzawareness_compat(other) result = meth(self, np.asarray(other)) - result = com._values_from_object(result) + result = com.values_from_object(result) # Make sure to pass an array to result[...]; indexing with # Series breaks with older version of numpy diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 2c8853dec4f69..5ecc79e030f56 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -984,7 +984,7 @@ def __array__(self, dtype=None): examples='', )) def to_tuples(self, na_tuple=True): - tuples = com._asarray_tuplesafe(zip(self.left, self.right)) + tuples = com.asarray_tuplesafe(zip(self.left, self.right)) if not na_tuple: # GH 18756 tuples = np.where(~self.isna(), tuples, np.nan) diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index cb5afa34add2a..9c98f73312dbf 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -167,7 +167,7 @@ def _generate_range(cls, start, end, periods, freq, fields): freq = Period._maybe_convert_freq(freq) field_count = len(fields) - if com._count_not_none(start, end) > 0: + if com.count_not_none(start, end) > 0: if field_count > 0: raise ValueError('Can either instantiate from fields ' 'or endpoints, but not both') @@ -392,7 +392,7 @@ def _maybe_convert_timedelta(self, other): # Constructor Helpers def _get_ordinal_range(start, end, periods, freq, mult=1): - if com._count_not_none(start, end, periods) != 2: + if com.count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') diff --git a/pandas/core/arrays/timedeltas.py 
b/pandas/core/arrays/timedeltas.py index efa7c0b0e44d4..cc93644677463 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -81,7 +81,7 @@ def wrapper(self, other): else: other = type(self)(other).values result = meth(self, other) - result = com._values_from_object(result) + result = com.values_from_object(result) o_mask = np.array(isna(other)) if o_mask.any(): @@ -150,7 +150,7 @@ def __new__(cls, values, freq=None, start=None, end=None, periods=None, @classmethod def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): # **kwargs are for compat with TimedeltaIndex, which includes `name` - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' 'and freq, exactly three must be specified') diff --git a/pandas/core/base.py b/pandas/core/base.py index 1226662824eb5..5382315bad32b 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -581,7 +581,7 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): results.append(colg.aggregate(a)) # make sure we find a good name - name = com._get_callable_name(a) or a + name = com.get_callable_name(a) or a keys.append(name) except (TypeError, DataError): pass @@ -856,7 +856,7 @@ def tolist(self): numpy.ndarray.tolist """ if is_datetimelike(self._values): - return [com._maybe_box_datetimelike(x) for x in self._values] + return [com.maybe_box_datetimelike(x) for x in self._values] elif is_extension_array_dtype(self._values): return list(self._values) else: diff --git a/pandas/core/common.py b/pandas/core/common.py index 0ca776b6bfa77..0350b338f2bee 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -1,5 +1,7 @@ """ Misc tools for implementing data structures + +Note: pandas.core.common is *not* part of the public API. 
""" from datetime import datetime, timedelta @@ -11,8 +13,7 @@ from pandas._libs import lib, tslibs from pandas import compat -from pandas.compat import long, zip, iteritems, PY36, OrderedDict -from pandas.core.config import get_option +from pandas.compat import iteritems, PY36, OrderedDict from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCIndexClass from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import _iterable_not_string @@ -52,7 +53,7 @@ def flatten(l): yield el -def _consensus_name_attr(objs): +def consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: try: @@ -63,7 +64,8 @@ def _consensus_name_attr(objs): return name -def _get_info_slice(obj, indexer): +# TODO: only used once in frame.py; belongs elsewhere? +def get_info_slice(obj, indexer): """Slice the info axis of `obj` with `indexer`.""" if not hasattr(obj, '_info_axis_number'): msg = 'object of type {typ!r} has no info axis' @@ -73,7 +75,7 @@ def _get_info_slice(obj, indexer): return tuple(slices) -def _maybe_box(indexer, values, obj, key): +def maybe_box(indexer, values, obj, key): # if we have multiples coming back, box em if isinstance(values, np.ndarray): @@ -83,7 +85,7 @@ def _maybe_box(indexer, values, obj, key): return values -def _maybe_box_datetimelike(value): +def maybe_box_datetimelike(value): # turn a datetime like into a Timestamp/timedelta as needed if isinstance(value, (np.datetime64, datetime)): @@ -94,13 +96,13 @@ def _maybe_box_datetimelike(value): return value -_values_from_object = lib.values_from_object +values_from_object = lib.values_from_object def is_bool_indexer(key): if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)): if key.dtype == np.object_: - key = np.asarray(_values_from_object(key)) + key = np.asarray(values_from_object(key)) if not lib.is_bool_array(key): if isna(key).any(): @@ -120,19 +122,6 @@ def is_bool_indexer(key): return False -def _mut_exclusive(**kwargs): - item1, item2 = kwargs.items() - label1, 
val1 = item1 - label2, val2 = item2 - if val1 is not None and val2 is not None: - msg = 'mutually exclusive arguments: {label1!r} and {label2!r}' - raise TypeError(msg.format(label1=label1, label2=label2)) - elif val1 is not None: - return val1 - else: - return val2 - - def _not_none(*args): """Returns a generator consisting of the arguments that are not None""" return (arg for arg in args if arg is not None) @@ -170,12 +159,12 @@ def _all_not_none(*args): return True -def _count_not_none(*args): +def count_not_none(*args): """Returns the count of arguments that are not None""" return sum(x is not None for x in args) -def _try_sort(iterable): +def try_sort(iterable): listed = list(iterable) try: return sorted(listed) @@ -183,117 +172,17 @@ def _try_sort(iterable): return listed -def _dict_keys_to_ordered_list(mapping): +def dict_keys_to_ordered_list(mapping): # when pandas drops support for Python < 3.6, this function # can be replaced by a simple list(mapping.keys()) if PY36 or isinstance(mapping, OrderedDict): keys = list(mapping.keys()) else: - keys = _try_sort(mapping) + keys = try_sort(mapping) return keys -def iterpairs(seq): - """ - Parameters - ---------- - seq : sequence - - Returns - ------- - iterator returning overlapping pairs of elements - - Examples - -------- - >>> list(iterpairs([1, 2, 3, 4])) - [(1, 2), (2, 3), (3, 4)] - """ - # input may not be sliceable - seq_it = iter(seq) - seq_it_next = iter(seq) - next(seq_it_next) - - return zip(seq_it, seq_it_next) - - -def split_ranges(mask): - """ Generates tuples of ranges which cover all True value in mask - - >>> list(split_ranges([1,0,0,1,0])) - [(0, 1), (3, 4)] - """ - ranges = [(0, len(mask))] - - for pos, val in enumerate(mask): - if not val: # this pos should be omitted, split off the prefix range - r = ranges.pop() - if pos > r[0]: # yield non-zero range - yield (r[0], pos) - if pos + 1 < len(mask): # save the rest for processing - ranges.append((pos + 1, len(mask))) - if ranges: - yield 
ranges[-1] - - -def _long_prod(vals): - result = long(1) - for x in vals: - result *= x - return result - - -class groupby(dict): - """ - A simple groupby different from the one in itertools. - - Does not require the sequence elements to be sorted by keys, - however it is slower. - """ - - def __init__(self, seq, key=lambda x: x): - for value in seq: - k = key(value) - self.setdefault(k, []).append(value) - - try: - __iter__ = dict.iteritems - except AttributeError: # pragma: no cover - # Python 3 - def __iter__(self): - return iter(dict.items(self)) - - -def map_indices_py(arr): - """ - Returns a dictionary with (element, index) pairs for each element in the - given array/list - """ - return {x: i for i, x in enumerate(arr)} - - -def union(*seqs): - result = set([]) - for seq in seqs: - if not isinstance(seq, set): - seq = set(seq) - result |= seq - return type(seqs[0])(list(result)) - - -def difference(a, b): - return type(a)(list(set(a) - set(b))) - - -def intersection(*seqs): - result = set(seqs[0]) - for seq in seqs: - if not isinstance(seq, set): - seq = set(seq) - result &= seq - return type(seqs[0])(list(result)) - - -def _asarray_tuplesafe(values, dtype=None): +def asarray_tuplesafe(values, dtype=None): if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): values = list(values) @@ -317,7 +206,7 @@ def _asarray_tuplesafe(values, dtype=None): return result -def _index_labels_to_array(labels, dtype=None): +def index_labels_to_array(labels, dtype=None): """ Transform label or iterable of labels to array, for use in Index. 
@@ -339,12 +228,12 @@ def _index_labels_to_array(labels, dtype=None): except TypeError: # non-iterable labels = [labels] - labels = _asarray_tuplesafe(labels, dtype=dtype) + labels = asarray_tuplesafe(labels, dtype=dtype) return labels -def _maybe_make_list(obj): +def maybe_make_list(obj): if obj is not None and not isinstance(obj, (tuple, list)): return [obj] return obj @@ -363,19 +252,20 @@ def is_true_slices(l): return [isinstance(k, slice) and not is_null_slice(k) for k in l] +# TODO: used only once in indexing; belongs elsewhere? def is_full_slice(obj, l): """ we have a full length slice """ return (isinstance(obj, slice) and obj.start == 0 and obj.stop == l and obj.step is None) -def _get_callable_name(obj): +def get_callable_name(obj): # typical case has name if hasattr(obj, '__name__'): return getattr(obj, '__name__') # some objects don't; could recurse if isinstance(obj, partial): - return _get_callable_name(obj.func) + return get_callable_name(obj.func) # fall back to class name if hasattr(obj, '__call__'): return obj.__class__.__name__ @@ -386,7 +276,7 @@ def _get_callable_name(obj): return None -def _apply_if_callable(maybe_callable, obj, **kwargs): +def apply_if_callable(maybe_callable, obj, **kwargs): """ Evaluate possibly callable input using obj and kwargs if it is callable, otherwise return as it is @@ -404,7 +294,7 @@ def _apply_if_callable(maybe_callable, obj, **kwargs): return maybe_callable -def _dict_compat(d): +def dict_compat(d): """ Helper function to convert datetimelike-keyed dicts to Timestamp-keyed dict @@ -417,7 +307,7 @@ def _dict_compat(d): dict """ - return dict((_maybe_box_datetimelike(key), value) + return dict((maybe_box_datetimelike(key), value) for key, value in iteritems(d)) @@ -464,78 +354,7 @@ class Sentinel(object): return Sentinel() -# ---------------------------------------------------------------------- -# Detect our environment - -def in_interactive_session(): - """ check if we're running in an interactive shell - - 
returns True if running under python/ipython interactive shell - """ - - def check_main(): - import __main__ as main - return (not hasattr(main, '__file__') or - get_option('mode.sim_interactive')) - - try: - return __IPYTHON__ or check_main() # noqa - except: - return check_main() - - -def in_qtconsole(): - """ - check if we're inside an IPython qtconsole - - .. deprecated:: 0.14.1 - This is no longer needed, or working, in IPython 3 and above. - """ - try: - ip = get_ipython() # noqa - front_end = ( - ip.config.get('KernelApp', {}).get('parent_appname', "") or - ip.config.get('IPKernelApp', {}).get('parent_appname', "")) - if 'qtconsole' in front_end.lower(): - return True - except: - return False - return False - - -def in_ipnb(): - """ - check if we're inside an IPython Notebook - - .. deprecated:: 0.14.1 - This is no longer needed, or working, in IPython 3 and above. - """ - try: - ip = get_ipython() # noqa - front_end = ( - ip.config.get('KernelApp', {}).get('parent_appname', "") or - ip.config.get('IPKernelApp', {}).get('parent_appname', "")) - if 'notebook' in front_end.lower(): - return True - except: - return False - return False - - -def in_ipython_frontend(): - """ - check if we're inside an an IPython zmq frontend - """ - try: - ip = get_ipython() # noqa - return 'zmq' in str(type(ip)).lower() - except: - pass - - return False - - -def _random_state(state=None): +def random_state(state=None): """ Helper function for processing random_state arguments. @@ -564,7 +383,8 @@ def _random_state(state=None): "RandomState, or None") -def _get_distinct_objs(objs): +# TODO: only used once in indexes.api; belongs elsewhere? +def get_distinct_objs(objs): """ Return a list with distinct elements of "objs" (different ids). Preserves order. 
diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 781101f5804e6..ac552e7b80de3 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -123,8 +123,8 @@ def _evaluate_numexpr(op, op_str, a, b, truediv=True, def _where_standard(cond, a, b): - return np.where(com._values_from_object(cond), com._values_from_object(a), - com._values_from_object(b)) + return np.where(com.values_from_object(cond), com.values_from_object(a), + com.values_from_object(b)) def _where_numexpr(cond, a, b): diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 26eefa75b2675..2bd1b0c5b3507 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -190,7 +190,7 @@ def stringify(value): v = _coerce_scalar_to_timedelta_type(v, unit='s').value return TermValue(int(v), v, kind) elif meta == u('category'): - metadata = com._values_from_object(self.metadata) + metadata = com.values_from_object(self.metadata) result = metadata.searchsorted(v, side='left') # result returns 0 if v is first element or if v is not in metadata diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 873170eb9813b..078e176ff2b99 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -466,7 +466,7 @@ def _init_dict(self, data, index, columns, dtype=None): arrays.loc[missing] = [v] * missing.sum() else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) arrays = [data[k] for k in keys] @@ -617,11 +617,11 @@ def _repr_fits_horizontal_(self, ignore_width=False): # used by repr_html under IPython notebook or scripts ignore terminal # dims - if ignore_width or not com.in_interactive_session(): + if ignore_width or not console.in_interactive_session(): return True if (get_option('display.width') is not None or - com.in_ipython_frontend()): + console.in_ipython_frontend()): # check 
at least the column row for excessive width max_rows = 1 else: @@ -689,7 +689,7 @@ def _repr_html_(self): # XXX: In IPython 3.x and above, the Qt console will not attempt to # display HTML, so this check can be removed when support for # IPython 2.x is no longer needed. - if com.in_qtconsole(): + if console.in_qtconsole(): # 'HTML output is disabled in QtConsole' return None @@ -1100,13 +1100,13 @@ def to_dict(self, orient='dict', into=dict): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', lib.map_infer(self.values.ravel(), - com._maybe_box_datetimelike) + com.maybe_box_datetimelike) .reshape(self.values.shape).tolist()))) elif orient.lower().startswith('s'): - return into_c((k, com._maybe_box_datetimelike(v)) + return into_c((k, com.maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): - return [into_c((k, com._maybe_box_datetimelike(v)) + return [into_c((k, com.maybe_box_datetimelike(v)) for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): @@ -2614,7 +2614,7 @@ def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) - return com._maybe_box_datetimelike(series._values[index]) + return com.maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine @@ -2746,7 +2746,7 @@ def _ixs(self, i, axis=0): return result def __getitem__(self, key): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) # shortcut if the key is in columns try: @@ -3183,7 +3183,7 @@ def is_dtype_instance_mapper(idx, dtype): exclude_these.iloc[idx] = not any(map(f, exclude)) dtype_indexer = include_these & exclude_these - return self.loc[com._get_info_slice(self, dtype_indexer)] + return self.loc[com.get_info_slice(self, dtype_indexer)] def _box_item_values(self, key, values): items = self.columns[self.columns.get_loc(key)] @@ 
-3198,7 +3198,7 @@ def _box_col_values(self, values, items): return klass(values, index=self.index, name=items, fastpath=True) def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) # see if we can slice the rows indexer = convert_to_index_sliceable(self, key) @@ -3403,12 +3403,12 @@ def assign(self, **kwargs): # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): - data[k] = com._apply_if_callable(v, data) + data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): - results[k] = com._apply_if_callable(v, data) + results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) @@ -3489,7 +3489,7 @@ def reindexer(value): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) else: - value = com._asarray_tuplesafe(value) + value = com.asarray_tuplesafe(value) elif value.ndim == 2: value = value.copy().T elif isinstance(value, Index): @@ -7827,7 +7827,7 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) - values = com._values_from_object(s) + values = com.values_from_object(s) aligned_values.append(algorithms.take_1d(values, indexer)) values = np.vstack(aligned_values) @@ -7915,7 +7915,7 @@ def _homogenize(data, index, dtype=None): oindex = index.astype('O') if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - v = com._dict_compat(v) + v = com.dict_compat(v) else: v = dict(v) v = lib.fast_multiget(v, oindex.values, default=np.nan) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index fa4572dd7b979..fd3d2a5802413 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1090,7 +1090,7 @@ def rename(self, *args, **kwargs): raise TypeError('rename() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) 
- if com._count_not_none(*axes.values()) == 0: + if com.count_not_none(*axes.values()) == 0: raise TypeError('must pass an index to rename') # renamer function if passed a dict @@ -1265,7 +1265,7 @@ def _indexed_same(self, other): for a in self._AXIS_ORDERS) def __neg__(self): - values = com._values_from_object(self) + values = com.values_from_object(self) if is_bool_dtype(values): arr = operator.inv(values) elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) @@ -1277,7 +1277,7 @@ def __neg__(self): return self.__array_wrap__(arr) def __pos__(self): - values = com._values_from_object(self) + values = com.values_from_object(self) if (is_bool_dtype(values) or is_period_arraylike(values)): arr = values elif (is_numeric_dtype(values) or is_timedelta64_dtype(values) @@ -1290,7 +1290,7 @@ def __pos__(self): def __invert__(self): try: - arr = operator.inv(com._values_from_object(self)) + arr = operator.inv(com.values_from_object(self)) return self.__array_wrap__(arr) except Exception: @@ -1587,7 +1587,7 @@ def _drop_labels_or_levels(self, keys, axis=0): .format(type=type(self))) # Validate keys - keys = com._maybe_make_list(keys) + keys = com.maybe_make_list(keys) invalid_keys = [k for k in keys if not self._is_label_or_level_reference(k, axis=axis)] @@ -1753,7 +1753,7 @@ def __round__(self, decimals=0): # Array Interface def __array__(self, dtype=None): - return com._values_from_object(self) + return com.values_from_object(self) def __array_wrap__(self, result, context=None): d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) @@ -3188,7 +3188,7 @@ def xs(self, key, axis=0, level=None, drop_level=True): # that means that their are list/ndarrays inside the Series! 
# so just return them (GH 6394) if not is_list_like(new_values) or self.ndim == 1: - return com._maybe_box_datetimelike(new_values) + return com.maybe_box_datetimelike(new_values) result = self._constructor_sliced( new_values, index=self.columns, @@ -3328,7 +3328,7 @@ def _drop_axis(self, labels, axis, level=None, errors='raise'): # Case for non-unique axis else: - labels = ensure_object(com._index_labels_to_array(labels)) + labels = ensure_object(com.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') @@ -3893,7 +3893,7 @@ def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, def _needs_reindex_multi(self, axes, method, level): """Check if we do need a multi reindex.""" - return ((com._count_not_none(*axes.values()) == self._AXIS_LEN) and + return ((com.count_not_none(*axes.values()) == self._AXIS_LEN) and method is None and level is None and not self._is_mixed_type) def _reindex_multi(self, axes, copy, fill_value): @@ -4067,7 +4067,7 @@ def filter(self, items=None, like=None, regex=None, axis=None): """ import re - nkw = com._count_not_none(items, like, regex) + nkw = com.count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') @@ -4313,7 +4313,7 @@ def sample(self, n=None, frac=None, replace=False, weights=None, axis_length = self.shape[axis] # Process random_state argument - rs = com._random_state(random_state) + rs = com.random_state(random_state) # Check weights for compliance if weights is not None: @@ -7745,7 +7745,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, inplace = validate_bool_kwarg(inplace, 'inplace') # align the cond to same shape as myself - cond = com._apply_if_callable(cond, self) + cond = com.apply_if_callable(cond, self) if isinstance(cond, NDFrame): cond, _ = cond.align(self, join='right', broadcast_axis=1) else: @@ 
-7815,7 +7815,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, if try_quick: try: - new_other = com._values_from_object(self) + new_other = com.values_from_object(self) new_other = new_other.copy() new_other[icond] = other other = new_other @@ -8012,7 +8012,7 @@ def where(self, cond, other=np.nan, inplace=False, axis=None, level=None, else: errors = 'ignore' - other = com._apply_if_callable(other, self) + other = com.apply_if_callable(other, self) return self._where(cond, other, inplace, axis, level, errors=errors, try_cast=try_cast) @@ -8034,7 +8034,7 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, errors = 'ignore' inplace = validate_bool_kwarg(inplace, 'inplace') - cond = com._apply_if_callable(cond, self) + cond = com.apply_if_callable(cond, self) # see gh-21891 if not hasattr(cond, "__invert__"): @@ -8982,7 +8982,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, **kwargs)) - 1) rs = rs.reindex_like(data) if freq is None: - mask = isna(com._values_from_object(data)) + mask = isna(com.values_from_object(data)) np.putmask(rs.values, mask, np.nan) return rs @@ -9913,7 +9913,7 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs): else: axis = self._get_axis_number(axis) - y = com._values_from_object(self).copy() + y = com.values_from_object(self).copy() if (skipna and issubclass(y.dtype.type, (np.datetime64, np.timedelta64))): diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 169416d6f8211..fdededc325b03 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -819,7 +819,7 @@ def _aggregate_multiple_funcs(self, arg, _level): columns.append(f) else: # protect against callables without names - columns.append(com._get_callable_name(f)) + columns.append(com.get_callable_name(f)) arg = lzip(columns, arg) results = {} diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index 
3070fa0e63c88..36cdfbd3b3479 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -288,7 +288,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = self.obj[self.name] elif isinstance(self.grouper, (list, tuple)): - self.grouper = com._asarray_tuplesafe(self.grouper) + self.grouper = com.asarray_tuplesafe(self.grouper) # a passed Categorical elif is_categorical_dtype(self.grouper): @@ -533,7 +533,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, if not any_callable and not all_in_columns_index and \ not any_arraylike and not any_groupers and \ match_axis_length and level is None: - keys = [com._asarray_tuplesafe(keys)] + keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index f2c55a56b119d..38ac144ac6c95 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -175,7 +175,7 @@ def apply(self, f, data, axis=0): group_keys = self._get_group_keys() # oh boy - f_name = com._get_callable_name(f) + f_name = com.get_callable_name(f) if (f_name not in base.plotting_methods and hasattr(splitter, 'fast_apply') and axis == 0): try: @@ -209,7 +209,7 @@ def indices(self): return self.groupings[0].indices else: label_list = [ping.labels for ping in self.groupings] - keys = [com._values_from_object(ping.group_index) + keys = [com.values_from_object(ping.group_index) for ping in self.groupings] return get_indexer_dict(label_list, keys) diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index b409d695a73e8..3f3448d104165 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -55,7 +55,7 @@ def _get_objs_combined_axis(objs, intersect=False, axis=0, sort=True): def _get_combined_index(indexes, intersect=False, sort=False): # TODO: handle index names! 
- indexes = com._get_distinct_objs(indexes) + indexes = com.get_distinct_objs(indexes) if len(indexes) == 0: index = Index([]) elif len(indexes) == 1: @@ -130,7 +130,7 @@ def _sanitize_and_check(indexes): if list in kinds: if len(kinds) > 1: - indexes = [Index(com._try_sort(x)) + indexes = [Index(com.try_sort(x)) if not isinstance(x, Index) else x for x in indexes] kinds.remove(list) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index cf4b4fe6bc084..20926ea5163af 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -381,9 +381,9 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype('object') else: - subarr = com._asarray_tuplesafe(data, dtype=object) + subarr = com.asarray_tuplesafe(data, dtype=object) - # _asarray_tuplesafe does not always copy underlying data, + # asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens if copy: subarr = subarr.copy() @@ -449,7 +449,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return MultiIndex.from_tuples( data, names=name or kwargs.get('names')) # other iterable of some kind - subarr = com._asarray_tuplesafe(data, dtype=object) + subarr = com.asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs) """ @@ -1706,7 +1706,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) return keyarr _index_shared_docs['_convert_index_indexer'] = """ @@ -2001,7 +2001,7 @@ def __getitem__(self, key): if com.is_bool_indexer(key): key = np.asarray(key) - key = com._values_from_object(key) + key = com.values_from_object(key) result = getitem(key) if not is_scalar(result): return promote(result) @@ -2367,8 
+2367,8 @@ def equals(self, other): return other.equals(self) try: - return array_equivalent(com._values_from_object(self), - com._values_from_object(other)) + return array_equivalent(com.values_from_object(self), + com.values_from_object(other)) except Exception: return False @@ -3072,8 +3072,8 @@ def get_value(self, series, key): elif is_integer(key): return s[key] - s = com._values_from_object(series) - k = com._values_from_object(key) + s = com.values_from_object(series) + k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') try: @@ -3106,8 +3106,8 @@ def set_value(self, arr, key, value): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ - self._engine.set_value(com._values_from_object(arr), - com._values_from_object(key), value) + self._engine.set_value(com.values_from_object(arr), + com.values_from_object(key), value) def _get_level_values(self, level): """ @@ -4432,7 +4432,7 @@ def drop(self, labels, errors='raise'): If not all of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None - labels = com._index_labels_to_array(labels, dtype=arr_dtype) + labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): @@ -4725,7 +4725,7 @@ def _validate_for_numeric_binop(self, other, op): if len(self) != len(other): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") - other = com._values_from_object(other) + other = com.values_from_object(other) if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index a03e478f81caf..d76a7ef00f625 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -440,7 +440,7 @@ def get_value(self, series, key): know what you're doing """ try: - k = 
com._values_from_object(key) + k = com.values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') indexer = self.get_loc(k) return series.iloc[indexer] @@ -629,7 +629,7 @@ def _convert_list_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) if self.categories._defer_to_indexing: return keyarr diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 6ed752d3a213d..933e7406b5af3 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -377,7 +377,7 @@ def __new__(cls, data=None, @classmethod def _generate_range(cls, start, end, periods, name, freq, tz=None, normalize=False, ambiguous='raise', closed=None): - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' 'and freq, exactly three must be specified') @@ -1276,8 +1276,8 @@ def get_value(self, series, key): return series.take(locs) try: - return com._maybe_box(self, Index.get_value(self, series, key), - series, key) + return com.maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -1296,9 +1296,9 @@ def get_value_maybe_box(self, series, key): key = Timestamp(key, tz=self.tz) elif not isinstance(key, Timestamp): key = Timestamp(key) - values = self._engine.get_value(com._values_from_object(series), + values = self._engine.get_value(com.values_from_object(series), key, tz=self.tz) - return com._maybe_box(self, values, series, key) + return com.maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index e92f980caf3dc..246bd3d541b72 100644 --- 
a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1120,14 +1120,14 @@ def interval_range(start=None, end=None, periods=None, freq=None, -------- IntervalIndex : an Index of intervals that are all closed on the same side. """ - start = com._maybe_box_datetimelike(start) - end = com._maybe_box_datetimelike(end) + start = com.maybe_box_datetimelike(start) + end = com.maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com._any_none(periods, start, end): freq = 1 if is_number(endpoint) else 'D' - if com._count_not_none(start, end, periods, freq) != 3: + if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, and ' 'freq, exactly three must be specified') diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 0d4ceb2783bad..7d24a901382bb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -950,8 +950,8 @@ def get_value(self, series, key): from pandas.core.indexing import maybe_droplevels # Label-based - s = com._values_from_object(series) - k = com._values_from_object(key) + s = com.values_from_object(series) + k = com.values_from_object(key) def _try_mi(k): # TODO: what if a level contains tuples?? 
@@ -1691,7 +1691,7 @@ def drop(self, labels, level=None, errors='raise'): try: if not isinstance(labels, (np.ndarray, Index)): - labels = com._index_labels_to_array(labels) + labels = com.index_labels_to_array(labels) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): @@ -1730,7 +1730,7 @@ def drop(self, labels, level=None, errors='raise'): return self.delete(inds) def _drop_from_level(self, labels, level): - labels = com._index_labels_to_array(labels) + labels = com.index_labels_to_array(labels) i = self._get_level_number(level) index = self.levels[i] values = index.get_indexer(labels) @@ -2628,7 +2628,7 @@ def equals(self, other): return False if not isinstance(other, MultiIndex): - other_vals = com._values_from_object(ensure_index(other)) + other_vals = com.values_from_object(ensure_index(other)) return array_equivalent(self._ndarray_values, other_vals) if self.nlevels != other.nlevels: diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 1fe0c8fa289e6..ea392d0b93377 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -249,9 +249,9 @@ def _convert_arr_indexer(self, keyarr): # Cast the indexer to uint64 if possible so # that the values returned from indexing are # also uint64. 
- keyarr = com._asarray_tuplesafe(keyarr) + keyarr = com.asarray_tuplesafe(keyarr) if is_integer_dtype(keyarr): - return com._asarray_tuplesafe(keyarr, dtype=np.uint64) + return com.asarray_tuplesafe(keyarr, dtype=np.uint64) return keyarr @Appender(_index_shared_docs['_convert_index_indexer']) @@ -354,9 +354,9 @@ def get_value(self, series, key): if not is_scalar(key): raise InvalidIndexError - k = com._values_from_object(key) + k = com.values_from_object(key) loc = self.get_loc(k) - new_values = com._values_from_object(series)[loc] + new_values = com.values_from_object(series)[loc] return new_values diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index ab1b3001e23e0..b315e3ec20830 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -524,11 +524,11 @@ def get_value(self, series, key): Fast lookup of value from 1-dimensional ndarray. Only use this if you know what you're doing """ - s = com._values_from_object(series) + s = com.values_from_object(series) try: - return com._maybe_box(self, - super(PeriodIndex, self).get_value(s, key), - series, key) + return com.maybe_box(self, + super(PeriodIndex, self).get_value(s, key), + series, key) except (KeyError, IndexError): try: asdt, parsed, reso = parse_time_string(key, self.freq) @@ -551,16 +551,16 @@ def get_value(self, series, key): return series[key] elif grp == freqn: key = Period(asdt, freq=self.freq).ordinal - return com._maybe_box(self, self._engine.get_value(s, key), - series, key) + return com.maybe_box(self, self._engine.get_value(s, key), + series, key) else: raise KeyError(key) except TypeError: pass key = Period(key, self.freq).ordinal - return com._maybe_box(self, self._engine.get_value(s, key), - series, key) + return com.maybe_box(self, self._engine.get_value(s, key), + series, key) @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): @@ -865,7 +865,7 @@ def 
period_range(start=None, end=None, periods=None, freq='D', name=None): PeriodIndex(['2017-03', '2017-04', '2017-05', '2017-06'], dtype='period[M]', freq='M') """ - if com._count_not_none(start, end, periods) != 2: + if com.count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index af34ec8b22824..006758f276f87 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -491,8 +491,8 @@ def get_value(self, series, key): return self.get_value_maybe_box(series, key) try: - return com._maybe_box(self, Index.get_value(self, series, key), - series, key) + return com.maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -508,8 +508,8 @@ def get_value(self, series, key): def get_value_maybe_box(self, series, key): if not isinstance(key, Timedelta): key = Timedelta(key) - values = self._engine.get_value(com._values_from_object(series), key) - return com._maybe_box(self, values, series, key) + values = self._engine.get_value(com.values_from_object(series), key) + return com.maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e0b6048b2ad64..13c019dea469a 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -112,7 +112,7 @@ def __iter__(self): def __getitem__(self, key): if type(key) is tuple: - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: values = self.obj._get_value(*key) @@ -126,7 +126,7 @@ def __getitem__(self, key): # we by definition only have the 0th axis axis = self.axis or 0 - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) return self._getitem_axis(key, 
axis=axis) def _get_label(self, label, axis=None): @@ -186,10 +186,10 @@ def _get_setitem_indexer(self, key): def __setitem__(self, key, value): if isinstance(key, tuple): - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) indexer = self._get_setitem_indexer(key) self._setitem_with_indexer(indexer, value) @@ -1474,7 +1474,7 @@ def _convert_for_reindex(self, key, axis=None): keyarr = labels._convert_index_indexer(key) else: # asarray can be unsafe, NumPy strings are weird - keyarr = com._asarray_tuplesafe(key) + keyarr = com.asarray_tuplesafe(key) if is_integer_dtype(keyarr): # Cast the indexer to uint64 if possible so @@ -1494,7 +1494,7 @@ class _LocationIndexer(_NDFrameIndexer): def __getitem__(self, key): if type(key) is tuple: - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) try: if self._is_scalar_access(key): @@ -1506,7 +1506,7 @@ def __getitem__(self, key): # we by definition only have the 0th axis axis = self.axis or 0 - maybe_callable = com._apply_if_callable(key, self.obj) + maybe_callable = com.apply_if_callable(key, self.obj) return self._getitem_axis(maybe_callable, axis=axis) def _is_scalar_access(self, key): @@ -2282,11 +2282,11 @@ def __getitem__(self, key): def __setitem__(self, key, value): if isinstance(key, tuple): - key = tuple(com._apply_if_callable(x, self.obj) + key = tuple(com.apply_if_callable(x, self.obj) for x in key) else: # scalar callable may return tuple - key = com._apply_if_callable(key, self.obj) + key = com.apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = self._tuplify(key) diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index de31c6ac11c3f..32fd70bcf654d 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -205,7 +205,7 @@ def _get_values(values, skipna, 
fill_value=None, fill_value_typ=None, if necessary copy and mask using the specified fill_value copy = True will force the copy """ - values = com._values_from_object(values) + values = com.values_from_object(values) if isfinite: mask = _isfinite(values) else: @@ -440,7 +440,7 @@ def nanstd(values, axis=None, skipna=True, ddof=1): @bottleneck_switch(ddof=1) def nanvar(values, axis=None, skipna=True, ddof=1): - values = com._values_from_object(values) + values = com.values_from_object(values) dtype = values.dtype mask = isna(values) if is_any_int_dtype(values): @@ -549,7 +549,7 @@ def nanskew(values, axis=None, skipna=True): """ - values = com._values_from_object(values) + values = com.values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') @@ -607,7 +607,7 @@ def nankurt(values, axis=None, skipna=True): central moment. """ - values = com._values_from_object(values) + values = com.values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') diff --git a/pandas/core/ops.py b/pandas/core/ops.py index a8c1b954a61b7..c65d2dcdc478c 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -89,7 +89,7 @@ def _maybe_match_name(a, b): See also -------- - pandas.core.common._consensus_name_attr + pandas.core.common.consensus_name_attr """ a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') @@ -1111,7 +1111,7 @@ def na_op(x, y): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) mask = notna(x) & notna(y) - result[mask] = op(x[mask], com._values_from_object(y[mask])) + result[mask] = op(x[mask], com.values_from_object(y[mask])) else: assert isinstance(x, np.ndarray) result = np.empty(len(x), dtype=x.dtype) @@ -1407,7 +1407,7 @@ def wrapper(self, other, axis=None): .format(typ=type(other))) # always return a full value series here - res_values = com._values_from_object(res) + res_values = com.values_from_object(res) return 
self._constructor(res_values, index=self.index, name=res_name, dtype='bool') diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 16ade3fae90a1..4ebac55eea137 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -204,7 +204,7 @@ def _init_dict(self, data, axes, dtype=None): for k, v in compat.iteritems(data) if k in haxis) else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) haxis = Index(keys) for k, v in compat.iteritems(data): @@ -282,7 +282,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): return cls(**d) def __getitem__(self, key): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) @@ -596,7 +596,7 @@ def _box_item_values(self, key, values): return self._constructor_sliced(values, **d) def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) shape = tuple(self.shape) if isinstance(value, self._constructor_sliced): value = value.reindex( diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index 1d6105cb68bf1..1c602a0af1ec1 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -385,7 +385,7 @@ def get_result(self): # stack blocks if self.axis == 0: - name = com._consensus_name_attr(self.objs) + name = com.consensus_name_attr(self.objs) mgr = self.objs[0]._data.concat([x._data for x in self.objs], self.new_axes) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index caaeb1bad2358..3989c70c9d13f 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -500,9 +500,9 @@ def __init__(self, left, right, how='inner', on=None, self.how = how self.axis = axis - self.on = com._maybe_make_list(on) - self.left_on = com._maybe_make_list(left_on) - self.right_on = com._maybe_make_list(right_on) + self.on = 
com.maybe_make_list(on) + self.left_on = com.maybe_make_list(left_on) + self.right_on = com.maybe_make_list(right_on) self.copy = copy self.suffixes = suffixes @@ -1552,8 +1552,8 @@ def _factorize_keys(lk, rk, sort=True): rk = ensure_int64(rk) elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk): klass = libhashtable.Int64Factorizer - lk = ensure_int64(com._values_from_object(lk)) - rk = ensure_int64(com._values_from_object(rk)) + lk = ensure_int64(com.values_from_object(lk)) + rk = ensure_int64(com.values_from_object(rk)) else: klass = libhashtable.Factorizer lk = ensure_object(lk) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index da5246d389817..0d1caa3d57d73 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -470,8 +470,8 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, crosstab : DataFrame """ - index = com._maybe_make_list(index) - columns = com._maybe_make_list(columns) + index = com.maybe_make_list(index) + columns = com.maybe_make_list(columns) rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') diff --git a/pandas/core/reshape/util.py b/pandas/core/reshape/util.py index 2fe82e5d6bc57..e83bcf800e949 100644 --- a/pandas/core/reshape/util.py +++ b/pandas/core/reshape/util.py @@ -60,7 +60,7 @@ def cartesian_product(X): # if any factor is empty, the cartesian product is empty b = np.zeros_like(cumprodX) - return [np.tile(np.repeat(np.asarray(com._values_from_object(x)), b[i]), + return [np.tile(np.repeat(np.asarray(com.values_from_object(x)), b[i]), np.product(a[i])) for i, x in enumerate(X)] diff --git a/pandas/core/series.py b/pandas/core/series.py index 3571e908fc6a7..03fc9701de1fc 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -766,7 +766,7 @@ def _slice(self, slobj, axis=0, kind=None): return self._get_values(slobj) def __getitem__(self, key): - key = com._apply_if_callable(key, self) + 
key = com.apply_if_callable(key, self) try: result = self.index.get_value(self, key) @@ -884,7 +884,7 @@ def _get_values(self, indexer): return self._values[indexer] def __setitem__(self, key, value): - key = com._apply_if_callable(key, self) + key = com.apply_if_callable(key, self) def setitem(key, value): try: @@ -990,7 +990,7 @@ def _set_labels(self, key, value): if isinstance(key, Index): key = key.values else: - key = com._asarray_tuplesafe(key) + key = com.asarray_tuplesafe(key) indexer = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): @@ -1042,7 +1042,7 @@ def get_value(self, label, takeable=False): def _get_value(self, label, takeable=False): if takeable is True: - return com._maybe_box_datetimelike(self._values[label]) + return com.maybe_box_datetimelike(self._values[label]) return self.index.get_value(self._values, label) _get_value.__doc__ = get_value.__doc__ @@ -1418,7 +1418,7 @@ def count(self, level=None): nobs : int or Series (if level specified) """ if level is None: - return notna(com._values_from_object(self)).sum() + return notna(com.values_from_object(self)).sum() if isinstance(level, compat.string_types): level = self.index._get_level_number(level) @@ -1722,7 +1722,7 @@ def idxmin(self, axis=0, skipna=True, *args, **kwargs): nan """ skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) - i = nanops.nanargmin(com._values_from_object(self), skipna=skipna) + i = nanops.nanargmin(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1792,7 +1792,7 @@ def idxmax(self, axis=0, skipna=True, *args, **kwargs): nan """ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) - i = nanops.nanargmax(com._values_from_object(self), skipna=skipna) + i = nanops.nanargmax(com.values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1835,7 +1835,7 @@ def round(self, decimals=0, *args, **kwargs): """ nv.validate_round(args, kwargs) - result = 
com._values_from_object(self).round(decimals) + result = com.values_from_object(self).round(decimals) result = self._constructor(result, index=self.index).__finalize__(self) return result @@ -2003,7 +2003,7 @@ def diff(self, periods=1): 5 NaN dtype: float64 """ - result = algorithms.diff(com._values_from_object(self), periods) + result = algorithms.diff(com.values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self) def autocorr(self, lag=1): @@ -4190,7 +4190,7 @@ def _try_cast(arr, take_fast_path): if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: - subarr = com._asarray_tuplesafe(data, dtype=dtype) + subarr = com.asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f7071061d07ab..5cb9f4744cc58 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -143,7 +143,7 @@ def _init_dict(self, data, index, columns, dtype=None): columns = ensure_index(columns) data = {k: v for k, v in compat.iteritems(data) if k in columns} else: - keys = com._dict_keys_to_ordered_list(data) + keys = com.dict_keys_to_ordered_list(data) columns = Index(keys) if index is None: diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 96ee5b7954f45..1a92a27bfb390 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -375,7 +375,7 @@ def __getitem__(self, key): # Could not hash item, must be array-like? 
pass - key = com._values_from_object(key) + key = com.values_from_object(key) if self.index.nlevels > 1 and isinstance(key, tuple): # to handle MultiIndex labels key = self.index.get_loc(key) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index a3e091d43f261..6349af4d2e0ac 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -55,7 +55,7 @@ def _get_array_list(arr, others): """ from pandas.core.series import Series - if len(others) and isinstance(com._values_from_object(others)[0], + if len(others) and isinstance(com.values_from_object(others)[0], (list, np.ndarray, Series)): arrays = [arr] + list(others) else: @@ -702,7 +702,7 @@ def rep(x, r): return compat.text_type.__mul__(x, r) repeats = np.asarray(repeats, dtype=object) - result = libops.vec_binop(com._values_from_object(arr), repeats, rep) + result = libops.vec_binop(com.values_from_object(arr), repeats, rep) return result diff --git a/pandas/core/window.py b/pandas/core/window.py index 6b6f27bcb3863..f3b4aaa74ec6b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -625,7 +625,7 @@ def _prep_window(self, **kwargs): window = self._get_window() if isinstance(window, (list, tuple, np.ndarray)): - return com._asarray_tuplesafe(window).astype(float) + return com.asarray_tuplesafe(window).astype(float) elif is_integer(window): import scipy.signal as sig @@ -2467,7 +2467,7 @@ def dataframe_from_int_dict(data, frame_template): def _get_center_of_mass(comass, span, halflife, alpha): - valid_count = com._count_not_none(comass, span, halflife, alpha) + valid_count = com.count_not_none(comass, span, halflife, alpha) if valid_count > 1: raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive") diff --git a/pandas/io/formats/console.py b/pandas/io/formats/console.py index 36eac8dd57fbd..45d50ea3fa073 100644 --- a/pandas/io/formats/console.py +++ b/pandas/io/formats/console.py @@ -49,7 +49,6 @@ def get_console_size(): Returns (None,None) in non-interactive 
session. """ from pandas import get_option - from pandas.core import common as com display_width = get_option('display.width') # deprecated. @@ -65,8 +64,8 @@ def get_console_size(): # should use Auto-Detection, But only in interactive shell-terminal. # Simple. yeah. - if com.in_interactive_session(): - if com.in_ipython_frontend(): + if in_interactive_session(): + if in_ipython_frontend(): # sane defaults for interactive non-shell terminal # match default for width,height in config_init from pandas.core.config import get_default_val @@ -82,3 +81,75 @@ def get_console_size(): # and we're in a script (non-inter), this will return (None,None) # caller needs to deal. return (display_width or terminal_width, display_height or terminal_height) + + +# ---------------------------------------------------------------------- +# Detect our environment + +def in_interactive_session(): + """ check if we're running in an interactive shell + + returns True if running under python/ipython interactive shell + """ + from pandas import get_option + + def check_main(): + import __main__ as main + return (not hasattr(main, '__file__') or + get_option('mode.sim_interactive')) + + try: + return __IPYTHON__ or check_main() # noqa + except: + return check_main() + + +def in_qtconsole(): + """ + check if we're inside an IPython qtconsole + + .. deprecated:: 0.14.1 + This is no longer needed, or working, in IPython 3 and above. + """ + try: + ip = get_ipython() # noqa + front_end = ( + ip.config.get('KernelApp', {}).get('parent_appname', "") or + ip.config.get('IPKernelApp', {}).get('parent_appname', "")) + if 'qtconsole' in front_end.lower(): + return True + except: + return False + return False + + +def in_ipnb(): + """ + check if we're inside an IPython Notebook + + .. deprecated:: 0.14.1 + This is no longer needed, or working, in IPython 3 and above. 
+ """ + try: + ip = get_ipython() # noqa + front_end = ( + ip.config.get('KernelApp', {}).get('parent_appname', "") or + ip.config.get('IPKernelApp', {}).get('parent_appname', "")) + if 'notebook' in front_end.lower(): + return True + except: + return False + return False + + +def in_ipython_frontend(): + """ + check if we're inside an an IPython zmq frontend + """ + try: + ip = get_ipython() # noqa + return 'zmq' in str(type(ip)).lower() + except: + pass + + return False diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f2d6fe01e0573..c57b1c3e211f6 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3935,7 +3935,7 @@ def read(self, where=None, columns=None, **kwargs): tuple_index = long_index.values unique_tuples = unique(tuple_index) - unique_tuples = com._asarray_tuplesafe(unique_tuples) + unique_tuples = com.asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) indexer = ensure_platform_int(indexer) diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index beebf84b8a033..3bb0b98851234 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -324,7 +324,7 @@ def try_parse(values): if isinstance(values, Index): values = values.values if not isinstance(values, np.ndarray): - values = com._asarray_tuplesafe(values) + values = com.asarray_tuplesafe(values) if is_integer_dtype(values) or is_float_dtype(values): return values diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 06020bdfd5d1d..7ce4c23f81ad6 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -233,7 +233,7 @@ def _iter_data(self, data=None, keep_index=False, fillna=None): # TODO: unused? 
# if self.sort_columns: - # columns = com._try_sort(data.columns) + # columns = com.try_sort(data.columns) # else: # columns = data.columns @@ -2428,7 +2428,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, layout=layout) _axes = _flatten(axes) - for i, col in enumerate(com._try_sort(data.columns)): + for i, col in enumerate(com.try_sort(data.columns)): ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 426b29a8840f4..c72e092c73aa2 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -49,7 +49,7 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default', def random_color(column): """ Returns a random color represented as a list of length 3""" # GH17525 use common._random_state to avoid resetting the seed - rs = com._random_state(column) + rs = com.random_state(column) return rs.rand(3).tolist() colors = lmap(random_color, lrange(num_colors)) diff --git a/pandas/tests/computation/test_eval.py b/pandas/tests/computation/test_eval.py index 07ba0b681418e..118b05d16ab09 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -10,7 +10,6 @@ from pandas.core.dtypes.common import is_bool, is_list_like, is_scalar import pandas as pd -from pandas.core import common as com from pandas.errors import PerformanceWarning from pandas import DataFrame, Series, Panel, date_range from pandas.util.testing import makeCustomDataframe as mkdf @@ -94,7 +93,7 @@ def _is_py3_complex_incompat(result, expected): np.isnan(result)) -_good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms) +_good_arith_ops = set(_arith_ops_syms).difference(_special_case_arith_ops_syms) @td.skip_if_no_ne diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 66577d738dd28..8b2b74802556d 100644 --- a/pandas/tests/groupby/test_groupby.py 
+++ b/pandas/tests/groupby/test_groupby.py @@ -1260,17 +1260,17 @@ def test_groupby_sort_multi(): 'd': np.random.randn(3)}) tups = lmap(tuple, df[['a', 'b', 'c']].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['a', 'b', 'c'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups[[1, 2, 0]]) tups = lmap(tuple, df[['c', 'a', 'b']].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['c', 'a', 'b'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups) tups = lmap(tuple, df[['b', 'c', 'a']].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) result = df.groupby(['b', 'c', 'a'], sort=True).sum() tm.assert_numpy_array_equal(result.index.values, tups[[2, 1, 0]]) @@ -1282,7 +1282,7 @@ def test_groupby_sort_multi(): def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): tups = lmap(tuple, df[keys].values) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) expected = f(df.groupby(tups)[field]) for k, v in compat.iteritems(expected): assert (result[k] == v) diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py index 8c9d0459eff55..208d498180692 100644 --- a/pandas/tests/indexes/interval/test_construction.py +++ b/pandas/tests/indexes/interval/test_construction.py @@ -253,7 +253,7 @@ def get_kwargs_from_breaks(self, breaks, closed='right'): return {'data': tuples} elif is_categorical_dtype(breaks): return {'data': breaks._constructor(tuples)} - return {'data': com._asarray_tuplesafe(tuples)} + return {'data': com.asarray_tuplesafe(tuples)} def test_constructor_errors(self): # non-tuple diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 0dc5970c22803..e179286e839db 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ 
b/pandas/tests/indexes/interval/test_interval.py @@ -947,7 +947,7 @@ def test_to_tuples(self, tuples): # GH 18756 idx = IntervalIndex.from_tuples(tuples) result = idx.to_tuples() - expected = Index(com._asarray_tuplesafe(tuples)) + expected = Index(com.asarray_tuplesafe(tuples)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('tuples', [ @@ -963,7 +963,7 @@ def test_to_tuples_na(self, tuples, na_tuple): result = idx.to_tuples(na_tuple=na_tuple) # check the non-NA portion - expected_notna = Index(com._asarray_tuplesafe(tuples[:-1])) + expected_notna = Index(com.asarray_tuplesafe(tuples[:-1])) result_notna = result[:-1] tm.assert_index_equal(result_notna, expected_notna) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 25e64aa82cc36..62b37a35249d0 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -217,8 +217,8 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): tm.assert_numpy_array_equal(result[0], np.array(expected_label, dtype=np.intp)) - expected_level_array = com._asarray_tuplesafe(expected_level, - dtype=object) + expected_level_array = com.asarray_tuplesafe(expected_level, + dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) def test_complex_sorting(self): diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 61f838eeeeb30..e1c9202189972 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -8,24 +8,14 @@ import numpy as np from pandas import Series, DataFrame, Timestamp -from pandas.compat import range, lmap import pandas.core.common as com from pandas.core import ops from pandas.io.common import _get_handle import pandas.util.testing as tm -def test_mut_exclusive(): - msg = "mutually exclusive arguments: '[ab]' and '[ab]'" - with tm.assert_raises_regex(TypeError, msg): - com._mut_exclusive(a=1, b=2) - assert com._mut_exclusive(a=1, b=None) == 1 - assert com._mut_exclusive(major=None, 
major_axis=None) is None - assert com._mut_exclusive(a=None, b=2) == 2 - - def test_get_callable_name(): - getname = com._get_callable_name + getname = com.get_callable_name def fn(x): return x @@ -58,112 +48,25 @@ def test_all_not_none(): assert (not com._all_not_none(None, None, None, None)) -def test_iterpairs(): - data = [1, 2, 3, 4] - expected = [(1, 2), (2, 3), (3, 4)] - - result = list(com.iterpairs(data)) - - assert (result == expected) - - -def test_split_ranges(): - def _bin(x, width): - "return int(x) as a base2 string of given width" - return ''.join(str((x >> i) & 1) for i in range(width - 1, -1, -1)) - - def test_locs(mask): - nfalse = sum(np.array(mask) == 0) - - remaining = 0 - for s, e in com.split_ranges(mask): - remaining += e - s - - assert 0 not in mask[s:e] - - # make sure the total items covered by the ranges are a complete cover - assert remaining + nfalse == len(mask) - - # exhaustively test all possible mask sequences of length 8 - ncols = 8 - for i in range(2 ** ncols): - cols = lmap(int, list(_bin(i, ncols))) # count up in base2 - mask = [cols[i] == 1 for i in range(len(cols))] - test_locs(mask) - - # base cases - test_locs([]) - test_locs([0]) - test_locs([1]) - - -def test_map_indices_py(): - data = [4, 3, 2, 1] - expected = {4: 0, 3: 1, 2: 2, 1: 3} - - result = com.map_indices_py(data) - - assert (result == expected) - - -def test_union(): - a = [1, 2, 3] - b = [4, 5, 6] - - union = sorted(com.union(a, b)) - - assert ((a + b) == union) - - -def test_difference(): - a = [1, 2, 3] - b = [1, 2, 3, 4, 5, 6] - - inter = sorted(com.difference(b, a)) - - assert ([4, 5, 6] == inter) - - -def test_intersection(): - a = [1, 2, 3] - b = [1, 2, 3, 4, 5, 6] - - inter = sorted(com.intersection(a, b)) - - assert (a == inter) - - -def test_groupby(): - values = ['foo', 'bar', 'baz', 'baz2', 'qux', 'foo3'] - expected = {'f': ['foo', 'foo3'], - 'b': ['bar', 'baz', 'baz2'], - 'q': ['qux']} - - grouped = com.groupby(values, lambda x: x[0]) - - for k, v 
in grouped: - assert v == expected[k] - - def test_random_state(): import numpy.random as npr # Check with seed - state = com._random_state(5) + state = com.random_state(5) assert state.uniform() == npr.RandomState(5).uniform() # Check with random state object state2 = npr.RandomState(10) - assert com._random_state(state2).uniform() == npr.RandomState(10).uniform() + assert com.random_state(state2).uniform() == npr.RandomState(10).uniform() # check with no arg random state - assert com._random_state() is np.random + assert com.random_state() is np.random # Error for floats or strings with pytest.raises(ValueError): - com._random_state('test') + com.random_state('test') with pytest.raises(ValueError): - com._random_state(5.5) + com.random_state(5.5) @pytest.mark.parametrize('left, right, expected', [ @@ -182,9 +85,9 @@ def test_dict_compat(): np.datetime64('2015-03-15'): 2} data_unchanged = {1: 2, 3: 4, 5: 6} expected = {Timestamp('1990-3-15'): 1, Timestamp('2015-03-15'): 2} - assert (com._dict_compat(data_datetime64) == expected) - assert (com._dict_compat(expected) == expected) - assert (com._dict_compat(data_unchanged) == data_unchanged) + assert (com.dict_compat(data_datetime64) == expected) + assert (com.dict_compat(expected) == expected) + assert (com.dict_compat(data_unchanged) == data_unchanged) def test_standardize_mapping(): diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index d0350ba252329..98026f6d4cf0e 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -50,7 +50,7 @@ def test_int64_overflow(self): tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' ]].values)) - tups = com._asarray_tuplesafe(tups) + tups = com.asarray_tuplesafe(tups) expected = df.groupby(tups).sum()['values']
Moves a few console-checking functions to `io.formats.console`. A bunch of core.common functions were never used outside of tests, got rid of em. The ones I left alone were _any_not_none, _all_not_none etc, as I'm inclined to think these should be removed in favor of python builtins.
https://api.github.com/repos/pandas-dev/pandas/pulls/22001
2018-07-20T19:05:58Z
2018-07-24T22:10:06Z
2018-07-24T22:10:06Z
2020-04-05T17:40:40Z
Sparse get dummies perf
diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index 9044b080c45f9..07634811370c7 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -1,7 +1,9 @@ +import string from itertools import product import numpy as np from pandas import DataFrame, MultiIndex, date_range, melt, wide_to_long +import pandas as pd from .pandas_vb_common import setup # noqa @@ -132,3 +134,19 @@ def setup(self): def time_pivot_table(self): self.df.pivot_table(index='key1', columns=['key2', 'key3']) + + +class GetDummies(object): + goal_time = 0.2 + + def setup(self): + categories = list(string.ascii_letters[:12]) + s = pd.Series(np.random.choice(categories, size=1_000_000), + dtype=pd.api.types.CategoricalDtype(categories)) + self.s = s + + def time_get_dummies_1d(self): + pd.get_dummies(self.s, sparse=False) + + def time_get_dummies_1d_sparse(self): + pd.get_dummies(self.s, sparse=True) diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9e3f7ec73f852..fc51ff2df001a 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -343,7 +343,7 @@ Performance Improvements - Improved performance of :meth:`HDFStore.groups` (and dependent functions like :meth:`~HDFStore.keys`. (i.e. ``x in store`` checks are much faster) (:issue:`21372`) -- +- Improved the performance of :func:`pandas.get_dummies` with ``sparse=True`` (:issue:`21997`) .. 
_whatsnew_0240.docs: diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index d5d2e594b8d6b..b63a938112522 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -940,10 +940,11 @@ def get_empty_Frame(data, sparse): sparse_series = {} N = len(data) sp_indices = [[] for _ in range(len(dummy_cols))] - for ndx, code in enumerate(codes): - if code == -1: - # Blank entries if not dummy_na and code == -1, #GH4446 - continue + mask = codes != -1 + codes = codes[mask] + n_idx = np.arange(N)[mask] + + for ndx, code in zip(n_idx, codes): sp_indices[code].append(ndx) if drop_first:
Previously, we did a scalar `elem == -1` for every element in the ndarray. This replaces that check with a vectorized `array == -1`. Running the ASV now. In the meantime, here's a simple timeit on the same problem ```python # HEAD In [3]: %timeit pd.get_dummies(s, sparse=True) 561 ms ± 4.96 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) # Master In [3]: %timeit pd.get_dummies(s, sparse=True) 2.18 s ± 273 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21997
2018-07-20T15:47:29Z
2018-07-20T20:46:13Z
2018-07-20T20:46:13Z
2018-07-20T20:46:17Z
TST: tuple and namedtuple multiindex tests for read_csv
diff --git a/pandas/tests/io/parser/header.py b/pandas/tests/io/parser/header.py index 3fb0650348763..ad3d4592bd599 100644 --- a/pandas/tests/io/parser/header.py +++ b/pandas/tests/io/parser/header.py @@ -5,6 +5,8 @@ during parsing for all of the parsers defined in parsers.py """ +from collections import namedtuple + import pytest import numpy as np @@ -149,6 +151,22 @@ def test_header_multiindex_common_format(self): result = self.read_csv(StringIO(data), header=[0, 1], index_col=0) tm.assert_frame_equal(df, result) + # to_csv, tuples + result = self.read_csv(StringIO(data), skiprows=3, + names=[('a', 'q'), ('a', 'r'), ('a', 's'), + ('b', 't'), ('c', 'u'), ('c', 'v')], + index_col=0) + tm.assert_frame_equal(df, result) + + # to_csv, namedtuples + TestTuple = namedtuple('names', ['first', 'second']) + result = self.read_csv( + StringIO(data), skiprows=3, index_col=0, + names=[TestTuple('a', 'q'), TestTuple('a', 'r'), + TestTuple('a', 's'), TestTuple('b', 't'), + TestTuple('c', 'u'), TestTuple('c', 'v')]) + tm.assert_frame_equal(df, result) + # common data = """,a,a,a,b,c,c ,q,r,s,t,u,v @@ -158,6 +176,22 @@ def test_header_multiindex_common_format(self): result = self.read_csv(StringIO(data), header=[0, 1], index_col=0) tm.assert_frame_equal(df, result) + # common, tuples + result = self.read_csv(StringIO(data), skiprows=2, + names=[('a', 'q'), ('a', 'r'), ('a', 's'), + ('b', 't'), ('c', 'u'), ('c', 'v')], + index_col=0) + tm.assert_frame_equal(df, result) + + # common, namedtuples + TestTuple = namedtuple('names', ['first', 'second']) + result = self.read_csv( + StringIO(data), skiprows=2, index_col=0, + names=[TestTuple('a', 'q'), TestTuple('a', 'r'), + TestTuple('a', 's'), TestTuple('b', 't'), + TestTuple('c', 'u'), TestTuple('c', 'v')]) + tm.assert_frame_equal(df, result) + # common, no index_col data = """a,a,a,b,c,c q,r,s,t,u,v @@ -167,6 +201,22 @@ def test_header_multiindex_common_format(self): result = self.read_csv(StringIO(data), header=[0, 1], 
index_col=None) tm.assert_frame_equal(df.reset_index(drop=True), result) + # common, no index_col, tuples + result = self.read_csv(StringIO(data), skiprows=2, + names=[('a', 'q'), ('a', 'r'), ('a', 's'), + ('b', 't'), ('c', 'u'), ('c', 'v')], + index_col=None) + tm.assert_frame_equal(df.reset_index(drop=True), result) + + # common, no index_col, namedtuples + TestTuple = namedtuple('names', ['first', 'second']) + result = self.read_csv( + StringIO(data), skiprows=2, index_col=None, + names=[TestTuple('a', 'q'), TestTuple('a', 'r'), + TestTuple('a', 's'), TestTuple('b', 't'), + TestTuple('c', 'u'), TestTuple('c', 'v')]) + tm.assert_frame_equal(df.reset_index(drop=True), result) + # malformed case 1 expected = DataFrame(np.array( [[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype='int64'),
- [x] closes #7589 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/21994
2018-07-20T14:38:58Z
2018-07-25T12:05:57Z
2018-07-25T12:05:56Z
2018-07-25T18:50:13Z
Move FrequencyInferer out of libresolution
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index a418e54e4da9b..ecfc7355dddfc 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -22,6 +22,11 @@ cnp.import_array() cimport util from util cimport numeric, get_nat +from khash cimport (khiter_t, + kh_destroy_int64, kh_put_int64, + kh_init_int64, kh_int64_t, + kh_resize_int64, kh_get_int64) + import missing cdef float64_t FP_ERR = 1e-13 @@ -71,6 +76,42 @@ class NegInfinity(object): __ge__ = lambda self, other: isinstance(other, NegInfinity) +cpdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): + """ + Efficiently find the unique first-differences of the given array. + + Parameters + ---------- + arr : ndarray[in64_t] + + Returns + ------- + result : ndarray[int64_t] + result is sorted + """ + cdef: + Py_ssize_t i, n = len(arr) + int64_t val + khiter_t k + kh_int64_t *table + int ret = 0 + list uniques = [] + + table = kh_init_int64() + kh_resize_int64(table, 10) + for i in range(n - 1): + val = arr[i + 1] - arr[i] + k = kh_get_int64(table, val) + if k == table.n_buckets: + kh_put_int64(table, val, &ret) + uniques.append(val) + kh_destroy_int64(table) + + result = np.array(uniques, dtype=np.int64) + result.sort() + return result + + @cython.wraparound(False) @cython.boundscheck(False) def is_lexsorted(list list_of_arrays): diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 0835a43411783..4b90c669eebba 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # cython: profile=False +cimport cython from cython cimport Py_ssize_t import numpy as np @@ -10,23 +11,12 @@ cnp.import_array() from util cimport is_string_object, get_nat -from pandas._libs.khash cimport (khiter_t, - kh_destroy_int64, kh_put_int64, - kh_init_int64, kh_int64_t, - kh_resize_int64, kh_get_int64) - from np_datetime cimport npy_datetimestruct, dt64_to_dtstruct from frequencies cimport 
get_freq_code from timezones cimport (is_utc, is_tzlocal, maybe_get_tz, get_dst_info) -from fields import build_field_sarray -from conversion import tz_convert from conversion cimport tz_convert_utc_to_tzlocal -from ccalendar import MONTH_ALIASES, int_to_weekday from ccalendar cimport get_days_in_month -from timestamps import Timestamp - -from pandas._libs.properties import cache_readonly # ---------------------------------------------------------------------- # Constants @@ -41,13 +31,6 @@ cdef int RESO_MIN = 4 cdef int RESO_HR = 5 cdef int RESO_DAY = 6 -_ONE_MICRO = <int64_t>1000L -_ONE_MILLI = <int64_t>(_ONE_MICRO * 1000) -_ONE_SECOND = <int64_t>(_ONE_MILLI * 1000) -_ONE_MINUTE = <int64_t>(60 * _ONE_SECOND) -_ONE_HOUR = <int64_t>(60 * _ONE_MINUTE) -_ONE_DAY = <int64_t>(24 * _ONE_HOUR) - # ---------------------------------------------------------------------- cpdef resolution(ndarray[int64_t] stamps, tz=None): @@ -331,31 +314,7 @@ class Resolution(object): # ---------------------------------------------------------------------- # Frequency Inference -cdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): - cdef: - Py_ssize_t i, n = len(arr) - int64_t val - khiter_t k - kh_int64_t *table - int ret = 0 - list uniques = [] - - table = kh_init_int64() - kh_resize_int64(table, 10) - for i in range(n - 1): - val = arr[i + 1] - arr[i] - k = kh_get_int64(table, val) - if k == table.n_buckets: - kh_put_int64(table, val, &ret) - uniques.append(val) - kh_destroy_int64(table) - - result = np.array(uniques, dtype=np.int64) - result.sort() - return result - - -cdef object month_position_check(fields, weekdays): +def month_position_check(fields, weekdays): cdef: int32_t daysinmonth, y, m, d bint calendar_end = True @@ -397,247 +356,3 @@ cdef object month_position_check(fields, weekdays): return 'bs' else: return None - - -cdef inline bint _is_multiple(int64_t us, int64_t mult): - return us % mult == 0 - - -cdef inline str _maybe_add_count(str base, int64_t count): - 
if count != 1: - return '{count}{base}'.format(count=count, base=base) - else: - return base - - -cdef class _FrequencyInferer(object): - """ - Not sure if I can avoid the state machine here - """ - cdef public: - object index - object values - bint warn - bint is_monotonic - dict _cache - - def __init__(self, index, warn=True): - self.index = index - self.values = np.asarray(index).view('i8') - - # This moves the values, which are implicitly in UTC, to the - # the timezone so they are in local time - if hasattr(index, 'tz'): - if index.tz is not None: - self.values = tz_convert(self.values, 'UTC', index.tz) - - self.warn = warn - - if len(index) < 3: - raise ValueError('Need at least 3 dates to infer frequency') - - self.is_monotonic = (self.index.is_monotonic_increasing or - self.index.is_monotonic_decreasing) - - @cache_readonly - def deltas(self): - return unique_deltas(self.values) - - @cache_readonly - def deltas_asi8(self): - return unique_deltas(self.index.asi8) - - @cache_readonly - def is_unique(self): - return len(self.deltas) == 1 - - @cache_readonly - def is_unique_asi8(self): - return len(self.deltas_asi8) == 1 - - def get_freq(self): - if not self.is_monotonic or not self.index.is_unique: - return None - - delta = self.deltas[0] - if _is_multiple(delta, _ONE_DAY): - return self._infer_daily_rule() - else: - # Business hourly, maybe. 17: one day / 65: one weekend - if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): - return 'BH' - # Possibly intraday frequency. Here we use the - # original .asi8 values as the modified values - # will not work around DST transitions. 
See #8772 - elif not self.is_unique_asi8: - return None - delta = self.deltas_asi8[0] - if _is_multiple(delta, _ONE_HOUR): - # Hours - return _maybe_add_count('H', delta / _ONE_HOUR) - elif _is_multiple(delta, _ONE_MINUTE): - # Minutes - return _maybe_add_count('T', delta / _ONE_MINUTE) - elif _is_multiple(delta, _ONE_SECOND): - # Seconds - return _maybe_add_count('S', delta / _ONE_SECOND) - elif _is_multiple(delta, _ONE_MILLI): - # Milliseconds - return _maybe_add_count('L', delta / _ONE_MILLI) - elif _is_multiple(delta, _ONE_MICRO): - # Microseconds - return _maybe_add_count('U', delta / _ONE_MICRO) - else: - # Nanoseconds - return _maybe_add_count('N', delta) - - @cache_readonly - def day_deltas(self): - return [x / _ONE_DAY for x in self.deltas] - - @cache_readonly - def hour_deltas(self): - return [x / _ONE_HOUR for x in self.deltas] - - @cache_readonly - def fields(self): - return build_field_sarray(self.values) - - @cache_readonly - def rep_stamp(self): - return Timestamp(self.values[0]) - - cdef object month_position_check(self): - return month_position_check(self.fields, self.index.dayofweek) - - @cache_readonly - def mdiffs(self): - nmonths = self.fields['Y'] * 12 + self.fields['M'] - return unique_deltas(nmonths.astype('i8')) - - @cache_readonly - def ydiffs(self): - return unique_deltas(self.fields['Y'].astype('i8')) - - cdef _infer_daily_rule(self): - annual_rule = self._get_annual_rule() - if annual_rule: - nyears = self.ydiffs[0] - month = MONTH_ALIASES[self.rep_stamp.month] - alias = '{prefix}-{month}'.format(prefix=annual_rule, month=month) - return _maybe_add_count(alias, nyears) - - quarterly_rule = self._get_quarterly_rule() - if quarterly_rule: - nquarters = self.mdiffs[0] / 3 - mod_dict = {0: 12, 2: 11, 1: 10} - month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]] - alias = '{prefix}-{month}'.format(prefix=quarterly_rule, - month=month) - return _maybe_add_count(alias, nquarters) - - monthly_rule = self._get_monthly_rule() - if 
monthly_rule: - return _maybe_add_count(monthly_rule, self.mdiffs[0]) - - if self.is_unique: - days = self.deltas[0] / _ONE_DAY - if days % 7 == 0: - # Weekly - day = int_to_weekday[self.rep_stamp.weekday()] - return _maybe_add_count('W-{day}'.format(day=day), days / 7) - else: - return _maybe_add_count('D', days) - - if self._is_business_daily(): - return 'B' - - wom_rule = self._get_wom_rule() - if wom_rule: - return wom_rule - - cdef _get_annual_rule(self): - if len(self.ydiffs) > 1: - return None - - # lazy import to prevent circularity - # TODO: Avoid non-cython dependency - from pandas.core.algorithms import unique - - if len(unique(self.fields['M'])) > 1: - return None - - pos_check = self.month_position_check() - return {'cs': 'AS', 'bs': 'BAS', - 'ce': 'A', 'be': 'BA'}.get(pos_check) - - cdef _get_quarterly_rule(self): - if len(self.mdiffs) > 1: - return None - - if not self.mdiffs[0] % 3 == 0: - return None - - pos_check = self.month_position_check() - return {'cs': 'QS', 'bs': 'BQS', - 'ce': 'Q', 'be': 'BQ'}.get(pos_check) - - cdef _get_monthly_rule(self): - if len(self.mdiffs) > 1: - return None - pos_check = self.month_position_check() - return {'cs': 'MS', 'bs': 'BMS', - 'ce': 'M', 'be': 'BM'}.get(pos_check) - - cdef bint _is_business_daily(self): - # quick check: cannot be business daily - if self.day_deltas != [1, 3]: - return False - - # probably business daily, but need to confirm - first_weekday = self.index[0].weekday() - shifts = np.diff(self.index.asi8) - shifts = np.floor_divide(shifts, _ONE_DAY) - weekdays = np.mod(first_weekday + np.cumsum(shifts), 7) - return np.all(((weekdays == 0) & (shifts == 3)) | - ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))) - - cdef _get_wom_rule(self): - # wdiffs = unique(np.diff(self.index.week)) - # We also need -47, -49, -48 to catch index spanning year boundary - # if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all(): - # return None - - # lazy import to prevent circularity - # TODO: Avoid 
non-cython dependency - from pandas.core.algorithms import unique - - weekdays = unique(self.index.weekday) - if len(weekdays) > 1: - return None - - week_of_months = unique((self.index.day - 1) // 7) - # Only attempt to infer up to WOM-4. See #9425 - week_of_months = week_of_months[week_of_months < 4] - if len(week_of_months) == 0 or len(week_of_months) > 1: - return None - - # get which week - week = week_of_months[0] + 1 - wd = int_to_weekday[weekdays[0]] - - return 'WOM-{week}{weekday}'.format(week=week, weekday=wd) - - -cdef class _TimedeltaFrequencyInferer(_FrequencyInferer): - - cdef _infer_daily_rule(self): - if self.is_unique: - days = self.deltas[0] / _ONE_DAY - if days % 7 == 0: - # Weekly - wd = int_to_weekday[self.rep_stamp.weekday()] - alias = 'W-{weekday}'.format(weekday=wd) - return _maybe_add_count(alias, days / 7) - else: - return _maybe_add_count('D', days) diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 59cd4743f857b..d6e4824575468 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -6,25 +6,32 @@ import numpy as np +from pandas.util._decorators import cache_readonly + from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.common import ( is_period_arraylike, is_timedelta64_dtype, is_datetime64_dtype) +from pandas.core.algorithms import unique + from pandas.tseries.offsets import DateOffset -from pandas._libs.tslibs import Timedelta +from pandas._libs.tslibs import Timedelta, Timestamp import pandas._libs.tslibs.frequencies as libfreqs from pandas._libs.tslibs.frequencies import ( # noqa, semi-public API get_freq, get_base_alias, get_to_timestamp_base, get_freq_code, FreqGroup, is_subperiod, is_superperiod) +from pandas._libs.tslibs.ccalendar import MONTH_ALIASES, int_to_weekday +import pandas._libs.tslibs.resolution as libresolution +from pandas._libs.tslibs.resolution import Resolution +from pandas._libs.tslibs.fields import build_field_sarray +from 
pandas._libs.tslibs.conversion import tz_convert -from pandas._libs.tslibs.resolution import (Resolution, - _FrequencyInferer, - _TimedeltaFrequencyInferer) +from pandas._libs.algos import unique_deltas from pytz import AmbiguousTimeError @@ -37,6 +44,13 @@ RESO_HR = 5 RESO_DAY = 6 +_ONE_MICRO = 1000 +_ONE_MILLI = (_ONE_MICRO * 1000) +_ONE_SECOND = (_ONE_MILLI * 1000) +_ONE_MINUTE = (60 * _ONE_SECOND) +_ONE_HOUR = (60 * _ONE_MINUTE) +_ONE_DAY = (24 * _ONE_HOUR) + # --------------------------------------------------------------------- # Offset names ("time rules") and related functions @@ -269,3 +283,246 @@ def infer_freq(index, warn=True): inferer = _FrequencyInferer(index, warn=warn) return inferer.get_freq() + + +class _FrequencyInferer(object): + """ + Not sure if I can avoid the state machine here + """ + + def __init__(self, index, warn=True): + self.index = index + self.values = np.asarray(index).view('i8') + + # This moves the values, which are implicitly in UTC, to the + # the timezone so they are in local time + if hasattr(index, 'tz'): + if index.tz is not None: + self.values = tz_convert(self.values, 'UTC', index.tz) + + self.warn = warn + + if len(index) < 3: + raise ValueError('Need at least 3 dates to infer frequency') + + self.is_monotonic = (self.index.is_monotonic_increasing or + self.index.is_monotonic_decreasing) + + @cache_readonly + def deltas(self): + return unique_deltas(self.values) + + @cache_readonly + def deltas_asi8(self): + return unique_deltas(self.index.asi8) + + @cache_readonly + def is_unique(self): + return len(self.deltas) == 1 + + @cache_readonly + def is_unique_asi8(self): + return len(self.deltas_asi8) == 1 + + def get_freq(self): # noqa:F811 + """ + Find the appropriate frequency string to describe the inferred + frequency of self.values + + Returns + ------- + freqstr : str or None + """ + if not self.is_monotonic or not self.index.is_unique: + return None + + delta = self.deltas[0] + if _is_multiple(delta, _ONE_DAY): + 
return self._infer_daily_rule() + + # Business hourly, maybe. 17: one day / 65: one weekend + if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): + return 'BH' + # Possibly intraday frequency. Here we use the + # original .asi8 values as the modified values + # will not work around DST transitions. See #8772 + elif not self.is_unique_asi8: + return None + + delta = self.deltas_asi8[0] + if _is_multiple(delta, _ONE_HOUR): + # Hours + return _maybe_add_count('H', delta / _ONE_HOUR) + elif _is_multiple(delta, _ONE_MINUTE): + # Minutes + return _maybe_add_count('T', delta / _ONE_MINUTE) + elif _is_multiple(delta, _ONE_SECOND): + # Seconds + return _maybe_add_count('S', delta / _ONE_SECOND) + elif _is_multiple(delta, _ONE_MILLI): + # Milliseconds + return _maybe_add_count('L', delta / _ONE_MILLI) + elif _is_multiple(delta, _ONE_MICRO): + # Microseconds + return _maybe_add_count('U', delta / _ONE_MICRO) + else: + # Nanoseconds + return _maybe_add_count('N', delta) + + @cache_readonly + def day_deltas(self): + return [x / _ONE_DAY for x in self.deltas] + + @cache_readonly + def hour_deltas(self): + return [x / _ONE_HOUR for x in self.deltas] + + @cache_readonly + def fields(self): + return build_field_sarray(self.values) + + @cache_readonly + def rep_stamp(self): + return Timestamp(self.values[0]) + + def month_position_check(self): + return libresolution.month_position_check(self.fields, + self.index.dayofweek) + + @cache_readonly + def mdiffs(self): + nmonths = self.fields['Y'] * 12 + self.fields['M'] + return unique_deltas(nmonths.astype('i8')) + + @cache_readonly + def ydiffs(self): + return unique_deltas(self.fields['Y'].astype('i8')) + + def _infer_daily_rule(self): + annual_rule = self._get_annual_rule() + if annual_rule: + nyears = self.ydiffs[0] + month = MONTH_ALIASES[self.rep_stamp.month] + alias = '{prefix}-{month}'.format(prefix=annual_rule, month=month) + return _maybe_add_count(alias, nyears) + + quarterly_rule = self._get_quarterly_rule() + if 
quarterly_rule: + nquarters = self.mdiffs[0] / 3 + mod_dict = {0: 12, 2: 11, 1: 10} + month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]] + alias = '{prefix}-{month}'.format(prefix=quarterly_rule, + month=month) + return _maybe_add_count(alias, nquarters) + + monthly_rule = self._get_monthly_rule() + if monthly_rule: + return _maybe_add_count(monthly_rule, self.mdiffs[0]) + + if self.is_unique: + days = self.deltas[0] / _ONE_DAY + if days % 7 == 0: + # Weekly + day = int_to_weekday[self.rep_stamp.weekday()] + return _maybe_add_count( + 'W-{day}'.format(day=day), days / 7) + else: + return _maybe_add_count('D', days) + + if self._is_business_daily(): + return 'B' + + wom_rule = self._get_wom_rule() + if wom_rule: + return wom_rule + + def _get_annual_rule(self): + if len(self.ydiffs) > 1: + return None + + if len(unique(self.fields['M'])) > 1: + return None + + pos_check = self.month_position_check() + return {'cs': 'AS', 'bs': 'BAS', + 'ce': 'A', 'be': 'BA'}.get(pos_check) + + def _get_quarterly_rule(self): + if len(self.mdiffs) > 1: + return None + + if not self.mdiffs[0] % 3 == 0: + return None + + pos_check = self.month_position_check() + return {'cs': 'QS', 'bs': 'BQS', + 'ce': 'Q', 'be': 'BQ'}.get(pos_check) + + def _get_monthly_rule(self): + if len(self.mdiffs) > 1: + return None + pos_check = self.month_position_check() + return {'cs': 'MS', 'bs': 'BMS', + 'ce': 'M', 'be': 'BM'}.get(pos_check) + + def _is_business_daily(self): + # quick check: cannot be business daily + if self.day_deltas != [1, 3]: + return False + + # probably business daily, but need to confirm + first_weekday = self.index[0].weekday() + shifts = np.diff(self.index.asi8) + shifts = np.floor_divide(shifts, _ONE_DAY) + weekdays = np.mod(first_weekday + np.cumsum(shifts), 7) + return np.all(((weekdays == 0) & (shifts == 3)) | + ((weekdays > 0) & (weekdays <= 4) & (shifts == 1))) + + def _get_wom_rule(self): + # wdiffs = unique(np.diff(self.index.week)) + # We also need -47, -49, -48 to 
catch index spanning year boundary + # if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all(): + # return None + + weekdays = unique(self.index.weekday) + if len(weekdays) > 1: + return None + + week_of_months = unique((self.index.day - 1) // 7) + # Only attempt to infer up to WOM-4. See #9425 + week_of_months = week_of_months[week_of_months < 4] + if len(week_of_months) == 0 or len(week_of_months) > 1: + return None + + # get which week + week = week_of_months[0] + 1 + wd = int_to_weekday[weekdays[0]] + + return 'WOM-{week}{weekday}'.format(week=week, weekday=wd) + + +class _TimedeltaFrequencyInferer(_FrequencyInferer): + + def _infer_daily_rule(self): + if self.is_unique: + days = self.deltas[0] / _ONE_DAY + if days % 7 == 0: + # Weekly + wd = int_to_weekday[self.rep_stamp.weekday()] + alias = 'W-{weekday}'.format(weekday=wd) + return _maybe_add_count(alias, days / 7) + else: + return _maybe_add_count('D', days) + + +def _is_multiple(us, mult): + return us % mult == 0 + + +def _maybe_add_count(base, count): + if count != 1: + assert count == int(count) + count = int(count) + return '{count}{base}'.format(count=count, base=base) + else: + return base
Discussed briefly, FrequencyInferer doesn't benefit much from cython, isn't needed elsewhere in tslibs, and with this move loses the dependency on khash.
https://api.github.com/repos/pandas-dev/pandas/pulls/21992
2018-07-20T14:29:44Z
2018-07-25T10:12:04Z
2018-07-25T10:12:04Z
2018-07-26T16:23:22Z
allow using Iterable in Series and DataFrame constructor
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 06498b28cb77b..8751e882b825b 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -179,7 +179,7 @@ Other Enhancements - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) - :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) -- +- :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`) .. _whatsnew_0240.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 078e176ff2b99..16332738ce610 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -16,7 +16,6 @@ import collections import itertools import sys -import types import warnings from textwrap import dedent @@ -75,7 +74,8 @@ from pandas.core.arrays import Categorical, ExtensionArray import pandas.core.algorithms as algorithms from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u, - OrderedDict, raise_with_traceback) + OrderedDict, raise_with_traceback, + string_and_binary_types) from pandas import compat from pandas.compat import PY36 from pandas.compat.numpy import function as nv @@ -267,7 +267,7 @@ class DataFrame(NDFrame): Parameters ---------- - data : numpy ndarray (structured or homogeneous), dict, or DataFrame + data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame Dict can contain Series, arrays, constants, or list-like objects .. 
versionchanged :: 0.23.0 @@ -391,8 +391,11 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, else: mgr = self._init_ndarray(data, index, columns, dtype=dtype, copy=copy) - elif isinstance(data, (list, types.GeneratorType)): - if isinstance(data, types.GeneratorType): + + # For data is list-like, or Iterable (will consume into list) + elif (isinstance(data, collections.Iterable) + and not isinstance(data, string_and_binary_types)): + if not isinstance(data, collections.Sequence): data = list(data) if len(data) > 0: if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: @@ -417,8 +420,6 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, copy=copy) else: mgr = self._init_dict({}, index, columns, dtype=dtype) - elif isinstance(data, collections.Iterator): - raise TypeError("data argument can't be an iterator") else: try: arr = np.array(data, dtype=dtype, copy=copy) diff --git a/pandas/core/series.py b/pandas/core/series.py index d4c11b19082ab..08b77c505463e 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -6,7 +6,7 @@ # pylint: disable=E1101,E1103 # pylint: disable=W0703,W0622,W0613,W0201 -import types +import collections import warnings from textwrap import dedent @@ -144,7 +144,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): Parameters ---------- - data : array-like, dict, or scalar value + data : array-like, Iterable, dict, or scalar value Contains data stored in Series .. versionchanged :: 0.23.0 @@ -238,12 +238,13 @@ def __init__(self, data=None, index=None, dtype=None, name=None, elif is_extension_array_dtype(data): pass - elif (isinstance(data, types.GeneratorType) or - (compat.PY3 and isinstance(data, map))): - data = list(data) elif isinstance(data, (set, frozenset)): raise TypeError("{0!r} type is unordered" "".format(data.__class__.__name__)) + # If data is Iterable but not list-like, consume into list. 
+ elif (isinstance(data, collections.Iterable) + and not isinstance(data, collections.Sized)): + data = list(data) else: # handle sparse passed here (and force conversion) diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index bef38288ff3a5..4426d4ba8ead1 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -865,12 +865,6 @@ def test_constructor_more(self): dm = DataFrame(index=np.arange(10)) assert dm.values.shape == (10, 0) - # corner, silly - # TODO: Fix this Exception to be better... - with tm.assert_raises_regex(ValueError, 'constructor not ' - 'properly called'): - DataFrame((1, 2, 3)) - # can't cast mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1) with tm.assert_raises_regex(ValueError, 'cast'): @@ -953,6 +947,17 @@ def __len__(self, n): array.array('i', range(10))]) tm.assert_frame_equal(result, expected, check_dtype=False) + def test_constructor_iterable(self): + # GH 21987 + class Iter(): + def __iter__(self): + for i in range(10): + yield [1, 2, 3] + + expected = DataFrame([[1, 2, 3]] * 10) + result = DataFrame(Iter()) + tm.assert_frame_equal(result, expected) + def test_constructor_iterator(self): expected = DataFrame([list(range(10)), list(range(10))]) @@ -1374,10 +1379,6 @@ def test_constructor_miscast_na_int_dtype(self): expected = DataFrame([[np.nan, 1], [1, 0]]) tm.assert_frame_equal(df, expected) - def test_constructor_iterator_failure(self): - with tm.assert_raises_regex(TypeError, 'iterator'): - DataFrame(iter([1, 2, 3])) - def test_constructor_column_duplicates(self): # it works! 
#2079 df = DataFrame([[8, 5]], columns=['a', 'a']) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index e95e41bbdeefa..145682e5be863 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -156,12 +156,29 @@ def test_constructor_series(self): assert_series_equal(s2, s1.sort_index()) - def test_constructor_iterator(self): + def test_constructor_iterable(self): + # GH 21987 + class Iter(): + def __iter__(self): + for i in range(10): + yield i + expected = Series(list(range(10)), dtype='int64') + result = Series(Iter(), dtype='int64') + assert_series_equal(result, expected) + + def test_constructor_sequence(self): + # GH 21987 expected = Series(list(range(10)), dtype='int64') result = Series(range(10), dtype='int64') assert_series_equal(result, expected) + def test_constructor_single_str(self): + # GH 21987 + expected = Series(['abc']) + result = Series('abc') + assert_series_equal(result, expected) + def test_constructor_list_like(self): # make sure that we are coercing different
- [X] closes #2193 - [x] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry --- @TomAugspurger Hope you can review this. BTW, perhpas you may want to change `Iterable` in `is_list_like()` to `Sequence`. ```python gen = (i for i in range(10)) pandas.core.dtypes.inference.is_list_like(gen) # True len(gen) # Traceback (most recent call last): # File "<stdin>", line 1, in <module> # TypeError: object of type 'generator' has no len() gen[0] # Traceback (most recent call last): # File "<stdin>", line 1, in <module> # TypeError: 'generator' object is not subscriptable ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21987
2018-07-20T08:00:50Z
2018-07-26T13:00:55Z
2018-07-26T13:00:55Z
2018-07-26T13:01:09Z
CLN: Remove unused variables
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 4584e4694cdc5..204e800b932a9 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -348,7 +348,6 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, " or `ordered`.") categories = dtype.categories - ordered = dtype.ordered elif is_categorical(values): # If no "dtype" was passed, use the one from "values", but honor diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 5ecc79e030f56..ad01d4ec9b3ca 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -401,7 +401,6 @@ def from_tuples(cls, data, closed='right', copy=False, dtype=None): msg = ('{name}.from_tuples received an invalid ' 'item, {tpl}').format(name=name, tpl=d) raise TypeError(msg) - lhs, rhs = d left.append(lhs) right.append(rhs) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 376700f1418f6..edf341ae2898f 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1084,7 +1084,8 @@ def rename(self, *args, **kwargs): level = kwargs.pop('level', None) axis = kwargs.pop('axis', None) if axis is not None: - axis = self._get_axis_number(axis) + # Validate the axis + self._get_axis_number(axis) if kwargs: raise TypeError('rename() got an unexpected keyword ' @@ -5299,6 +5300,12 @@ def __copy__(self, deep=True): return self.copy(deep=deep) def __deepcopy__(self, memo=None): + """ + Parameters + ---------- + memo, default None + Standard signature. 
Unused + """ if memo is None: memo = {} return self.copy(deep=True) diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 38ac144ac6c95..ba04ff3a3d3ee 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -582,7 +582,6 @@ def _transform(self, result, values, comp_ids, transform_func, elif values.ndim > 2: for i, chunk in enumerate(values.transpose(2, 0, 1)): - chunk = chunk.squeeze() transform_func(result[:, :, i], values, comp_ids, is_datetimelike, **kwargs) else: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f09fe8c8abdcf..8ad058c001bba 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -993,6 +993,12 @@ def __copy__(self, **kwargs): return self.copy(**kwargs) def __deepcopy__(self, memo=None): + """ + Parameters + ---------- + memo, default None + Standard signature. Unused + """ if memo is None: memo = {} return self.copy(deep=True) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d76a7ef00f625..ab180a13ab4f3 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -133,7 +133,7 @@ def _create_from_codes(self, codes, categories=None, ordered=None, if name is None: name = self.name cat = Categorical.from_codes(codes, categories=categories, - ordered=self.ordered) + ordered=ordered) return CategoricalIndex(cat, name=name) @classmethod diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 246bd3d541b72..0b467760d82d9 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -939,7 +939,6 @@ def _format_data(self, name=None): summary = '[{head} ... 
{tail}]'.format( head=', '.join(head), tail=', '.join(tail)) else: - head = [] tail = [formatter(x) for x in self] summary = '[{tail}]'.format(tail=', '.join(tail)) diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index ffa2267dd6877..0f3ffb8055330 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -1248,7 +1248,7 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): if fill_tuple is None: fill_value = self.fill_value new_values = algos.take_nd(values, indexer, axis=axis, - allow_fill=False) + allow_fill=False, fill_value=fill_value) else: fill_value = fill_tuple[0] new_values = algos.take_nd(values, indexer, axis=axis, @@ -2699,7 +2699,6 @@ def _try_coerce_args(self, values, other): values_mask = isna(values) values = values.view('i8') - other_mask = False if isinstance(other, bool): raise TypeError @@ -2872,11 +2871,9 @@ def _try_coerce_args(self, values, other): values_mask = _block_shape(isna(values), ndim=self.ndim) # asi8 is a view, needs copy values = _block_shape(values.asi8, ndim=self.ndim) - other_mask = False if isinstance(other, ABCSeries): other = self._holder(other) - other_mask = isna(other) if isinstance(other, bool): raise TypeError diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index 32fd70bcf654d..f44fb4f6e9e14 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -479,7 +479,9 @@ def nanvar(values, axis=None, skipna=True, ddof=1): @disallow('M8', 'm8') def nansem(values, axis=None, skipna=True, ddof=1): - var = nanvar(values, axis, skipna, ddof=ddof) + # This checks if non-numeric-like data is passed with numeric_only=False + # and raises a TypeError otherwise + nanvar(values, axis, skipna, ddof=ddof) mask = isna(values) if not is_float_dtype(values.dtype): @@ -635,7 +637,6 @@ def nankurt(values, axis=None, skipna=True): adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) numer = count * (count + 1) * (count - 1) * m4 denom = (count - 
2) * (count - 3) * m2**2 - result = numer / denom - adj # floating point error # diff --git a/pandas/core/series.py b/pandas/core/series.py index 08b77c505463e..8f9fe5ee516e6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2052,7 +2052,6 @@ def dot(self, other): lvals = left.values rvals = right.values else: - left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: @@ -2480,7 +2479,8 @@ def sort_values(self, axis=0, ascending=True, inplace=False, dtype: object """ inplace = validate_bool_kwarg(inplace, 'inplace') - axis = self._get_axis_number(axis) + # Validate the axis parameter + self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: @@ -2652,7 +2652,8 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, # TODO: this can be combined with DataFrame.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') - axis = self._get_axis_number(axis) + # Validate the axis parameter + self._get_axis_number(axis) index = self.index if level is not None: @@ -3073,7 +3074,8 @@ def _gotitem(self, key, ndim, subset=None): versionadded='.. 
versionadded:: 0.20.0', **_shared_doc_kwargs)) def aggregate(self, func, axis=0, *args, **kwargs): - axis = self._get_axis_number(axis) + # Validate the axis parameter + self._get_axis_number(axis) result, how = self._aggregate(func, *args, **kwargs) if result is None: @@ -3919,8 +3921,8 @@ def dropna(self, axis=0, inplace=False, **kwargs): if kwargs: raise TypeError('dropna() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) - - axis = self._get_axis_number(axis or 0) + # Validate the axis parameter + self._get_axis_number(axis or 0) if self._can_hold_na: result = remove_na_arraylike(self) diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 5cb9f4744cc58..58e3001bcfe6a 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -597,7 +597,6 @@ def _combine_match_index(self, other, func, level=None): new_data[col] = func(series.values, other.values) # fill_value is a function of our operator - fill_value = None if isna(other.fill_value) or isna(self.default_fill_value): fill_value = np.nan else: diff --git a/pandas/core/window.py b/pandas/core/window.py index f3b4aaa74ec6b..eed0e97f30dc9 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -933,7 +933,8 @@ class _Rolling_and_Expanding(_Rolling): def count(self): blocks, obj, index = self._create_blocks() - index, indexi = self._get_index(index=index) + # Validate the index + self._get_index(index=index) window = self._get_window() window = min(window, len(obj)) if not self.center else window diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index f69e4a484d177..c6ca59aa08bf9 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -495,8 +495,6 @@ def _chk_truncate(self): frame.iloc[:, -col_num:]), axis=1) self.tr_col_num = col_num if truncate_v: - if max_rows_adj == 0: - row_num = len(frame) if max_rows_adj == 1: row_num = max_rows frame = frame.iloc[:max_rows, :] diff --git 
a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 20be903f54967..3ea5cb95b9c5a 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -222,7 +222,6 @@ def _column_header(): return row self.write('<thead>', indent) - row = [] indent += self.indent_delta diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py index 52262ea05bf96..dcd6f2cf4a718 100644 --- a/pandas/io/formats/terminal.py +++ b/pandas/io/formats/terminal.py @@ -67,7 +67,7 @@ def is_terminal(): def _get_terminal_size_windows(): - res = None + try: from ctypes import windll, create_string_buffer diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 3ec5e8d9be955..629e00ebfa7d0 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -547,7 +547,7 @@ def _get_object_parser(self, json): if typ == 'series' or obj is None: if not isinstance(dtype, bool): - dtype = dict(data=dtype) + kwargs['dtype'] = dtype obj = SeriesParser(json, **kwargs).parse() return obj diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 52b25898fc67e..14e7ad9682db6 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -181,10 +181,6 @@ def _parse_float_vec(vec): # number sans exponent ieee1 = xport1 & 0x00ffffff - # Get the second half of the ibm number into the second half of - # the ieee number - ieee2 = xport2 - # The fraction bit to the left of the binary point in the ieee # format was set and the number was shifted 0, 1, 2, or 3 # places. 
This will tell us how to adjust the ibm exponent to be a diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 0522d7e721b65..96e7532747c78 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -86,7 +86,6 @@ def _maybe_resample(series, ax, kwargs): freq = ax_freq elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): _upsample_others(ax, freq, kwargs) - ax_freq = freq else: # pragma: no cover raise ValueError('Incompatible frequency conversion') return freq, series diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index bcbac4400c953..d6e7c644cc780 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -642,6 +642,13 @@ def test_series_from_json_precise_float(self): result = read_json(s.to_json(), typ='series', precise_float=True) assert_series_equal(result, s, check_index_type=False) + def test_series_with_dtype(self): + # GH 21986 + s = Series([4.56, 4.56, 4.56]) + result = read_json(s.to_json(), typ='series', dtype=np.int64) + expected = Series([4] * 3) + assert_series_equal(result, expected) + def test_frame_from_json_precise_float(self): df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]]) result = read_json(df.to_json(), precise_float=True)
Breaking up #21974. Removes non-noqa, seemingly non-controversial, unused local variables according to PyCharm. These are mostly redefined elsewhere or not used. I added some TODO comments about other unused local variables that seem misused.
https://api.github.com/repos/pandas-dev/pandas/pulls/21986
2018-07-20T04:59:05Z
2018-07-29T15:32:50Z
2018-07-29T15:32:50Z
2018-07-29T18:16:26Z
CLN: Unreachable code, Boolean comparison, duplicate functions
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 0725bbeb6c36d..b51b41614bc49 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -142,7 +142,7 @@ def time_frame_nth(self, dtype): def time_series_nth_any(self, dtype): self.df['values'].groupby(self.df['key']).nth(0, dropna='any') - def time_groupby_nth_all(self, dtype): + def time_series_nth_all(self, dtype): self.df['values'].groupby(self.df['key']).nth(0, dropna='all') def time_series_nth(self, dtype): diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 2179999859dbb..68698f45d5623 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -390,7 +390,7 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True): start = 0 cur_blkno = blknos[start] - if group == False: + if group is False: for i in range(1, n): if blknos[i] != cur_blkno: yield cur_blkno, slice(start, i) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4d8e57820f29d..c5cb507e729f1 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -315,7 +315,6 @@ def __contains__(self, key): return True except Exception: return False - return False contains = __contains__
Breaking up #21974. This batch is: - Unreachable code - Redefined function (in asv) - Boolean equality comparison
https://api.github.com/repos/pandas-dev/pandas/pulls/21985
2018-07-20T04:44:22Z
2018-07-20T19:46:22Z
2018-07-20T19:46:22Z
2018-07-20T20:11:32Z
[CLN] Collect __repr__ and rendering methods together
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4578d2ac08199..f1a049b1f62fe 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -587,6 +587,9 @@ def shape(self): """ return len(self.index), len(self.columns) + # ---------------------------------------------------------------------- + # Rendering Methods + def _repr_fits_vertical_(self): """ Check length against max_rows. @@ -723,6 +726,60 @@ def style(self): from pandas.io.formats.style import Styler return Styler(self) + @Substitution(header='Write out the column names. If a list of strings ' + 'is given, it is assumed to be aliases for the ' + 'column names') + @Substitution(shared_params=fmt.common_docstring, + returns=fmt.return_docstring) + def to_string(self, buf=None, columns=None, col_space=None, header=True, + index=True, na_rep='NaN', formatters=None, float_format=None, + sparsify=None, index_names=True, justify=None, + line_width=None, max_rows=None, max_cols=None, + show_dimensions=False): + """ + Render a DataFrame to a console-friendly tabular output. + + %(shared_params)s + line_width : int, optional + Width to wrap a line in characters. + + %(returns)s + + See Also + -------- + to_html : Convert DataFrame to HTML. 
+ + Examples + -------- + >>> d = {'col1' : [1, 2, 3], 'col2' : [4, 5, 6]} + >>> df = pd.DataFrame(d) + >>> print(df.to_string()) + col1 col2 + 0 1 4 + 1 2 5 + 2 3 6 + """ + + formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, + col_space=col_space, na_rep=na_rep, + formatters=formatters, + float_format=float_format, + sparsify=sparsify, justify=justify, + index_names=index_names, + header=header, index=index, + line_width=line_width, + max_rows=max_rows, + max_cols=max_cols, + show_dimensions=show_dimensions) + formatter.to_string() + + if buf is None: + result = formatter.buf.getvalue() + return result + + # ---------------------------------------------------------------------- + # Iteration + def iteritems(self): """ Iterator over (column name, Series) pairs. @@ -854,6 +911,8 @@ def itertuples(self, index=True, name="Pandas"): items = iteritems + # ---------------------------------------------------------------------- + def __len__(self): """Returns length of info axis, but here we use the index """ return len(self.index) @@ -2005,57 +2064,6 @@ def to_parquet(self, fname, engine='auto', compression='snappy', to_parquet(self, fname, engine, compression=compression, **kwargs) - @Substitution(header='Write out the column names. If a list of strings ' - 'is given, it is assumed to be aliases for the ' - 'column names') - @Substitution(shared_params=fmt.common_docstring, - returns=fmt.return_docstring) - def to_string(self, buf=None, columns=None, col_space=None, header=True, - index=True, na_rep='NaN', formatters=None, float_format=None, - sparsify=None, index_names=True, justify=None, - line_width=None, max_rows=None, max_cols=None, - show_dimensions=False): - """ - Render a DataFrame to a console-friendly tabular output. - - %(shared_params)s - line_width : int, optional - Width to wrap a line in characters. - - %(returns)s - - See Also - -------- - to_html : Convert DataFrame to HTML. 
- - Examples - -------- - >>> d = {'col1' : [1, 2, 3], 'col2' : [4, 5, 6]} - >>> df = pd.DataFrame(d) - >>> print(df.to_string()) - col1 col2 - 0 1 4 - 1 2 5 - 2 3 6 - """ - - formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns, - col_space=col_space, na_rep=na_rep, - formatters=formatters, - float_format=float_format, - sparsify=sparsify, justify=justify, - index_names=index_names, - header=header, index=index, - line_width=line_width, - max_rows=max_rows, - max_cols=max_cols, - show_dimensions=show_dimensions) - formatter.to_string() - - if buf is None: - result = formatter.buf.getvalue() - return result - @Substitution(header='whether to print column labels, default True') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 610bcf5d1d6c4..504ea46d2ee6d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -150,17 +150,6 @@ def is_copy(self, msg): "in a future version.", FutureWarning, stacklevel=2) self._is_copy = msg - def _repr_data_resource_(self): - """ - Not a real Jupyter special repr method, but we use the same - naming convention. - """ - if config.get_option("display.html.table_schema"): - data = self.head(config.get_option('display.max_rows')) - payload = json.loads(data.to_json(orient='table'), - object_pairs_hook=collections.OrderedDict) - return payload - def _validate_dtype(self, dtype): """ validate the passed dtype """ @@ -202,12 +191,6 @@ def _constructor(self): """ raise com.AbstractMethodError(self) - def __unicode__(self): - # unicode representation based upon iterating over self - # (since, by definition, `PandasContainers` are iterable) - prepr = '[%s]' % ','.join(map(pprint_thing, self)) - return '%s(%s)' % (self.__class__.__name__, prepr) - def _dir_additions(self): """ add the string-like attributes from the info_axis. If info_axis is a MultiIndex, it's first level values are used. 
@@ -230,6 +213,36 @@ def _constructor_expanddim(self): """ raise NotImplementedError + # ---------------------------------------------------------------------- + # Rendering Methods + + def __unicode__(self): + # unicode representation based upon iterating over self + # (since, by definition, `PandasContainers` are iterable) + prepr = '[%s]' % ','.join(map(pprint_thing, self)) + return '%s(%s)' % (self.__class__.__name__, prepr) + + def _repr_data_resource_(self): + """ + Not a real Jupyter special repr method, but we use the same + naming convention. + """ + if config.get_option("display.html.table_schema"): + data = self.head(config.get_option('display.max_rows')) + payload = json.loads(data.to_json(orient='table'), + object_pairs_hook=collections.OrderedDict) + return payload + + def _repr_latex_(self): + """ + Returns a LaTeX representation for a particular object. + Mainly for use with nbconvert (jupyter notebook conversion to pdf). + """ + if config.get_option('display.latex.repr'): + return self.to_latex() + else: + return None + # ---------------------------------------------------------------------- # Axis @@ -1577,15 +1590,11 @@ def _drop_labels_or_levels(self, keys, axis=0): # ---------------------------------------------------------------------- # Iteration - def __hash__(self): - raise TypeError('{0!r} objects are mutable, thus they cannot be' - ' hashed'.format(self.__class__.__name__)) - def __iter__(self): """Iterate over infor axis""" return iter(self._info_axis) - # can we get a better explanation of this? + # TODO: can we get a better explanation of this? 
def keys(self): """Get the 'info axis' (see Indexing for more) @@ -1603,6 +1612,12 @@ def iteritems(self): for h in self._info_axis: yield h, self[h] + # ---------------------------------------------------------------------- + + def __hash__(self): + raise TypeError('{0!r} objects are mutable, thus they cannot be' + ' hashed'.format(self.__class__.__name__)) + def __len__(self): """Returns length of info axis""" return len(self._info_axis) @@ -1759,19 +1774,6 @@ def __setstate__(self, state): self._item_cache = {} - # ---------------------------------------------------------------------- - # IO - - def _repr_latex_(self): - """ - Returns a LaTeX representation for a particular object. - Mainly for use with nbconvert (jupyter notebook conversion to pdf). - """ - if config.get_option('display.latex.repr'): - return self.to_latex() - else: - return None - # ---------------------------------------------------------------------- # I/O Methods diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83b70baf4065b..0067fc8aaf94c 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1002,6 +1002,9 @@ def _validate_names(self, name=None, names=None, deep=False): return [name] return name + # ---------------------------------------------------------------------- + # Rendering Methods + def __unicode__(self): """ Return a string representation for this object. 
@@ -1061,6 +1064,90 @@ def _format_attrs(self): """ return format_object_attrs(self) + def _mpl_repr(self): + # how to represent ourselves to matplotlib + return self.values + + def format(self, name=False, formatter=None, **kwargs): + """ + Render a string representation of the Index + """ + header = [] + if name: + header.append(pprint_thing(self.name, + escape_chars=('\t', '\r', '\n')) if + self.name is not None else '') + + if formatter is not None: + return header + list(self.map(formatter)) + + return self._format_with_header(header, **kwargs) + + def _format_with_header(self, header, na_rep='NaN', **kwargs): + values = self.values + + from pandas.io.formats.format import format_array + + if is_categorical_dtype(values.dtype): + values = np.array(values) + + elif is_object_dtype(values.dtype): + values = lib.maybe_convert_objects(values, safe=1) + + if is_object_dtype(values.dtype): + result = [pprint_thing(x, escape_chars=('\t', '\r', '\n')) + for x in values] + + # could have nans + mask = isna(values) + if mask.any(): + result = np.array(result) + result[mask] = na_rep + result = result.tolist() + + else: + result = _trim_front(format_array(values, None, justify='left')) + return header + result + + def _format_native_types(self, na_rep='', quoting=None, **kwargs): + """ actually format my specific types """ + mask = isna(self) + if not self.is_object() and not quoting: + values = np.asarray(self).astype(str) + else: + values = np.array(self, dtype=object, copy=True) + + values[mask] = na_rep + return values + + def to_native_types(self, slicer=None, **kwargs): + """ + Format specified values of `self` and return them. + + Parameters + ---------- + slicer : int, array-like + An indexer into `self` that specifies which values + are used in the formatting process. + kwargs : dict + Options for specifying how the values should be formatted. 
+ These options include the following: + + 1) na_rep : str + The value that serves as a placeholder for NULL values + 2) quoting : bool or None + Whether or not there are quoted values in `self` + 3) date_format : str + The format used to represent date-like values + """ + + values = self + if slicer is not None: + values = values[slicer] + return values._format_native_types(**kwargs) + + # ---------------------------------------------------------------------- + def to_series(self, index=None, name=None): """ Create a Series with both index and values equal to the index keys @@ -1374,10 +1461,6 @@ def summary(self, name=None): "future version.", FutureWarning, stacklevel=2) return self._summary(name) - def _mpl_repr(self): - # how to represent ourselves to matplotlib - return self.values - _na_value = np.nan """The expected NA value to use with this index.""" @@ -1869,6 +1952,9 @@ def is_all_dates(self): return False return is_datetime_array(ensure_object(self.values)) + # ---------------------------------------------------------------------- + # Picklability + def __reduce__(self): d = dict(data=self._data) d.update(self._get_attributes_dict()) @@ -1901,6 +1987,8 @@ def __setstate__(self, state): _unpickle_compat = __setstate__ + # ---------------------------------------------------------------------- + def __nonzero__(self): raise ValueError("The truth value of a {0} is ambiguous. " "Use a.empty, a.bool(), a.item(), a.any() or a.all()." 
@@ -2254,84 +2342,6 @@ def putmask(self, mask, value): # coerces to object return self.astype(object).putmask(mask, value) - def format(self, name=False, formatter=None, **kwargs): - """ - Render a string representation of the Index - """ - header = [] - if name: - header.append(pprint_thing(self.name, - escape_chars=('\t', '\r', '\n')) if - self.name is not None else '') - - if formatter is not None: - return header + list(self.map(formatter)) - - return self._format_with_header(header, **kwargs) - - def _format_with_header(self, header, na_rep='NaN', **kwargs): - values = self.values - - from pandas.io.formats.format import format_array - - if is_categorical_dtype(values.dtype): - values = np.array(values) - - elif is_object_dtype(values.dtype): - values = lib.maybe_convert_objects(values, safe=1) - - if is_object_dtype(values.dtype): - result = [pprint_thing(x, escape_chars=('\t', '\r', '\n')) - for x in values] - - # could have nans - mask = isna(values) - if mask.any(): - result = np.array(result) - result[mask] = na_rep - result = result.tolist() - - else: - result = _trim_front(format_array(values, None, justify='left')) - return header + result - - def to_native_types(self, slicer=None, **kwargs): - """ - Format specified values of `self` and return them. - - Parameters - ---------- - slicer : int, array-like - An indexer into `self` that specifies which values - are used in the formatting process. - kwargs : dict - Options for specifying how the values should be formatted. 
- These options include the following: - - 1) na_rep : str - The value that serves as a placeholder for NULL values - 2) quoting : bool or None - Whether or not there are quoted values in `self` - 3) date_format : str - The format used to represent date-like values - """ - - values = self - if slicer is not None: - values = values[slicer] - return values._format_native_types(**kwargs) - - def _format_native_types(self, na_rep='', quoting=None, **kwargs): - """ actually format my specific types """ - mask = isna(self) - if not self.is_object() and not quoting: - values = np.asarray(self).astype(str) - else: - values = np.array(self, dtype=object, copy=True) - - values[mask] = na_rep - return values - def equals(self, other): """ Determines if two Index objects contain the same elements. diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index a03e478f81caf..0706996dcac79 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -261,6 +261,9 @@ def equals(self, other): return False + # ---------------------------------------------------------------------- + # Rendering Methods + @property def _formatter_func(self): return self.categories._formatter_func @@ -284,6 +287,8 @@ def _format_attrs(self): attrs.append(('length', len(self))) return attrs + # ---------------------------------------------------------------------- + @property def inferred_type(self): return 'categorical' diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 7257be421c3e1..f5b16b80cc4c9 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -682,6 +682,18 @@ def _cached_range(cls, start=None, end=None, periods=None, freq=None, return indexSlice + # ---------------------------------------------------------------------- + # Rendering Methods + + def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): + from pandas.io.formats.format import 
_get_format_datetime64_from_values + format = _get_format_datetime64_from_values(self, date_format) + + return libts.format_array_from_datetime(self.asi8, + tz=self.tz, + format=format, + na_rep=na_rep) + def _mpl_repr(self): # how to represent ourselves to matplotlib return libts.ints_to_pydatetime(self.asi8, self.tz) @@ -698,6 +710,9 @@ def _formatter_func(self): formatter = _get_format_datetime64(is_dates_only=self._is_dates_only) return lambda x: "'%s'" % formatter(x, tz=self.tz) + # ---------------------------------------------------------------------- + # Picklability + def __reduce__(self): # we use a special reudce here because we need @@ -740,6 +755,8 @@ def __setstate__(self, state): raise Exception("invalid pickle state") _unpickle_compat = __setstate__ + # ---------------------------------------------------------------------- + def _maybe_update_attributes(self, attrs): """ Update Index attributes (e.g. freq) depending on op """ freq = attrs.get('freq', None) @@ -748,15 +765,6 @@ def _maybe_update_attributes(self, attrs): attrs['freq'] = 'infer' return attrs - def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): - from pandas.io.formats.format import _get_format_datetime64_from_values - format = _get_format_datetime64_from_values(self, date_format) - - return libts.format_array_from_datetime(self.asi8, - tz=self.tz, - format=format, - na_rep=na_rep) - @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index e92f980caf3dc..5628b02d20df0 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -899,7 +899,8 @@ def __getitem__(self, value): return self._shallow_copy(left, right) - # __repr__ associated methods are based on MultiIndex + # ---------------------------------------------------------------------- + # Rendering Methods def _format_with_header(self, header, **kwargs): 
return header + list(self._format_native_types(**kwargs)) @@ -956,6 +957,8 @@ def _format_space(self): space = ' ' * (len(self.__class__.__name__) + 1) return "\n{space}".format(space=space) + # ---------------------------------------------------------------------- + def argsort(self, *args, **kwargs): return np.lexsort((self.right, self.left)) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 0d4ceb2783bad..daf95b04b3450 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -607,6 +607,69 @@ def _nbytes(self, deep=False): result += self._engine.sizeof(deep=deep) return result + # ---------------------------------------------------------------------- + # Rendering Methods + + def format(self, space=2, sparsify=None, adjoin=True, names=False, + na_rep=None, formatter=None): + if len(self) == 0: + return [] + + stringified_levels = [] + for lev, lab in zip(self.levels, self.labels): + na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type) + + if len(lev) > 0: + + formatted = lev.take(lab).format(formatter=formatter) + + # we have some NA + mask = lab == -1 + if mask.any(): + formatted = np.array(formatted, dtype=object) + formatted[mask] = na + formatted = formatted.tolist() + + else: + # weird all NA case + formatted = [pprint_thing(na if isna(x) else x, + escape_chars=('\t', '\r', '\n')) + for x in algos.take_1d(lev._values, lab)] + stringified_levels.append(formatted) + + result_levels = [] + for lev, name in zip(stringified_levels, self.names): + level = [] + + if names: + level.append(pprint_thing(name, + escape_chars=('\t', '\r', '\n')) + if name is not None else '') + + level.extend(np.array(lev, dtype=object)) + result_levels.append(level) + + if sparsify is None: + sparsify = get_option("display.multi_sparse") + + if sparsify: + sentinel = '' + # GH3547 + # use value of sparsify as sentinel, unless it's an obvious + # "Truthey" value + if sparsify not in [True, 1]: + sentinel = sparsify + # 
little bit of a kludge job for #1217 + result_levels = _sparsify(result_levels, start=int(names), + sentinel=sentinel) + + if adjoin: + from pandas.io.formats.format import _get_adjustment + adj = _get_adjustment() + return adj.adjoin(space, *result_levels).split('\n') + else: + return result_levels + def _format_attrs(self): """ Return a list of tuples of the (attr,formatted_value) @@ -629,6 +692,31 @@ def _format_data(self, name=None): # we are formatting thru the attributes return None + def _format_native_types(self, na_rep='nan', **kwargs): + new_levels = [] + new_labels = [] + + # go through the levels and format them + for level, label in zip(self.levels, self.labels): + level = level._format_native_types(na_rep=na_rep, **kwargs) + # add nan values, if there are any + mask = (label == -1) + if mask.any(): + nan_index = len(level) + level = np.append(level, na_rep) + label = label.values() + label[mask] = nan_index + new_levels.append(level) + new_labels.append(label) + + # reconstruct the multi-index + mi = MultiIndex(levels=new_levels, labels=new_labels, names=self.names, + sortorder=self.sortorder, verify_integrity=False) + + return mi.values + + # ---------------------------------------------------------------------- + def __len__(self): return len(self.labels[0]) @@ -690,29 +778,6 @@ def _set_names(self, names, level=None, validate=True): names = property(fset=_set_names, fget=_get_names, doc="Names of levels in MultiIndex") - def _format_native_types(self, na_rep='nan', **kwargs): - new_levels = [] - new_labels = [] - - # go through the levels and format them - for level, label in zip(self.levels, self.labels): - level = level._format_native_types(na_rep=na_rep, **kwargs) - # add nan values, if there are any - mask = (label == -1) - if mask.any(): - nan_index = len(level) - level = np.append(level, na_rep) - label = label.values() - label[mask] = nan_index - new_levels.append(level) - new_labels.append(label) - - # reconstruct the multi-index - mi = 
MultiIndex(levels=new_levels, labels=new_labels, names=self.names, - sortorder=self.sortorder, verify_integrity=False) - - return mi.values - @Appender(_index_shared_docs['_get_grouper_for_level']) def _get_grouper_for_level(self, mapper, level): indexer = self.labels[level] @@ -1076,66 +1141,6 @@ def unique(self, level=None): level = self._get_level_number(level) return self._get_level_values(level=level, unique=True) - def format(self, space=2, sparsify=None, adjoin=True, names=False, - na_rep=None, formatter=None): - if len(self) == 0: - return [] - - stringified_levels = [] - for lev, lab in zip(self.levels, self.labels): - na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type) - - if len(lev) > 0: - - formatted = lev.take(lab).format(formatter=formatter) - - # we have some NA - mask = lab == -1 - if mask.any(): - formatted = np.array(formatted, dtype=object) - formatted[mask] = na - formatted = formatted.tolist() - - else: - # weird all NA case - formatted = [pprint_thing(na if isna(x) else x, - escape_chars=('\t', '\r', '\n')) - for x in algos.take_1d(lev._values, lab)] - stringified_levels.append(formatted) - - result_levels = [] - for lev, name in zip(stringified_levels, self.names): - level = [] - - if names: - level.append(pprint_thing(name, - escape_chars=('\t', '\r', '\n')) - if name is not None else '') - - level.extend(np.array(lev, dtype=object)) - result_levels.append(level) - - if sparsify is None: - sparsify = get_option("display.multi_sparse") - - if sparsify: - sentinel = '' - # GH3547 - # use value of sparsify as sentinel, unless it's an obvious - # "Truthey" value - if sparsify not in [True, 1]: - sentinel = sparsify - # little bit of a kludge job for #1217 - result_levels = _sparsify(result_levels, start=int(names), - sentinel=sentinel) - - if adjoin: - from pandas.io.formats.format import _get_adjustment - adj = _get_adjustment() - return adj.adjoin(space, *result_levels).split('\n') - else: - return result_levels - def 
_to_safe_for_reshape(self): """ convert to object if we are a categorical """ return self.set_levels([i._to_safe_for_reshape() for i in self.levels]) @@ -1535,6 +1540,9 @@ def levshape(self): """A tuple with the length of each level.""" return tuple(len(x) for x in self.levels) + # ---------------------------------------------------------------------- + # Picklability + def __reduce__(self): """Necessary for making this object picklable""" d = dict(levels=[lev for lev in self.levels], @@ -1563,6 +1571,8 @@ def __setstate__(self, state): self._verify_integrity() self._reset_identity() + # ---------------------------------------------------------------------- + def __getitem__(self, key): if is_scalar(key): retval = [] diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4d8e57820f29d..3ddfd05619a84 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -387,10 +387,38 @@ def shape(self): # Avoid materializing self._values return self._ndarray_values.shape + # ---------------------------------------------------------------------- + # Rendering Methods + @property def _formatter_func(self): return lambda x: "'%s'" % x + def _mpl_repr(self): + # how to represent ourselves to matplotlib + return self.astype(object).values + + def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): + + values = self.astype(object).values + + if date_format: + formatter = lambda dt: dt.strftime(date_format) + else: + formatter = lambda dt: u'%s' % dt + + if self.hasnans: + mask = self._isnan + values[mask] = na_rep + imask = ~mask + values[imask] = np.array([formatter(dt) for dt + in values[imask]]) + else: + values = np.array([formatter(dt) for dt in values]) + return values + + # ---------------------------------------------------------------------- + def asof_locs(self, where, mask): """ where : array of timestamps @@ -481,10 +509,6 @@ def start_time(self): def end_time(self): return self.to_timestamp(how='end') 
- def _mpl_repr(self): - # how to represent ourselves to matplotlib - return self.astype(object).values - def to_timestamp(self, freq=None, how='start'): """ Cast to DatetimeIndex @@ -756,25 +780,6 @@ def _apply_meta(self, rawarr): name=self.name) return rawarr - def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): - - values = self.astype(object).values - - if date_format: - formatter = lambda dt: dt.strftime(date_format) - else: - formatter = lambda dt: u'%s' % dt - - if self.hasnans: - mask = self._isnan - values[mask] = na_rep - imask = ~mask - values[imask] = np.array([formatter(dt) for dt - in values[imask]]) - else: - values = np.array([formatter(dt) for dt in values]) - return values - def __setstate__(self, state): """Necessary for making this object picklable""" diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 939ec0b79ac6b..5c78dc5f6c771 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -185,6 +185,9 @@ def __reduce__(self): d.update(dict(self._get_data_as_items())) return ibase._new_Index, (self.__class__, d), None + # ---------------------------------------------------------------------- + # Rendering Methods + def _format_attrs(self): """ Return a list of tuples of the (attr, formatted_value) @@ -198,6 +201,8 @@ def _format_data(self, name=None): # we are formatting thru the attributes return None + # ---------------------------------------------------------------------- + @cache_readonly def nbytes(self): """ diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index dc26c9cc0c248..4802cf4498bf9 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -246,11 +246,22 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs): result._reset_identity() return result + # ---------------------------------------------------------------------- + # Rendering Methods + @property def _formatter_func(self): from 
pandas.io.formats.format import _get_format_timedelta64 return _get_format_timedelta64(self, box=True) + def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): + from pandas.io.formats.format import Timedelta64Formatter + return Timedelta64Formatter(values=self, + nat_rep=na_rep, + justify='all').get_result() + + # ---------------------------------------------------------------------- + def __setstate__(self, state): """Necessary for making this object picklable""" if isinstance(state, dict): @@ -302,12 +313,6 @@ def _addsub_offset_array(self, other, op): raise TypeError("Cannot add/subtract non-tick DateOffset to {cls}" .format(cls=type(self).__name__)) - def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): - from pandas.io.formats.format import Timedelta64Formatter - return Timedelta64Formatter(values=self, - nat_rep=na_rep, - justify='all').get_result() - days = _wrap_field_accessor("days") seconds = _wrap_field_accessor("seconds") microseconds = _wrap_field_accessor("microseconds") diff --git a/pandas/core/series.py b/pandas/core/series.py index 77445159129f2..c79df776e0951 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -476,12 +476,6 @@ def _values(self): """ return the internal repr of this data """ return self._data.internal_values() - def _formatting_values(self): - """Return the values that can be formatted (used by SeriesFormatter - and DataFrameFormatter) - """ - return self._data.formatting_values() - def get_values(self): """ same as values (but handles sparseness conversions); is a view """ return self._data.get_values() @@ -1226,6 +1220,9 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): df = self.to_frame(name) return df.reset_index(level=level, drop=drop) + # ---------------------------------------------------------------------- + # Rendering Methods + def __unicode__(self): """ Return a string representation for a particular DataFrame @@ -1301,6 +1298,15 @@ def 
to_string(self, buf=None, na_rep='NaN', float_format=None, header=True, with open(buf, 'w') as f: f.write(result) + def _formatting_values(self): + """Return the values that can be formatted (used by SeriesFormatter + and DataFrameFormatter) + """ + return self._data.formatting_values() + + # ---------------------------------------------------------------------- + # Iteration + def iteritems(self): """ Lazily iterate over (index, value) tuples @@ -1309,13 +1315,13 @@ def iteritems(self): items = iteritems - # ---------------------------------------------------------------------- - # Misc public methods - def keys(self): """Alias for index""" return self.index + # ---------------------------------------------------------------------- + # Misc public methods + def to_dict(self, into=dict): """ Convert Series to {label -> value} dict or dict-like object.
Motivation here is in looking at options for rendering EA subclasses. There's a decent argument to be made that `to_html` and/or `to_latex` should be included in "rendering methods", in large part because they both call `fmt.Formatter`. Holding off on that pending feedback.
https://api.github.com/repos/pandas-dev/pandas/pulls/21984
2018-07-19T22:20:54Z
2018-07-23T17:09:52Z
null
2018-07-23T17:09:52Z
ENH GH20927 Added convert_dates parameter (default=True) to read_sas()
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index b015495b095b6..2e029c8bc9341 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -458,6 +458,7 @@ I/O - :func:`read_html()` no longer ignores all-whitespace ``<tr>`` within ``<thead>`` when considering the ``skiprows`` and ``header`` arguments. Previously, users had to decrease their ``header`` and ``skiprows`` values on such tables to work around the issue. (:issue:`21641`) - :func:`read_excel()` will correctly show the deprecation warning for previously deprecated ``sheetname`` (:issue:`17994`) +- :func:`read_sas()` now has new parameter convert_dates(default=True) which is passed through to SAS7BDATReader. Setting convert_dates=False allows SAS datasets with dates larger than pd.Timestamp.max (2262-04-11 23:47:16.854775807) to be read in with all date/datetime columns as their native float64. The SAS dates can then be converted to datetime.date/datetime objects post-import, or the values capped to pd.Timestamp.max and converted with pd.to_datetime() (:issue:`20927`) - Plotting diff --git a/pandas/io/sas/sasreader.py b/pandas/io/sas/sasreader.py index b8a0bf5733158..f53482f8f6787 100644 --- a/pandas/io/sas/sasreader.py +++ b/pandas/io/sas/sasreader.py @@ -6,7 +6,7 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, - chunksize=None, iterator=False): + chunksize=None, iterator=False, convert_dates=True): """ Read SAS files stored as either XPORT or SAS7BDAT format files. @@ -25,6 +25,15 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, Read file `chunksize` lines at a time, returns iterator. iterator : bool, defaults to False If True, returns an iterator for reading the file incrementally. + convert_dates: bool, default to True + If True convert SAS date and datetime columns to Pandas datetime + NB. 
For datetimes larger than pd.Timestamp.max + '2262-04-11 23:47:16.854775807' an exception + pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime is thrown + If False SAS date and datetime columns are read as their native + float64 and can be converted after the import to + datetime.datetime or datetime.date values (or high values capped + to pd.Timestamp.max and converted with pandas.to_datetime) Returns ------- @@ -58,7 +67,8 @@ def read_sas(filepath_or_buffer, format=None, index=None, encoding=None, from pandas.io.sas.sas7bdat import SAS7BDATReader reader = SAS7BDATReader(filepath_or_buffer, index=index, encoding=encoding, - chunksize=chunksize) + chunksize=chunksize, + convert_dates=convert_dates) else: raise ValueError('unknown SAS format') diff --git a/pandas/tests/io/sas/data/max_sas_date.csv b/pandas/tests/io/sas/data/max_sas_date.csv new file mode 100644 index 0000000000000..97ca2e73700c6 --- /dev/null +++ b/pandas/tests/io/sas/data/max_sas_date.csv @@ -0,0 +1,2 @@ +dt_as_float,dt_as_string,dt_as_dt,date_as_date,date_as_string,date_as_num +253717747199.999,31DEC9999:23:59:59.999,253717747199.999,2936547,9999-12-31,2936547 diff --git a/pandas/tests/io/sas/data/max_sas_date.sas7bdat b/pandas/tests/io/sas/data/max_sas_date.sas7bdat new file mode 100644 index 0000000000000..f036befde7809 Binary files /dev/null and b/pandas/tests/io/sas/data/max_sas_date.sas7bdat differ diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 101ee3e619f5b..811cd48178885 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -188,3 +188,26 @@ def test_zero_variables(datapath): fname = datapath("io", "sas", "data", "zero_variables.sas7bdat") with pytest.raises(EmptyDataError): pd.read_sas(fname) + + +def test_max_sas_date_exception(datapath): + # GH 20927 + fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") + with pytest.raises(pd._libs.tslibs.np_datetime.OutOfBoundsDatetime): + 
pd.read_sas(fname) + assert True + + +def test_max_sas_date(datapath): + # GH 20927 + fname = datapath("io", "sas", "data", "max_sas_date.sas7bdat") + df = pd.read_sas(fname, encoding='iso-8859-1', convert_dates=False) + # SAS likes to left pad strings with spaces - lstrip before comparing + str_cols = df.select_dtypes(['object']).columns + df[str_cols] = df[str_cols].apply(lambda x: x.str.lstrip(' ')) + fname = datapath("io", "sas", "data", "max_sas_date.csv") + df0 = pd.read_csv( + fname, dtype={'dt_as_float': np.float64, + 'dt_as_dt': np.float64, 'date_as_date': np.float64, + 'date_as_num': np.float64}) + tm.assert_frame_equal(df, df0)
Added parameter which is passed down to the underlying SAS7BDATReader. This allows SAS datasets with dates too large for pandas.datetime to be read in with the dates/datetimes as SAS native float64 values, rather than trying (and failing) to convert to pandas.datetime. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21983
2018-07-19T21:59:03Z
2018-11-23T03:33:46Z
null
2019-08-20T21:27:15Z
REF: No need to delegate to index check of whether an int is an int
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 8ffc7548059b7..e0b6048b2ad64 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2124,7 +2124,25 @@ def _getitem_scalar(self, key): return values def _validate_integer(self, key, axis): - # return a boolean if we have a valid integer indexer + """ + Check that 'key' is a valid position in the desired axis. + + Parameters + ---------- + key : int + Requested position + axis : int + Desired axis + + Returns + ------- + None + + Raises + ------ + IndexError + If 'key' is not a valid position in axis 'axis' + """ ax = self.obj._get_axis(axis) l = len(ax) @@ -2215,8 +2233,6 @@ def _getitem_axis(self, key, axis=None): # a single integer else: - key = self._convert_scalar_indexer(key, axis) - if not is_integer(key): raise TypeError("Cannot index by location index with a " "non-integer key") diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 32a56aeafc6ad..ba1f1de21871f 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -50,7 +50,7 @@ def test_scalar_error(self): def f(): s.iloc[3.0] tm.assert_raises_regex(TypeError, - 'cannot do positional indexing', + 'Cannot index by location index', f) def f(): diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 81397002abd2b..3dcfe6a68ad9f 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -126,6 +126,18 @@ def test_iloc_getitem_neg_int(self): typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) + @pytest.mark.parametrize('dims', [1, 2]) + def test_iloc_getitem_invalid_scalar(self, dims): + # GH 21982 + + if dims == 1: + s = Series(np.arange(10)) + else: + s = DataFrame(np.arange(100).reshape(10, 10)) + + tm.assert_raises_regex(TypeError, 'Cannot index by location index', + lambda: s.iloc['a']) + def test_iloc_array_not_mutating_negative_indices(self): # GH 21867
- [x] tests passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Just fixing the following nonsensical error: ``` python In [2]: pd.Series(range(10)).iloc['a'] --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-2-4a32011255fe> in <module>() ----> 1 pd.Series(range(10)).iloc['a'] [...] TypeError: cannot do positional indexing on <class 'pandas.core.indexes.range.RangeIndex'> with these indexers [a] of <class 'str'> ``` (you just cannot do positional indexing with a ``str``, regardless of the index) ... and adding a docstring while I was at it.
https://api.github.com/repos/pandas-dev/pandas/pulls/21982
2018-07-19T21:04:16Z
2018-07-20T19:18:59Z
2018-07-20T19:18:59Z
2018-07-20T19:20:22Z
ENH: Implement subtraction for object-dtype Index
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index b015495b095b6..4439529faf208 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -310,6 +310,7 @@ Other API Changes - Invalid construction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`) - Trying to reindex a ``DataFrame`` with a non unique ``MultiIndex`` now raises a ``ValueError`` instead of an ``Exception`` (:issue:`21770`) - :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) +- :class:`Index` subtraction will attempt to operate element-wise instead of raising ``TypeError`` (:issue:`19369`) .. _whatsnew_0240.deprecations: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83b70baf4065b..3a42c7963f21b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2630,8 +2630,10 @@ def __iadd__(self, other): return self + other def __sub__(self, other): - raise TypeError("cannot perform __sub__ with this index type: " - "{typ}".format(typ=type(self).__name__)) + return Index(np.array(self) - other) + + def __rsub__(self, other): + return Index(other - np.array(self)) def __and__(self, other): return self.intersection(other) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 7b105390db40b..754703dfc4bee 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -3,7 +3,7 @@ import pytest from datetime import datetime, timedelta - +from decimal import Decimal from collections import defaultdict import pandas.util.testing as tm @@ -864,13 +864,47 @@ def test_add(self): expected = Index(['1a', '1b', '1c']) tm.assert_index_equal('1' + index, expected) - def test_sub(self): + def test_sub_fail(self): index = self.strIndex pytest.raises(TypeError, lambda: index - 'a') pytest.raises(TypeError, lambda: index - 
index) pytest.raises(TypeError, lambda: index - index.tolist()) pytest.raises(TypeError, lambda: index.tolist() - index) + def test_sub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(0), Decimal(1)]) + + result = index - Decimal(1) + tm.assert_index_equal(result, expected) + + result = index - pd.Index([Decimal(1), Decimal(1)]) + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + index - 'foo' + + with pytest.raises(TypeError): + index - np.array([2, 'foo']) + + def test_rsub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(1), Decimal(0)]) + + result = Decimal(2) - index + tm.assert_index_equal(result, expected) + + result = np.array([Decimal(2), Decimal(2)]) - index + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + 'foo' - index + + with pytest.raises(TypeError): + np.array([True, pd.Timestamp.now()]) - index + def test_map_identity_mapping(self): # GH 12766 # TODO: replace with fixture
- [x] closes #19369 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry @jreback @jorisvandenbossche discussed briefly at the sprint. Merits more thorough testing, but I'd like to get the go-ahead to separate out arithmetic tests that are common to EA/Index/Series/Frame[1col] that are highly duplicative first.
https://api.github.com/repos/pandas-dev/pandas/pulls/21981
2018-07-19T19:13:32Z
2018-07-23T10:01:53Z
2018-07-23T10:01:53Z
2020-04-05T17:40:45Z
CLN:Unused Variables
diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 0725bbeb6c36d..b51b41614bc49 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -142,7 +142,7 @@ def time_frame_nth(self, dtype): def time_series_nth_any(self, dtype): self.df['values'].groupby(self.df['key']).nth(0, dropna='any') - def time_groupby_nth_all(self, dtype): + def time_series_nth_all(self, dtype): self.df['values'].groupby(self.df['key']).nth(0, dropna='all') def time_series_nth(self, dtype): diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 2179999859dbb..96841ec21e962 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -390,7 +390,7 @@ def get_blkno_indexers(int64_t[:] blknos, bint group=True): start = 0 cur_blkno = blknos[start] - if group == False: + if not group: for i in range(1, n): if blknos[i] != cur_blkno: yield cur_blkno, slice(start, i) diff --git a/pandas/_libs/ops.pyx b/pandas/_libs/ops.pyx index 148018ece20e2..72a0baa763c8a 100644 --- a/pandas/_libs/ops.pyx +++ b/pandas/_libs/ops.pyx @@ -260,8 +260,8 @@ def maybe_convert_bool(ndarray[object] arr, result = np.empty(n, dtype=np.uint8) # the defaults - true_vals = set(('True', 'TRUE', 'true')) - false_vals = set(('False', 'FALSE', 'false')) + true_vals = {'True', 'TRUE', 'true'} + false_vals = {'False', 'FALSE', 'false'} if true_values is not None: true_vals = true_vals | set(true_values) diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index 7803595badee1..2caf36d24b745 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -126,7 +126,7 @@ _lite_rule_alias = { 'us': 'U', 'ns': 'N'} -_dont_uppercase = set(('MS', 'ms')) +_dont_uppercase = {'MS', 'ms'} # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 2fe8fab2e2e19..20b43f9d5644b 100644 --- 
a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -23,7 +23,7 @@ from util cimport (get_nat, # ---------------------------------------------------------------------- # Constants -nat_strings = set(['NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN']) +nat_strings = {'NaT', 'nat', 'NAT', 'nan', 'NaN', 'NAN'} cdef int64_t NPY_NAT = get_nat() iNaT = NPY_NAT # python-visible constant diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 7881529f04ed3..822c1d44457f0 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -252,12 +252,12 @@ def _validate_business_time(t_input): # --------------------------------------------------------------------- # Constructor Helpers -relativedelta_kwds = set([ +relativedelta_kwds = { 'years', 'months', 'weeks', 'days', 'year', 'month', 'week', 'day', 'weekday', 'hour', 'minute', 'second', 'microsecond', 'nanosecond', 'nanoseconds', - 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds']) + 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds'} def _determine_offset(kwds): diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 59db371833957..732725975690c 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1965,6 +1965,6 @@ def _validate_end_alias(how): 'START': 'S', 'FINISH': 'E', 'BEGIN': 'S', 'END': 'E'} how = how_dict.get(str(how).upper()) - if how not in set(['S', 'E']): + if how not in {'S', 'E'}: raise ValueError('How must be one of S or E') return how diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index c1a9a9fc1ed13..fb64289e5632f 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -15,9 +15,6 @@ def load_reduce(self): args = stack.pop() func = stack[-1] - if len(args) and type(args[0]) is type: - n = args[0].__name__ # noqa - try: stack[-1] = func(*args) return diff --git a/pandas/core/arrays/categorical.py 
b/pandas/core/arrays/categorical.py index 973a8af76bb07..9b7863224bae0 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -347,7 +347,6 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, " or `ordered`.") categories = dtype.categories - ordered = dtype.ordered elif is_categorical(values): # If no "dtype" was passed, use the one from "values", but honor diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index c915b272aee8b..f1ef095ce54e4 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -25,7 +25,7 @@ from . import ExtensionArray, Categorical -_VALID_CLOSED = set(['left', 'right', 'both', 'neither']) +_VALID_CLOSED = {'left', 'right', 'both', 'neither'} _interval_shared_docs = {} _shared_docs_kwargs = dict( klass='IntervalArray', @@ -401,7 +401,6 @@ def from_tuples(cls, data, closed='right', copy=False, dtype=None): msg = ('{name}.from_tuples received an invalid ' 'item, {tpl}').format(name=name, tpl=d) raise TypeError(msg) - lhs, rhs = d left.append(lhs) right.append(rhs) @@ -815,7 +814,6 @@ def _format_data(self): summary = '[{head} ... 
{tail}]'.format( head=', '.join(head), tail=', '.join(tail)) else: - head = [] tail = [formatter(x) for x in self] summary = '[{tail}]'.format(tail=', '.join(tail)) diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 781101f5804e6..5f59bea57fbe1 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -24,8 +24,8 @@ # the set of dtypes that we will allow pass to numexpr _ALLOWED_DTYPES = { - 'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']), - 'where': set(['int64', 'float64', 'bool']) + 'evaluate': {'int64', 'int32', 'float64', 'float32', 'bool'}, + 'where': {'int64', 'float64', 'bool'} } # the minimum prod shape that we will use numexpr @@ -81,7 +81,7 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): return False dtypes |= set(s.index) elif isinstance(o, np.ndarray): - dtypes |= set([o.dtype.name]) + dtypes |= {o.dtype.name} # allowed are a superset if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 4a41b14cee071..2bf7d57fb36e1 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -188,8 +188,8 @@ def is_nonempty(x): typs = get_dtype_kinds(to_concat) if len(typs) != 1: - if (not len(typs - set(['i', 'u', 'f'])) or - not len(typs - set(['bool', 'i', 'u']))): + if (not len(typs - {'i', 'u', 'f'}) or + not len(typs - {'bool', 'i', 'u'})): # let numpy coerce pass else: @@ -599,7 +599,7 @@ def convert_sparse(x, axis): to_concat = [convert_sparse(x, axis) for x in to_concat] result = np.concatenate(to_concat, axis=axis) - if not len(typs - set(['sparse', 'f', 'i'])): + if not len(typs - {'sparse', 'f', 'i'}): # sparsify if inputs are sparse and dense numerics # first sparse input's fill_value and SparseIndex is used result = SparseArray(result.ravel(), fill_value=fill_values[0], diff --git a/pandas/core/dtypes/dtypes.py 
b/pandas/core/dtypes/dtypes.py index 57b1d81d94754..cf771a127a696 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -305,7 +305,6 @@ def _hash_categories(categories, ordered=True): # everything to a str first, which means we treat # {'1', '2'} the same as {'1', 2} # find a better solution - cat_array = np.array([hash(x) for x in categories]) hashed = hash((tuple(categories), ordered)) return hashed cat_array = hash_array(np.asarray(categories), categorize=False) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 610bcf5d1d6c4..04c7def208885 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1022,9 +1022,6 @@ def rename(self, *args, **kwargs): copy = kwargs.pop('copy', True) inplace = kwargs.pop('inplace', False) level = kwargs.pop('level', None) - axis = kwargs.pop('axis', None) - if axis is not None: - axis = self._get_axis_number(axis) if kwargs: raise TypeError('rename() got an unexpected keyword ' @@ -5206,8 +5203,6 @@ def __copy__(self, deep=True): return self.copy(deep=deep) def __deepcopy__(self, memo=None): - if memo is None: - memo = {} return self.copy(deep=True) def _convert(self, datetime=False, numeric=False, timedelta=False, diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 169416d6f8211..3683809554e18 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -134,7 +134,6 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True, obj = self.obj[data.items[locs]] s = groupby(obj, self.grouper) result = s.aggregate(lambda x: alt(x, axis=self.axis)) - newb = result._data.blocks[0] finally: diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index a1511b726c705..88643f0c1700a 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -481,7 +481,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, if key.key is None: return grouper, [], obj else: - return 
grouper, set([key.key]), obj + return grouper, {key.key}, obj # already have a BaseGrouper, just return it elif isinstance(key, BaseGrouper): diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index f2c55a56b119d..853927a247e50 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -387,6 +387,7 @@ def get_func(fname): # otherwise find dtype-specific version, falling back to object for dt in [dtype_str, 'object']: + # TODO: Should dtype_str below be replaced with dt? f = getattr(libgroupby, "%s_%s" % (fname, dtype_str), None) if f is not None: return f @@ -582,7 +583,6 @@ def _transform(self, result, values, comp_ids, transform_func, elif values.ndim > 2: for i, chunk in enumerate(values.transpose(2, 0, 1)): - chunk = chunk.squeeze() transform_func(result[:, :, i], values, comp_ids, is_datetimelike, **kwargs) else: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 83b70baf4065b..c8fe02252dbb7 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -237,7 +237,7 @@ class Index(IndexOpsMixin, PandasObject): _engine_type = libindex.ObjectEngine - _accessors = set(['str']) + _accessors = {'str'} str = CachedAccessor("str", StringMethods) @@ -979,8 +979,6 @@ def __copy__(self, **kwargs): return self.copy(**kwargs) def __deepcopy__(self, memo=None): - if memo is None: - memo = {} return self.copy(deep=True) def _validate_names(self, name=None, names=None, deep=False): @@ -1622,11 +1620,6 @@ def is_int(v): # if we are mixed and have integers try: if is_positional and self.is_mixed(): - # TODO: i, j are not used anywhere - if start is not None: - i = self.get_loc(start) # noqa - if stop is not None: - j = self.get_loc(stop) # noqa is_positional = False except KeyError: if self.inferred_type == 'mixed-integer-float': diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index a03e478f81caf..25e737b9d35ff 100644 --- a/pandas/core/indexes/category.py +++ 
b/pandas/core/indexes/category.py @@ -132,6 +132,7 @@ def _create_from_codes(self, codes, categories=None, ordered=None, ordered = self.ordered if name is None: name = self.name + # TODO: ordered above is unused, should ordered be passed here? cat = Categorical.from_codes(codes, categories=categories, ordered=self.ordered) return CategoricalIndex(cat, name=name) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index e92f980caf3dc..0463b19ece6f6 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -44,7 +44,7 @@ from pandas.core.arrays.interval import (IntervalArray, _interval_shared_docs) -_VALID_CLOSED = set(['left', 'right', 'both', 'neither']) +_VALID_CLOSED = {'left', 'right', 'both', 'neither'} _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( dict(klass='IntervalIndex', @@ -939,7 +939,6 @@ def _format_data(self, name=None): summary = '[{head} ... {tail}]'.format( head=', '.join(head), tail=', '.join(tail)) else: - head = [] tail = [formatter(x) for x in self] summary = '[{tail}]'.format(tail=', '.join(tail)) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4d8e57820f29d..c5cb507e729f1 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -315,7 +315,6 @@ def __contains__(self, key): return True except Exception: return False - return False contains = __contains__ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 8ffc7548059b7..e41d22e12a539 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -789,9 +789,6 @@ def _align_frame(self, indexer, df): if isinstance(indexer, tuple): aligners = [not com.is_null_slice(idx) for idx in indexer] - sum_aligners = sum(aligners) - # TODO: single_aligner is not used - single_aligner = sum_aligners == 1 # noqa idx, cols = None, None sindexers = [] @@ -865,9 +862,6 @@ def _align_frame(self, indexer, df): raise ValueError('Incompatible indexer 
with DataFrame') def _align_panel(self, indexer, df): - # TODO: is_frame, is_panel are unused - is_frame = self.obj.ndim == 2 # noqa - is_panel = self.obj.ndim >= 3 # noqa raise NotImplementedError("cannot set using an indexer with a Panel " "yet!") diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a5418dcc1e7f..93ae5be344f66 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1255,7 +1255,6 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): values = self.get_values() if fill_tuple is None: - fill_value = self.fill_value new_values = algos.take_nd(values, indexer, axis=axis, allow_fill=False) else: @@ -2708,7 +2707,6 @@ def _try_coerce_args(self, values, other): values_mask = isna(values) values = values.view('i8') - other_mask = False if isinstance(other, bool): raise TypeError @@ -2881,11 +2879,9 @@ def _try_coerce_args(self, values, other): values_mask = _block_shape(isna(values), ndim=self.ndim) # asi8 is a view, needs copy values = _block_shape(values.asi8, ndim=self.ndim) - other_mask = False if isinstance(other, ABCSeries): other = self._holder(other) - other_mask = isna(other) if isinstance(other, bool): raise TypeError diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index de31c6ac11c3f..8cb8e92aea4a4 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -479,7 +479,6 @@ def nanvar(values, axis=None, skipna=True, ddof=1): @disallow('M8', 'm8') def nansem(values, axis=None, skipna=True, ddof=1): - var = nanvar(values, axis, skipna, ddof=ddof) mask = isna(values) if not is_float_dtype(values.dtype): @@ -635,7 +634,6 @@ def nankurt(values, axis=None, skipna=True): adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) numer = count * (count + 1) * (count - 1) * m4 denom = (count - 2) * (count - 3) * m2**2 - result = numer / denom - adj # floating point error # diff --git a/pandas/core/ops.py b/pandas/core/ops.py index bccc5a587bd83..0caf6987d6214 100644 --- 
a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1743,9 +1743,6 @@ def na_op(x, y): @Appender('Wrapper for comparison method {name}'.format(name=op_name)) def f(self, other, axis=None): - # Validate the axis parameter - if axis is not None: - axis = self._get_axis_number(axis) if isinstance(other, self._constructor): return self._compare_constructor(other, na_op, try_cast=False) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 16ade3fae90a1..95ac8017ba8b4 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -716,7 +716,7 @@ def dropna(self, axis=0, how='any', inplace=False): values = self.values mask = notna(values) - for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))): + for ax in reversed(sorted(set(range(self._AXIS_LEN)) - {axis})): mask = mask.sum(ax) per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:]) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 32251430deec7..08bfec89a22a8 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1199,7 +1199,7 @@ def __init__(self, freq='Min', closed=None, label=None, how='mean', freq = to_offset(freq) - end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W']) + end_types = {'M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'} rule = freq.rule_code if (rule in end_types or ('-' in rule and rule[:rule.find('-')] in end_types)): diff --git a/pandas/core/series.py b/pandas/core/series.py index 77445159129f2..dcedf0f6020b4 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -163,7 +163,7 @@ class Series(base.IndexOpsMixin, generic.NDFrame): Copy input data """ _metadata = ['name'] - _accessors = set(['dt', 'cat', 'str']) + _accessors = {'dt', 'cat', 'str'} _deprecations = generic.NDFrame._deprecations | frozenset( ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value', 'from_csv', 'valid']) @@ -2051,7 +2051,6 @@ def dot(self, other): lvals = left.values rvals = right.values else: - left = self lvals = self.values rvals = 
np.asarray(other) if lvals.shape[0] != rvals.shape[0]: @@ -2479,7 +2478,6 @@ def sort_values(self, axis=0, ascending=True, inplace=False, dtype: object """ inplace = validate_bool_kwarg(inplace, 'inplace') - axis = self._get_axis_number(axis) # GH 5856/5853 if inplace and self._is_cached: @@ -2651,7 +2649,6 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, # TODO: this can be combined with DataFrame.sort_index impl as # almost identical inplace = validate_bool_kwarg(inplace, 'inplace') - axis = self._get_axis_number(axis) index = self.index if level is not None: @@ -3072,7 +3069,6 @@ def _gotitem(self, key, ndim, subset=None): versionadded='.. versionadded:: 0.20.0', **_shared_doc_kwargs)) def aggregate(self, func, axis=0, *args, **kwargs): - axis = self._get_axis_number(axis) result, how = self._aggregate(func, *args, **kwargs) if result is None: @@ -3919,8 +3915,6 @@ def dropna(self, axis=0, inplace=False, **kwargs): raise TypeError('dropna() got an unexpected keyword ' 'argument "{0}"'.format(list(kwargs.keys())[0])) - axis = self._get_axis_number(axis or 0) - if self._can_hold_na: result = remove_na_arraylike(self) if inplace: diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f7071061d07ab..818e46b0e53ee 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -597,7 +597,6 @@ def _combine_match_index(self, other, func, level=None): new_data[col] = func(series.values, other.values) # fill_value is a function of our operator - fill_value = None if isna(other.fill_value) or isna(self.default_fill_value): fill_value = np.nan else: diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 96ee5b7954f45..e5bc8ad9068f2 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -624,8 +624,6 @@ def cumsum(self, axis=0, *args, **kwargs): cumsum : SparseSeries """ nv.validate_cumsum(args, kwargs) - if axis is not None: - axis = self._get_axis_number(axis) 
new_array = self.values.cumsum() @@ -654,7 +652,6 @@ def dropna(self, axis=0, inplace=False, **kwargs): Analogous to Series.dropna. If fill_value=NaN, returns a dense Series """ # TODO: make more efficient - axis = self._get_axis_number(axis or 0) dense_valid = self.to_dense().dropna() if inplace: raise NotImplementedError("Cannot perform inplace dropna" diff --git a/pandas/core/strings.py b/pandas/core/strings.py index e4765c00f80fd..4e68dd22eabe6 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1078,7 +1078,7 @@ def str_get_dummies(arr, sep='|'): tags = set() for ts in arr.str.split(sep): tags.update(ts) - tags = sorted(tags - set([""])) + tags = sorted(tags - {""}) dummies = np.empty((len(arr), len(tags)), dtype=np.int64) diff --git a/pandas/core/window.py b/pandas/core/window.py index 6b6f27bcb3863..1ce6a7ec8ff14 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -773,7 +773,6 @@ class _GroupByMixin(GroupByMixin): """ provide the groupby facilities """ def __init__(self, obj, *args, **kwargs): - parent = kwargs.pop('parent', None) # noqa groupby = kwargs.pop('groupby', None) if groupby is None: groupby, obj = obj, obj.obj @@ -933,7 +932,6 @@ class _Rolling_and_Expanding(_Rolling): def count(self): blocks, obj, index = self._create_blocks() - index, indexi = self._get_index(index=index) window = self._get_window() window = min(window, len(obj)) if not self.center else window @@ -974,8 +972,6 @@ def count(self): def apply(self, func, raw=None, args=(), kwargs={}): from pandas import Series - # TODO: _level is unused? 
- _level = kwargs.pop('_level', None) # noqa window = self._get_window() offset = _offset(window, self.center) index, indexi = self._get_index() diff --git a/pandas/io/common.py b/pandas/io/common.py index 17dda903cdadb..4761f20154960 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -23,10 +23,10 @@ # common NA values # no longer excluding inf representations # '1.#INF','-1.#INF', '1.#INF000000', -_NA_VALUES = set([ +_NA_VALUES = { '-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', '#N/A N/A', '#N/A', 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', 'NaN', '-NaN', 'nan', '-nan', '' -]) +} if compat.PY3: diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 0796888554a46..a7c664cfafbff 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -267,7 +267,7 @@ def _save_header(self): # Write out the index line if it's not empty. # Otherwise, we will print out an extraneous # blank line between the mi and the data rows. - if encoded_labels and set(encoded_labels) != set(['']): + if encoded_labels and set(encoded_labels) != {''}: encoded_labels.extend([''] * len(columns)) writer.writerow(encoded_labels) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index f69e4a484d177..bf3402342afc9 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -496,6 +496,8 @@ def _chk_truncate(self): self.tr_col_num = col_num if truncate_v: if max_rows_adj == 0: + # TODO: should the next condition be an elif? 
row_num gets + # overwritten in the next block row_num = len(frame) if max_rows_adj == 1: row_num = max_rows diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index 20be903f54967..3ea5cb95b9c5a 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -222,7 +222,6 @@ def _column_header(): return row self.write('<thead>', indent) - row = [] indent += self.indent_delta diff --git a/pandas/io/formats/terminal.py b/pandas/io/formats/terminal.py index 52262ea05bf96..9056aa89da411 100644 --- a/pandas/io/formats/terminal.py +++ b/pandas/io/formats/terminal.py @@ -67,7 +67,6 @@ def is_terminal(): def _get_terminal_size_windows(): - res = None try: from ctypes import windll, create_string_buffer diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 3ec5e8d9be955..6da97c297486b 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -547,6 +547,8 @@ def _get_object_parser(self, json): if typ == 'series' or obj is None: if not isinstance(dtype, bool): + # TODO: dtype is unused. 
This is probably supposed to be + # kwargs.update({'dtype' : dtype}) dtype = dict(data=dtype) obj = SeriesParser(json, **kwargs).parse() diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index f2d6fe01e0573..3ac4e29c70110 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2819,7 +2819,6 @@ def read_index_legacy(self, key, start=None, stop=None): class LegacySeriesFixed(LegacyFixed): def read(self, **kwargs): - kwargs = self.validate_read(kwargs) index = self.read_index_legacy('index') values = self.read_array('values') return Series(values, index=index) @@ -2828,7 +2827,6 @@ def read(self, **kwargs): class LegacyFrameFixed(LegacyFixed): def read(self, **kwargs): - kwargs = self.validate_read(kwargs) index = self.read_index_legacy('index') columns = self.read_index_legacy('columns') values = self.read_array('values') @@ -2877,7 +2875,6 @@ class SparseSeriesFixed(SparseFixed): attributes = ['name', 'fill_value', 'kind'] def read(self, **kwargs): - kwargs = self.validate_read(kwargs) index = self.read_index('index') sp_values = self.read_array('sp_values') sp_index = self.read_index('sp_index') @@ -2901,7 +2898,6 @@ class SparseFrameFixed(SparseFixed): attributes = ['default_kind', 'default_fill_value'] def read(self, **kwargs): - kwargs = self.validate_read(kwargs) columns = self.read_index('columns') sdict = {} for c in columns: @@ -2967,7 +2963,6 @@ def shape(self): def read(self, start=None, stop=None, **kwargs): # start, stop applied to rows, so 0th axis only - kwargs = self.validate_read(kwargs) select_axis = self.obj_type()._get_block_manager_axis(0) axes = [] diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index 52b25898fc67e..87be54a1934ba 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -183,6 +183,7 @@ def _parse_float_vec(vec): # Get the second half of the ibm number into the second half of # the ieee number + # TODO: This value is unused ieee2 = xport2 # The fraction bit to the left 
of the binary point in the ieee diff --git a/pandas/io/sql.py b/pandas/io/sql.py index a582d32741ae9..e9ac4ea0972cb 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -845,7 +845,6 @@ def _sqlalchemy_type(self, col): if col_type == 'datetime64' or col_type == 'datetime': try: - tz = col.tzinfo # noqa return DateTime(timezone=True) except: return DateTime diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 0522d7e721b65..96e7532747c78 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -86,7 +86,6 @@ def _maybe_resample(series, ax, kwargs): freq = ax_freq elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): _upsample_others(ax, freq, kwargs) - ax_freq = freq else: # pragma: no cover raise ValueError('Incompatible frequency conversion') return freq, series diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index bef38288ff3a5..0b74867461eca 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -900,7 +900,6 @@ def test_constructor_empty_list(self): # Empty generator: list(empty_gen()) == [] def empty_gen(): return - yield df = DataFrame(empty_gen(), columns=['A', 'B']) tm.assert_frame_equal(df, expected) diff --git a/pandas/tests/indexing/common.py b/pandas/tests/indexing/common.py index a77c170221bea..cdd1fafe7cfaa 100644 --- a/pandas/tests/indexing/common.py +++ b/pandas/tests/indexing/common.py @@ -28,9 +28,9 @@ def _axify(obj, key, axis): class Base(object): """ indexing comprehensive base class """ - _objs = set(['series', 'frame', 'panel']) - _typs = set(['ints', 'uints', 'labels', 'mixed', - 'ts', 'floats', 'empty', 'ts_rev', 'multi']) + _objs = {'series', 'frame', 'panel'} + _typs = {'ints', 'uints', 'labels', 'mixed', + 'ts', 'floats', 'empty', 'ts_rev', 'multi'} def setup_method(self, method): diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py 
index d2c3f82e95c4d..2a43a590cbe6b 100644 --- a/pandas/tests/io/parser/na_values.py +++ b/pandas/tests/io/parser/na_values.py @@ -69,9 +69,9 @@ def test_non_string_na_values(self): tm.assert_frame_equal(out, expected) def test_default_na_values(self): - _NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', - '#N/A', 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', - 'NaN', 'nan', '-NaN', '-nan', '#N/A N/A', '']) + _NA_VALUES = {'-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN', + '#N/A', 'N/A', 'n/a', 'NA', '#NA', 'NULL', 'null', + 'NaN', 'nan', '-NaN', '-nan', '#N/A N/A', ''} assert _NA_VALUES == com._NA_VALUES nv = len(_NA_VALUES) diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py index 919b357f14236..1bf055854de88 100644 --- a/pandas/tests/io/parser/parse_dates.py +++ b/pandas/tests/io/parser/parse_dates.py @@ -455,7 +455,7 @@ def test_read_with_parse_dates_invalid_type(self): self.read_csv, StringIO(data), parse_dates=np.array([4, 5])) tm.assert_raises_regex(TypeError, errmsg, self.read_csv, - StringIO(data), parse_dates=set([1, 3, 3])) + StringIO(data), parse_dates={1, 3, 3}) def test_parse_dates_empty_string(self): # see gh-2263 diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 5d076bf33a8ac..2d36e50fd361f 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -226,9 +226,9 @@ def apply_index(self, i): "implementation".format( name=self.__class__.__name__)) kwds = self.kwds - relativedelta_fast = set(['years', 'months', 'weeks', - 'days', 'hours', 'minutes', - 'seconds', 'microseconds']) + relativedelta_fast = {'years', 'months', 'weeks', + 'days', 'hours', 'minutes', + 'seconds', 'microseconds'} # relativedelta/_offset path only valid for base DateOffset if (self._use_relativedelta and set(kwds).issubset(relativedelta_fast)): @@ -270,7 +270,7 @@ def isAnchored(self): # set of attributes on each object rather than the existing behavior of # iterating over internal ``__dict__`` def 
_repr_attrs(self): - exclude = set(['n', 'inc', 'normalize']) + exclude = {'n', 'inc', 'normalize'} attrs = [] for attr in sorted(self.__dict__): if attr.startswith('_') or attr == 'kwds': diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 9697c991122dd..50989ae68cc93 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -909,8 +909,8 @@ def repr_class(x): if exact == 'equiv': if type(left) != type(right): # allow equivalence of Int64Index/RangeIndex - types = set([type(left).__name__, type(right).__name__]) - if len(types - set(['Int64Index', 'RangeIndex'])): + types = {type(left).__name__, type(right).__name__} + if len(types - {'Int64Index', 'RangeIndex'}): msg = '{obj} classes are not equivalent'.format(obj=obj) raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) @@ -2317,7 +2317,7 @@ def wrapper(*args, **kwargs): raise else: skip("Skipping test due to lack of connectivity" - " and error {error}".format(e)) + " and error {error}".format(error=e)) return wrapper diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py index 29eb4161718ff..10035aeeec94c 100755 --- a/scripts/find_commits_touching_func.py +++ b/scripts/find_commits_touching_func.py @@ -135,7 +135,7 @@ def search(defname, head_commit="HEAD"): # seed with hits from q allhits = set(get_hits(defname, files=files)) - q = set([HEAD]) + q = {HEAD} try: while q: h = q.pop()
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Used PyCharm inspections to check: - Unused local variables - Unreachable code - `set([..])` to `{...}`
https://api.github.com/repos/pandas-dev/pandas/pulls/21974
2018-07-19T04:06:09Z
2018-07-19T16:33:39Z
null
2018-07-19T16:33:48Z
Backport PR #21966 on branch 0.23.x
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index 5e19ab491647d..a30fbc75f11f8 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -27,6 +27,7 @@ Bug Fixes **Groupby/Resample/Rolling** - Bug where calling :func:`DataFrameGroupBy.agg` with a list of functions including ``ohlc`` as the non-initial element would raise a ``ValueError`` (:issue:`21716`) +- Bug in ``roll_quantile`` caused a memory leak when calling ``.rolling(...).quantile(q)`` with ``q`` in (0,1) (:issue:`21965`) - **Conversion** diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 5121d293efcb6..a77433e5d1115 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1482,6 +1482,8 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, else: output[i] = NaN + skiplist_destroy(skiplist) + return output
Backport PR #21966: Fix memory leak in roll_quantile
https://api.github.com/repos/pandas-dev/pandas/pulls/21973
2018-07-19T01:42:20Z
2018-07-20T12:28:02Z
2018-07-20T12:28:02Z
2018-07-20T12:28:02Z
Backport PR #21921 on branch 0.23.x
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index a88c22e3d01f7..5e19ab491647d 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -58,3 +58,7 @@ Bug Fixes - - + +**Missing** + +- Bug in :func:`Series.clip` and :func:`DataFrame.clip` cannot accept list-like threshold containing ``NaN`` (:issue:`19992`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 02462218e8b02..facc709877285 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6433,9 +6433,11 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, # GH 17276 # numpy doesn't like NaN as a clip value # so ignore - if np.any(pd.isnull(lower)): + # GH 19992 + # numpy doesn't drop a list-like bound containing NaN + if not is_list_like(lower) and np.any(pd.isnull(lower)): lower = None - if np.any(pd.isnull(upper)): + if not is_list_like(upper) and np.any(pd.isnull(upper)): upper = None # GH 2747 (arguments were reversed) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 437d3a9d24730..415ae982673ee 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -2195,13 +2195,23 @@ def test_clip_with_na_args(self): """Should process np.nan argument as None """ # GH # 17276 tm.assert_frame_equal(self.frame.clip(np.nan), self.frame) - tm.assert_frame_equal(self.frame.clip(upper=[1, 2, np.nan]), - self.frame) - tm.assert_frame_equal(self.frame.clip(lower=[1, np.nan, 3]), - self.frame) tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan), self.frame) + # GH #19992 + df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6], + 'col_2': [7, 8, 9]}) + + result = df.clip(lower=[4, 5, np.nan], axis=0) + expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan], + 'col_2': [7, 8, np.nan]}) + tm.assert_frame_equal(result, expected) + + result = df.clip(lower=[4, 5, np.nan], axis=1) + expected = DataFrame({'col_0': 
[4, 4, 4], 'col_1': [5, 5, 6], + 'col_2': [np.nan, np.nan, np.nan]}) + tm.assert_frame_equal(result, expected) + # Matrix-like def test_dot(self): a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 1e6ea96a5de51..bcf209521f913 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -1140,11 +1140,15 @@ def test_clip_with_na_args(self): s = Series([1, 2, 3]) assert_series_equal(s.clip(np.nan), Series([1, 2, 3])) - assert_series_equal(s.clip(upper=[1, 1, np.nan]), Series([1, 2, 3])) - assert_series_equal(s.clip(lower=[1, np.nan, 1]), Series([1, 2, 3])) assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3])) + # GH #19992 + assert_series_equal(s.clip(lower=[0, 4, np.nan]), + Series([1, 4, np.nan])) + assert_series_equal(s.clip(upper=[1, np.nan, 1]), + Series([1, np.nan, 1])) + def test_clip_against_series(self): # GH #6966
Backport PR #21921: BUG:Clip with a list-like threshold with a nan is broken (GH19992)
https://api.github.com/repos/pandas-dev/pandas/pulls/21967
2018-07-18T10:23:52Z
2018-07-19T01:41:03Z
2018-07-19T01:41:03Z
2018-09-08T04:42:10Z
Fix memory leak in roll_quantile
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index ac1ef78fd6fd2..6d98334ace9e2 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -31,6 +31,7 @@ Bug Fixes **Groupby/Resample/Rolling** - Bug where calling :func:`DataFrameGroupBy.agg` with a list of functions including ``ohlc`` as the non-initial element would raise a ``ValueError`` (:issue:`21716`) +- Bug in ``roll_quantile`` caused a memory leak when calling ``.rolling(...).quantile(q)`` with ``q`` in (0,1) (:issue:`21965`) - **Conversion** diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 9e704a9bd8d3f..cea77e2c88b1b 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1481,6 +1481,8 @@ def roll_quantile(ndarray[float64_t, cast=True] input, int64_t win, else: output[i] = NaN + skiplist_destroy(skiplist) + return output
closes #21965
https://api.github.com/repos/pandas-dev/pandas/pulls/21966
2018-07-18T10:08:42Z
2018-07-19T01:41:35Z
2018-07-19T01:41:35Z
2018-07-19T01:41:44Z
Trim unncessary code in datetime/np_datetime.c
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 1ad8c780ba7a4..9e56802b92bf0 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -235,8 +235,7 @@ NPY_NO_EXPORT void add_seconds_to_datetimestruct(npy_datetimestruct *dts, * Fills in the year, month, day in 'dts' based on the days * offset from 1970. */ -static void set_datetimestruct_days(npy_int64 days, - npy_datetimestruct *dts) { +static void set_datetimestruct_days(npy_int64 days, npy_datetimestruct *dts) { const int *month_lengths; int i; @@ -318,7 +317,7 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a, /* * - * Tests for and converts a Python datetime.datetime or datetime.date + * Converts a Python datetime.datetime or datetime.date * object into a NumPy npy_datetimestruct. Uses tzinfo (if present) * to convert to UTC time. * @@ -330,68 +329,22 @@ int cmp_npy_datetimestruct(const npy_datetimestruct *a, * Returns -1 on error, 0 on success, and 1 (with no error set) * if obj doesn't have the needed date or datetime attributes. 
*/ -int convert_pydatetime_to_datetimestruct(PyObject *obj, +int convert_pydatetime_to_datetimestruct(PyDateTime_Date *obj, npy_datetimestruct *out) { + // Assumes that obj is a valid datetime object PyObject *tmp; - int isleap; /* Initialize the output to all zeros */ memset(out, 0, sizeof(npy_datetimestruct)); out->month = 1; out->day = 1; - /* Need at least year/month/day attributes */ - if (!PyObject_HasAttrString(obj, "year") || - !PyObject_HasAttrString(obj, "month") || - !PyObject_HasAttrString(obj, "day")) { - return 1; - } - - /* Get the year */ - tmp = PyObject_GetAttrString(obj, "year"); - if (tmp == NULL) { - return -1; - } - out->year = PyInt_AsLong(tmp); - if (out->year == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - /* Get the month */ - tmp = PyObject_GetAttrString(obj, "month"); - if (tmp == NULL) { - return -1; - } - out->month = PyInt_AsLong(tmp); - if (out->month == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - /* Get the day */ - tmp = PyObject_GetAttrString(obj, "day"); - if (tmp == NULL) { - return -1; - } - out->day = PyInt_AsLong(tmp); - if (out->day == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); + out->year = PyInt_AsLong(PyObject_GetAttrString(obj, "year")); + out->month = PyInt_AsLong(PyObject_GetAttrString(obj, "month")); + out->day = PyInt_AsLong(PyObject_GetAttrString(obj, "day")); - /* Validate that the month and day are valid for the year */ - if (out->month < 1 || out->month > 12) { - goto invalid_date; - } - isleap = is_leapyear(out->year); - if (out->day < 1 || - out->day > days_per_month_table[isleap][out->month - 1]) { - goto invalid_date; - } + // TODO(anyone): If we can get PyDateTime_IMPORT to work, we could use + // PyDateTime_Check here, and less verbose attribute lookups. 
/* Check for time attributes (if not there, return success as a date) */ if (!PyObject_HasAttrString(obj, "hour") || @@ -401,61 +354,13 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj, return 0; } - /* Get the hour */ - tmp = PyObject_GetAttrString(obj, "hour"); - if (tmp == NULL) { - return -1; - } - out->hour = PyInt_AsLong(tmp); - if (out->hour == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - /* Get the minute */ - tmp = PyObject_GetAttrString(obj, "minute"); - if (tmp == NULL) { - return -1; - } - out->min = PyInt_AsLong(tmp); - if (out->min == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - /* Get the second */ - tmp = PyObject_GetAttrString(obj, "second"); - if (tmp == NULL) { - return -1; - } - out->sec = PyInt_AsLong(tmp); - if (out->sec == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); + out->hour = PyInt_AsLong(PyObject_GetAttrString(obj, "hour")); + out->min = PyInt_AsLong(PyObject_GetAttrString(obj, "minute")); + out->sec = PyInt_AsLong(PyObject_GetAttrString(obj, "second")); + out->us = PyInt_AsLong(PyObject_GetAttrString(obj, "microsecond")); - /* Get the microsecond */ - tmp = PyObject_GetAttrString(obj, "microsecond"); - if (tmp == NULL) { - return -1; - } - out->us = PyInt_AsLong(tmp); - if (out->us == -1 && PyErr_Occurred()) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - - if (out->hour < 0 || out->hour >= 24 || out->min < 0 || out->min >= 60 || - out->sec < 0 || out->sec >= 60 || out->us < 0 || out->us >= 1000000) { - goto invalid_time; - } - - /* Apply the time zone offset if it exists */ - if (PyObject_HasAttrString(obj, "tzinfo")) { + /* Apply the time zone offset if datetime obj is tz-aware */ + if (PyObject_HasAttrString((PyObject*)obj, "tzinfo")) { tmp = PyObject_GetAttrString(obj, "tzinfo"); if (tmp == NULL) { return -1; @@ -497,50 +402,15 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj, } return 0; - 
-invalid_date: - PyErr_Format(PyExc_ValueError, - "Invalid date (%d,%d,%d) when converting to NumPy datetime", - (int)out->year, (int)out->month, (int)out->day); - return -1; - -invalid_time: - PyErr_Format(PyExc_ValueError, - "Invalid time (%d,%d,%d,%d) when converting " - "to NumPy datetime", - (int)out->hour, (int)out->min, (int)out->sec, (int)out->us); - return -1; -} - -npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - npy_datetimestruct *d) { - npy_datetime result = NPY_DATETIME_NAT; - - convert_datetimestruct_to_datetime(fr, d, &result); - return result; -} - -void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - npy_datetimestruct *result) { - convert_datetime_to_datetimestruct(fr, val, result); -} - -void pandas_timedelta_to_timedeltastruct(npy_timedelta val, - NPY_DATETIMEUNIT fr, - pandas_timedeltastruct *result) { - convert_timedelta_to_timedeltastruct(fr, val, result); } /* * Converts a datetime from a datetimestruct to a datetime based * on a metadata unit. The date is assumed to be valid. - * - * Returns 0 on success, -1 on failure. 
*/ -int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, - const npy_datetimestruct *dts, - npy_datetime *out) { +npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, + const npy_datetimestruct *dts) { npy_datetime ret; if (base == NPY_FR_Y) { @@ -632,17 +502,14 @@ int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, return -1; } } - - *out = ret; - - return 0; + return ret; } /* * Converts a datetime based on the given metadata into a datetimestruct */ -int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, - npy_datetime dt, +void pandas_datetime_to_datetimestruct(npy_datetime dt, + NPY_DATETIMEUNIT base, npy_datetimestruct *out) { npy_int64 perday; @@ -850,10 +717,7 @@ int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, PyErr_SetString(PyExc_RuntimeError, "NumPy datetime metadata is corrupted with invalid " "base unit"); - return -1; } - - return 0; } /* @@ -862,8 +726,8 @@ int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, * * Returns 0 on success, -1 on failure. */ -int convert_timedelta_to_timedeltastruct(NPY_DATETIMEUNIT base, - npy_timedelta td, +void pandas_timedelta_to_timedeltastruct(npy_timedelta td, + NPY_DATETIMEUNIT base, pandas_timedeltastruct *out) { npy_int64 frac; npy_int64 sfrac; @@ -953,8 +817,5 @@ int convert_timedelta_to_timedeltastruct(NPY_DATETIMEUNIT base, PyErr_SetString(PyExc_RuntimeError, "NumPy timedelta metadata is corrupted with " "invalid base unit"); - return -1; } - - return 0; } diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index f5c48036c16f8..4347d0c8c47d4 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -18,6 +18,7 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #define PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_ #include <numpy/ndarraytypes.h> +#include <datetime.h> typedef struct { npy_int64 days; @@ -30,11 +31,11 @@ extern const npy_datetimestruct _NS_MAX_DTS; // stuff pandas needs // ---------------------------------------------------------------------------- -int convert_pydatetime_to_datetimestruct(PyObject *obj, +int convert_pydatetime_to_datetimestruct(PyDateTime_Date *obj, npy_datetimestruct *out); -npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - npy_datetimestruct *d); +npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, + const npy_datetimestruct *dts); void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, npy_datetimestruct *result); @@ -74,9 +75,4 @@ void add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes); -int -convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, - npy_datetime dt, - npy_datetimestruct *out); - #endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_ diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index c9b0143ffc6ca..4bab32e93ab1e 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -481,16 +481,17 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, npy_datetimestruct dts; PyDatetimeScalarObject *obj = (PyDatetimeScalarObject *)_obj; PRINTMARK(); + // TODO(anyone): Does not appear to be reached in tests. 
- pandas_datetime_to_datetimestruct( - obj->obval, (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); + pandas_datetime_to_datetimestruct(obj->obval, + (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); } static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { npy_datetimestruct dts; - PyObject *obj = (PyObject *)_obj; + PyDateTime_Date *obj = (PyDateTime_Date *)_obj; PRINTMARK(); diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 3c0fe98ee7b7d..76838c7a23b24 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -147,6 +147,9 @@ cdef inline void td64_to_tdstruct(int64_t td64, cdef inline int64_t pydatetime_to_dt64(datetime val, npy_datetimestruct *dts): + """ + Note we are assuming that the datetime object is timezone-naive. + """ dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val) @@ -158,8 +161,7 @@ cdef inline int64_t pydatetime_to_dt64(datetime val, return dtstruct_to_dt64(dts) -cdef inline int64_t pydate_to_dt64(date val, - npy_datetimestruct *dts): +cdef inline int64_t pydate_to_dt64(date val, npy_datetimestruct *dts): dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val)
`pydatetime_to_datetimestruct` does a ton of checking that boils down to "is this a valid datetime object?" Since the function only gets called after a type-check, we can assume it is a date/datetime and be a lot less verbose about it. This also rips out an unnecessary layer of functions `convert_datetime_to_datetimestruct`, `convert_timedelta_to_timedeltastruct`. cc @WillAyd you mentioned wanting to work on your C-foo. There's a comment about figuring out how to import the cpython datetime C-API. Any thoughts?
https://api.github.com/repos/pandas-dev/pandas/pulls/21962
2018-07-18T04:23:43Z
2018-07-20T12:30:14Z
2018-07-20T12:30:14Z
2020-04-05T17:41:30Z
standardize post-call treatment of get_dst_info, delay sorting calls
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index b8f97dcf2d599..acf6cd4b74362 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -104,7 +104,7 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, ndarray[int64_t] trans, deltas npy_datetimestruct dts object dt - int64_t value + int64_t value, delta ndarray[object] result = np.empty(n, dtype=object) object (*func_create)(int64_t, npy_datetimestruct, object, object) @@ -125,58 +125,67 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, raise ValueError("box must be one of 'datetime', 'date', 'time' or" " 'timestamp'") - if tz is not None: - if is_utc(tz): + if is_utc(tz) or tz is None: + for i in range(n): + value = arr[i] + if value == NPY_NAT: + result[i] = NaT + else: + dt64_to_dtstruct(value, &dts) + result[i] = func_create(value, dts, tz, freq) + elif is_tzlocal(tz): + for i in range(n): + value = arr[i] + if value == NPY_NAT: + result[i] = NaT + else: + # Python datetime objects do not support nanosecond + # resolution (yet, PEP 564). Need to compute new value + # using the i8 representation. + local_value = tz_convert_utc_to_tzlocal(value, tz) + dt64_to_dtstruct(local_value, &dts) + result[i] = func_create(value, dts, tz, freq) + else: + trans, deltas, typ = get_dst_info(tz) + + if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] for i in range(n): value = arr[i] if value == NPY_NAT: result[i] = NaT else: - dt64_to_dtstruct(value, &dts) + # Adjust datetime64 timestamp, recompute datetimestruct + dt64_to_dtstruct(value + delta, &dts) result[i] = func_create(value, dts, tz, freq) - elif is_tzlocal(tz) or is_fixed_offset(tz): + + elif typ == 'dateutil': + # no zone-name change for dateutil tzs - dst etc + # represented in single object. 
for i in range(n): value = arr[i] if value == NPY_NAT: result[i] = NaT else: - # Python datetime objects do not support nanosecond - # resolution (yet, PEP 564). Need to compute new value - # using the i8 representation. - local_value = tz_convert_utc_to_tzlocal(value, tz) - dt64_to_dtstruct(local_value, &dts) + # Adjust datetime64 timestamp, recompute datetimestruct + pos = trans.searchsorted(value, side='right') - 1 + dt64_to_dtstruct(value + deltas[pos], &dts) result[i] = func_create(value, dts, tz, freq) else: - trans, deltas, typ = get_dst_info(tz) - + # pytz for i in range(n): - value = arr[i] if value == NPY_NAT: result[i] = NaT else: - # Adjust datetime64 timestamp, recompute datetimestruct pos = trans.searchsorted(value, side='right') - 1 - if treat_tz_as_pytz(tz): - # find right representation of dst etc in pytz timezone - new_tz = tz._tzinfos[tz._transition_info[pos]] - else: - # no zone-name change for dateutil tzs - dst etc - # represented in single object. - new_tz = tz + # find right representation of dst etc in pytz timezone + new_tz = tz._tzinfos[tz._transition_info[pos]] dt64_to_dtstruct(value + deltas[pos], &dts) result[i] = func_create(value, dts, new_tz, freq) - else: - for i in range(n): - - value = arr[i] - if value == NPY_NAT: - result[i] = NaT - else: - dt64_to_dtstruct(value, &dts) - result[i] = func_create(value, dts, None, freq) return result diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index fae855f5495f0..7621ac912d4d5 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -526,7 +526,7 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): """ cdef: ndarray[int64_t] trans, deltas - int64_t delta, local_val + int64_t local_val Py_ssize_t pos assert obj.tzinfo is None @@ -542,22 +542,23 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - pos = 
trans.searchsorted(obj.value, side='right') - 1 - - # static/pytz/dateutil specific code if is_fixed_offset(tz): - # statictzinfo - assert len(deltas) == 1, len(deltas) + # static/fixed tzinfo; in this case we know len(deltas) == 1 + # This can come back with `typ` of either "fixed" or None dt64_to_dtstruct(obj.value + deltas[0], &obj.dts) - elif treat_tz_as_pytz(tz): + elif typ == 'pytz': + # i.e. treat_tz_as_pytz(tz) + pos = trans.searchsorted(obj.value, side='right') - 1 tz = tz._tzinfos[tz._transition_info[pos]] dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) - elif treat_tz_as_dateutil(tz): + elif typ == 'dateutil': + # i.e. treat_tz_as_dateutil(tz) + pos = trans.searchsorted(obj.value, side='right') - 1 dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) else: - # TODO: this case is never reached in the tests, but get_dst_info - # has a path that returns typ = None and empty deltas. - # --> Is this path possible? + # Note: as of 2018-07-17 all tzinfo objects that are _not_ + # either pytz or dateutil have is_fixed_offset(tz) == True, + # so this branch will never be reached. 
pass obj.tzinfo = tz @@ -1126,6 +1127,7 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): ndarray[int64_t] trans, deltas Py_ssize_t[:] pos npy_datetimestruct dts + int64_t delta if is_utc(tz): with nogil: @@ -1147,17 +1149,17 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - pos = trans.searchsorted(stamps, side='right') - 1 - - # statictzinfo if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT continue - dt64_to_dtstruct(stamps[i] + deltas[0], &dts) + dt64_to_dtstruct(stamps[i] + delta, &dts) result[i] = _normalized_stamp(&dts) else: + pos = trans.searchsorted(stamps, side='right') - 1 for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT @@ -1207,7 +1209,7 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): Py_ssize_t i, n = len(stamps) ndarray[int64_t] trans, deltas npy_datetimestruct dts - int64_t local_val + int64_t local_val, delta if tz is None or is_utc(tz): for i in range(n): @@ -1223,12 +1225,22 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): else: trans, deltas, typ = get_dst_info(tz) - for i in range(n): - # Adjust datetime64 timestamp, recompute datetimestruct - pos = trans.searchsorted(stamps[i]) - 1 + if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] + for i in range(n): + # Adjust datetime64 timestamp, recompute datetimestruct + dt64_to_dtstruct(stamps[i] + delta, &dts) + if (dts.hour + dts.min + dts.sec + dts.us) > 0: + return False - dt64_to_dtstruct(stamps[i] + deltas[pos], &dts) - if (dts.hour + dts.min + dts.sec + dts.us) > 0: - return False + else: + for i in range(n): + # Adjust datetime64 timestamp, recompute datetimestruct + pos = 
trans.searchsorted(stamps[i]) - 1 + + dt64_to_dtstruct(stamps[i] + deltas[pos], &dts) + if (dts.hour + dts.min + dts.sec + dts.us) > 0: + return False return True diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 59db371833957..76dadb4ec3e23 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -938,13 +938,14 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, npy_datetimestruct dts int64_t local_val - if is_utc(tz): - for i in range(n): - if stamps[i] == NPY_NAT: - result[i] = NPY_NAT - continue - dt64_to_dtstruct(stamps[i], &dts) - result[i] = get_period_ordinal(&dts, freq) + if is_utc(tz) or tz is None: + with nogil: + for i in range(n): + if stamps[i] == NPY_NAT: + result[i] = NPY_NAT + continue + dt64_to_dtstruct(stamps[i], &dts) + result[i] = get_period_ordinal(&dts, freq) elif is_tzlocal(tz): for i in range(n): @@ -958,10 +959,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - pos = trans.searchsorted(stamps, side='right') - 1 - - # statictzinfo if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT @@ -969,6 +968,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, dt64_to_dtstruct(stamps[i] + deltas[0], &dts) result[i] = get_period_ordinal(&dts, freq) else: + pos = trans.searchsorted(stamps, side='right') - 1 + for i in range(n): if stamps[i] == NPY_NAT: result[i] = NPY_NAT diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 688b12005921d..0835a43411783 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -58,28 +58,19 @@ cpdef resolution(ndarray[int64_t] stamps, tz=None): if tz is not None: tz = maybe_get_tz(tz) - return _reso_local(stamps, 
tz) - else: - for i in range(n): - if stamps[i] == NPY_NAT: - continue - dt64_to_dtstruct(stamps[i], &dts) - curr_reso = _reso_stamp(&dts) - if curr_reso < reso: - reso = curr_reso - return reso + return _reso_local(stamps, tz) cdef _reso_local(ndarray[int64_t] stamps, object tz): cdef: - Py_ssize_t n = len(stamps) + Py_ssize_t i, n = len(stamps) int reso = RESO_DAY, curr_reso ndarray[int64_t] trans, deltas Py_ssize_t[:] pos npy_datetimestruct dts - int64_t local_val + int64_t local_val, delta - if is_utc(tz): + if is_utc(tz) or tz is None: for i in range(n): if stamps[i] == NPY_NAT: continue @@ -100,18 +91,18 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - pos = trans.searchsorted(stamps, side='right') - 1 - - # statictzinfo if typ not in ['pytz', 'dateutil']: + # static/fixed; in this case we know that len(delta) == 1 + delta = deltas[0] for i in range(n): if stamps[i] == NPY_NAT: continue - dt64_to_dtstruct(stamps[i] + deltas[0], &dts) + dt64_to_dtstruct(stamps[i] + delta, &dts) curr_reso = _reso_stamp(&dts) if curr_reso < reso: reso = curr_reso else: + pos = trans.searchsorted(stamps, side='right') - 1 for i in range(n): if stamps[i] == NPY_NAT: continue diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index b3fab83fef415..2e3b07252d45e 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -258,12 +258,18 @@ cdef object get_dst_info(object tz): dtype='i8') * 1000000000 typ = 'fixed' else: - trans = np.array([], dtype='M8[ns]') - deltas = np.array([], dtype='i8') - typ = None + # 2018-07-12 this is not reached in the tests, and this case + # is not handled in any of the functions that call + # get_dst_info. If this case _were_ hit the calling + # functions would then hit an IndexError because they assume + # `deltas` is non-empty. 
+ # (under the just-deleted code that returned empty arrays) + raise AssertionError("dateutil tzinfo is not a FixedOffset " + "and has an empty `_trans_list`.", tz) else: # static tzinfo + # TODO: This case is not hit in tests (2018-07-17); is it possible? trans = np.array([NPY_NAT + 1], dtype=np.int64) num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000 deltas = np.array([num], dtype=np.int64)
@jreback we discussed how there are a bunch of functions that do really similar things with `get_dst_info` but have their slight idiosyncrasies. This standardizes them, and is really verbose so as to delay certain calls until absolutely necessary. After this we can see about de-duplicating the 6ish occurrences of really similar code by passing a function pointer or something.
https://api.github.com/repos/pandas-dev/pandas/pulls/21960
2018-07-18T03:04:13Z
2018-07-20T12:29:21Z
2018-07-20T12:29:21Z
2018-07-25T00:43:34Z
remove cnp cimports where possible
diff --git a/pandas/_libs/hashing.pyx b/pandas/_libs/hashing.pyx index 4489847518a1d..8d81ee3bba6ad 100644 --- a/pandas/_libs/hashing.pyx +++ b/pandas/_libs/hashing.pyx @@ -3,14 +3,15 @@ # at https://github.com/veorq/SipHash import cython -cimport numpy as cnp +from cpython cimport PyBytes_Check, PyUnicode_Check + +from libc.stdlib cimport malloc, free +from libc.stdint cimport uint8_t, uint32_t, uint64_t + import numpy as np -from numpy cimport ndarray, uint8_t, uint32_t, uint64_t +from numpy cimport ndarray from util cimport _checknull -from cpython cimport (PyBytes_Check, - PyUnicode_Check) -from libc.stdlib cimport malloc, free DEF cROUNDS = 2 DEF dROUNDS = 4 diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 2179999859dbb..6e78ea612e822 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -3,6 +3,8 @@ cimport cython from cython cimport Py_ssize_t +from libc.stdint cimport int64_t + from cpython cimport PyObject from cpython.slice cimport PySlice_Check @@ -10,7 +12,6 @@ cdef extern from "Python.h": Py_ssize_t PY_SSIZE_T_MAX import numpy as np -from numpy cimport int64_t cdef extern from "compat_helper.h": cdef int slice_get_indices(PyObject* s, Py_ssize_t length, diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index 27d2a639d13e6..50b24d2950fbb 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -4,11 +4,9 @@ cimport cython from cython cimport Py_ssize_t import numpy as np -cimport numpy as cnp from numpy cimport (ndarray, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float32_t, float64_t) -cnp.import_array() cdef double NaN = <double> np.NaN diff --git a/pandas/_libs/tslibs/ccalendar.pxd b/pandas/_libs/tslibs/ccalendar.pxd index 04fb6eaf49c84..954e32f037356 100644 --- a/pandas/_libs/tslibs/ccalendar.pxd +++ b/pandas/_libs/tslibs/ccalendar.pxd @@ -2,8 +2,7 @@ # cython: profile=False from cython cimport Py_ssize_t - -from numpy cimport int64_t, int32_t +from 
libc.stdint cimport int64_t, int32_t cdef int dayofweek(int y, int m, int d) nogil diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 12d35f7ce2f58..4668bebd151f4 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -7,8 +7,7 @@ Cython implementations of functions resembling the stdlib calendar module cimport cython from cython cimport Py_ssize_t - -from numpy cimport int64_t, int32_t +from libc.stdint cimport int64_t, int32_t from locale import LC_TIME from strptime import LocaleTime diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index a298f521ef853..d9cf3457817c6 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -9,9 +9,7 @@ cimport cython from cython cimport Py_ssize_t import numpy as np -cimport numpy as cnp from numpy cimport ndarray, int64_t, int32_t, int8_t -cnp.import_array() from ccalendar import get_locale_names, MONTHS_FULL, DAYS_FULL from ccalendar cimport (get_days_in_month, is_leapyear, dayofweek, diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 580d155f87fa8..73cce3a5f0757 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -8,15 +8,13 @@ import re cimport cython from cython cimport Py_ssize_t - +from libc.stdint cimport int64_t from cpython.datetime cimport datetime import time import numpy as np -cimport numpy as cnp -from numpy cimport int64_t, ndarray -cnp.import_array() +from numpy cimport ndarray # Avoid import from outside _libs if sys.version_info.major == 2:
Not sure what it will take to get avoid the 1.7 deprecation warnings, will see if this helps. Also curious whether cimporting from libc.stdint works on Appveyor.
https://api.github.com/repos/pandas-dev/pandas/pulls/21958
2018-07-18T01:32:51Z
2018-07-18T05:59:34Z
null
2020-04-05T17:41:28Z
BUG: bug in GroupBy.count where arg minlength passed to np.bincount must be None for np<1.13
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 1ac6d075946dd..37c7e9267b39a 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -536,11 +536,11 @@ Groupby/Resample/Rolling - Bug in :func:`pandas.core.groupby.GroupBy.first` and :func:`pandas.core.groupby.GroupBy.last` with ``as_index=False`` leading to the loss of timezone information (:issue:`15884`) - Bug in :meth:`DatetimeIndex.resample` when downsampling across a DST boundary (:issue:`8531`) -- -- - +- Bug where ``ValueError`` is wrongly raised when calling :func:`~pandas.core.groupby.SeriesGroupBy.count` method of a + ``SeriesGroupBy`` when the grouping variable only contains NaNs and numpy version < 1.13 (:issue:`21956`). - Multiple bugs in :func:`pandas.core.Rolling.min` with ``closed='left'` and a datetime-like index leading to incorrect results and also segfault. (:issue:`21704`) +- Sparse ^^^^^^ diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index fdededc325b03..4c87f6122b956 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1207,7 +1207,7 @@ def count(self): mask = (ids != -1) & ~isna(val) ids = ensure_platform_int(ids) - out = np.bincount(ids[mask], minlength=ngroups or 0) + out = np.bincount(ids[mask], minlength=ngroups or None) return Series(out, index=self.grouper.result_index, diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 787d99086873e..a14b6ff014f37 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -212,3 +212,13 @@ def test_count_with_datetimelike(self, datetimelike): expected = DataFrame({'y': [2, 1]}, index=['a', 'b']) expected.index.name = "x" assert_frame_equal(expected, res) + + def test_count_with_only_nans_in_first_group(self): + # GH21956 + df = DataFrame({'A': [np.nan, np.nan], 'B': ['a', 'b'], 'C': [1, 2]}) + result = df.groupby(['A', 'B']).C.count() + mi = 
MultiIndex(levels=[[], ['a', 'b']], + labels=[[], []], + names=['A', 'B']) + expected = Series([], index=mi, dtype=np.int64, name='C') + assert_series_equal(result, expected, check_index_type=False)
- [x] closes #21956 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry See #21956 for details.
https://api.github.com/repos/pandas-dev/pandas/pulls/21957
2018-07-17T23:03:08Z
2018-07-28T13:51:03Z
2018-07-28T13:51:02Z
2018-08-01T10:43:13Z
[CLN] update and clean up imports
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index a24e2cdd99f6f..9fb08d0f5222f 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -53,7 +53,7 @@ from pandas.core.dtypes.common import ( pandas_dtype) from pandas.core.arrays import Categorical from pandas.core.dtypes.concat import union_categoricals -import pandas.io.common as com +import pandas.io.common as iocom from pandas.errors import (ParserError, DtypeWarning, EmptyDataError, ParserWarning) @@ -665,7 +665,8 @@ cdef class TextReader: if b'utf-16' in (self.encoding or b''): # we need to read utf-16 through UTF8Recoder. # if source is utf-16, convert source to utf-8 by UTF8Recoder. - source = com.UTF8Recoder(source, self.encoding.decode('utf-8')) + source = iocom.UTF8Recoder(source, + self.encoding.decode('utf-8')) self.encoding = b'utf-8' self.c_encoding = <char*> self.encoding @@ -1356,7 +1357,7 @@ cdef asbytes(object o): # common NA values # no longer excluding inf representations # '1.#INF','-1.#INF', '1.#INF000000', -_NA_VALUES = _ensure_encoded(list(com._NA_VALUES)) +_NA_VALUES = _ensure_encoded(list(iocom._NA_VALUES)) def _maybe_upcast(arr): diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 141a5d2389db5..11009c525ce6f 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -1,10 +1,11 @@ """ io on the clipboard """ import warnings +from pandas import compat from pandas.compat import StringIO, PY2, PY3 from pandas.core.dtypes.generic import ABCDataFrame -from pandas import compat, get_option, option_context +from pandas import get_option, option_context def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover diff --git a/pandas/io/common.py b/pandas/io/common.py index 17dda903cdadb..6f4d314f9ac96 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -7,15 +7,17 @@ from contextlib import contextmanager, closing import zipfile +# compat +from pandas.errors import (ParserError, DtypeWarning, # noqa + EmptyDataError, 
ParserWarning, AbstractMethodError) + from pandas.compat import StringIO, BytesIO, string_types, text_type from pandas import compat -from pandas.io.formats.printing import pprint_thing -import pandas.core.common as com + from pandas.core.dtypes.common import is_number, is_file_like -# compat -from pandas.errors import (ParserError, DtypeWarning, # noqa - EmptyDataError, ParserWarning) +from pandas.io.formats.printing import pprint_thing + # gh-12665: Alias for now and remove later. CParserError = ParserError @@ -67,7 +69,7 @@ def __iter__(self): return self def __next__(self): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) if not compat.PY3: diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 1bc6526214a91..0a8770f4435fc 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -1,8 +1,11 @@ """ feather-format compat """ from distutils.version import LooseVersion -from pandas import DataFrame, RangeIndex, Int64Index from pandas.compat import range + +from pandas.core.dtypes.generic import ABCDataFrame, ABCInt64Index +from pandas import RangeIndex + from pandas.io.common import _stringify_path @@ -45,7 +48,7 @@ def to_feather(df, path): """ path = _stringify_path(path) - if not isinstance(df, DataFrame): + if not isinstance(df, ABCDataFrame): raise ValueError("feather only support IO with DataFrames") feather = _try_import() @@ -57,7 +60,7 @@ def to_feather(df, path): # validate that we have only a default index # raise on anything else as we don't serialize the index - if not isinstance(df.index, Int64Index): + if not isinstance(df.index, ABCInt64Index): raise ValueError("feather does not support serializing {} " "for the index; you can .reset_index()" "to make the index into column(s)".format( diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 808b6979b235e..007b2603afd46 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -14,22 +14,23 @@ 
PackageLoader, Environment, ChoiceLoader, FileSystemLoader ) except ImportError: - msg = "pandas.Styler requires jinja2. "\ - "Please install with `conda install Jinja2`\n"\ - "or `pip install Jinja2`" + msg = ("pandas.Styler requires jinja2. " + "Please install with `conda install Jinja2`\n" + "or `pip install Jinja2`") raise ImportError(msg) -from pandas.core.dtypes.common import is_float, is_string_like - import numpy as np -import pandas as pd -from pandas.api.types import is_list_like + +from pandas.util._decorators import Appender from pandas.compat import range + +from pandas.core.dtypes.common import is_float, is_string_like, is_list_like + +import pandas as pd from pandas.core.config import get_option from pandas.core.generic import _shared_docs import pandas.core.common as com from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice -from pandas.util._decorators import Appender try: import matplotlib.pyplot as plt from matplotlib import colors diff --git a/pandas/io/html.py b/pandas/io/html.py index 45fe3b017e4f6..7fc8ddfc3f52c 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -10,15 +10,17 @@ from distutils.version import LooseVersion -from pandas.core.dtypes.common import is_list_like -from pandas.errors import EmptyDataError -from pandas.io.common import _is_url, urlopen, _validate_header_arg -from pandas.io.parsers import TextParser +from pandas.errors import AbstractMethodError, EmptyDataError + from pandas.compat import (lrange, lmap, u, string_types, iteritems, raise_with_traceback, binary_type) + +from pandas.core.dtypes.common import is_list_like from pandas import Series -import pandas.core.common as com + from pandas.io.formats.printing import pprint_thing +from pandas.io.common import _is_url, urlopen, _validate_header_arg +from pandas.io.parsers import TextParser _IMPORTS = False _HAS_BS4 = False @@ -253,7 +255,7 @@ def _text_getter(self, obj): text : str or unicode The text from an individual DOM node. 
""" - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _parse_td(self, obj): """Return the td elements from a row element. @@ -268,7 +270,7 @@ def _parse_td(self, obj): list of node-like These are the elements of each row, i.e., the columns. """ - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _parse_thead_tr(self, table): """ @@ -283,7 +285,7 @@ def _parse_thead_tr(self, table): list of node-like These are the <tr> row elements of a table. """ - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _parse_tbody_tr(self, table): """ @@ -302,7 +304,7 @@ def _parse_tbody_tr(self, table): list of node-like These are the <tr> row elements of a table. """ - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _parse_tfoot_tr(self, table): """ @@ -317,7 +319,7 @@ def _parse_tfoot_tr(self, table): list of node-like These are the <tr> row elements of a table. """ - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _parse_tables(self, doc, match, attrs): """ @@ -343,7 +345,7 @@ def _parse_tables(self, doc, match, attrs): list of node-like HTML <table> elements to be parsed into raw data. """ - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _equals_tag(self, obj, tag): """ @@ -362,7 +364,7 @@ def _equals_tag(self, obj, tag): boolean Whether `obj`'s tag name is `tag` """ - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _build_doc(self): """ @@ -373,7 +375,7 @@ def _build_doc(self): node-like The DOM from which to parse the table element. 
""" - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _parse_thead_tbody_tfoot(self, table_html): """ diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 3ec5e8d9be955..1554eadceeb6e 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -1,23 +1,30 @@ # pylint: disable-msg=E1101,W0613,W0603 from itertools import islice import os + import numpy as np import pandas._libs.json as json from pandas._libs.tslibs import iNaT + +from pandas.errors import AbstractMethodError + +from pandas import compat from pandas.compat import StringIO, long, u, to_str -from pandas import compat, isna -from pandas import Series, DataFrame, to_datetime, MultiIndex + +from pandas.core.dtypes.common import is_period_dtype +from pandas.core.dtypes.generic import ABCMultiIndex + +from pandas import Series, DataFrame, to_datetime, isna, concat + from pandas.io.common import (get_filepath_or_buffer, _get_handle, _infer_compression, _stringify_path, BaseIterator) -from pandas.io.parsers import _validate_integer -import pandas.core.common as com -from pandas.core.reshape.concat import concat from pandas.io.formats.printing import pprint_thing +from pandas.io.parsers import _validate_integer + from .normalize import _convert_to_line_delimits from .table_schema import build_table_schema, parse_table_schema -from pandas.core.dtypes.common import is_period_dtype loads = json.loads dumps = json.dumps @@ -93,7 +100,7 @@ def __init__(self, obj, orient, date_format, double_precision, self._format_axes() def _format_axes(self): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def write(self): return self._write(self.obj, self.orient, self.double_precision, @@ -181,7 +188,7 @@ def __init__(self, obj, orient, date_format, double_precision, self.schema = build_table_schema(obj, index=self.index) # NotImplementd on a column MultiIndex - if obj.ndim == 2 and isinstance(obj.columns, MultiIndex): + if obj.ndim == 2 and 
isinstance(obj.columns, ABCMultiIndex): raise NotImplementedError( "orient='table' is not supported for MultiIndex") @@ -654,7 +661,7 @@ def _convert_axes(self): setattr(self.obj, axis, new_axis) def _try_convert_types(self): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): @@ -767,7 +774,7 @@ def _try_convert_to_date(self, data): return data, False def _try_convert_dates(self): - raise com.AbstractMethodError(self) + raise AbstractMethodError(self) class SeriesParser(Parser): diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 7a1e72637f4ce..7324e5e98c9a0 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -39,12 +39,20 @@ """ from datetime import datetime, date, timedelta -from dateutil.parser import parse import os from textwrap import dedent import warnings import numpy as np +from dateutil.parser import parse + +from pandas.util._move import ( + BadMove as _BadMove, + move_into_mutable_buffer as _move_into_mutable_buffer, +) + +from pandas.errors import PerformanceWarning + from pandas import compat from pandas.compat import u, u_safe @@ -61,16 +69,11 @@ from pandas.core.sparse.api import SparseSeries, SparseDataFrame from pandas.core.sparse.array import BlockIndex, IntIndex from pandas.core.generic import NDFrame -from pandas.errors import PerformanceWarning -from pandas.io.common import get_filepath_or_buffer, _stringify_path from pandas.core.internals import BlockManager, make_block, _safe_reshape import pandas.core.internals as internals +from pandas.io.common import get_filepath_or_buffer, _stringify_path from pandas.io.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType -from pandas.util._move import ( - BadMove as _BadMove, - move_into_mutable_buffer as _move_into_mutable_buffer, -) # check which compression libs we have installed try: diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 
a99014f07a6b3..1f429dd594d45 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -2,9 +2,13 @@ from warnings import catch_warnings from distutils.version import LooseVersion -from pandas import DataFrame, RangeIndex, Int64Index, get_option + +import pandas.core.dtypes.generic as gt + +from pandas import RangeIndex, get_option from pandas.compat import string_types import pandas.core.common as com + from pandas.io.common import get_filepath_or_buffer, is_s3_url @@ -47,7 +51,7 @@ class BaseImpl(object): @staticmethod def validate_dataframe(df): - if not isinstance(df, DataFrame): + if not isinstance(df, gt.ABCDataFrame): raise ValueError("to_parquet only supports IO with DataFrames") # must have value column names (strings only) @@ -140,15 +144,14 @@ def read(self, path, columns=None, **kwargs): def _validate_write_lt_070(self, df): # Compatibility shim for pyarrow < 0.7.0 # TODO: Remove in pandas 0.23.0 - from pandas.core.indexes.multi import MultiIndex - if isinstance(df.index, MultiIndex): + if isinstance(df.index, gt.ABCMultiIndex): msg = ( "Multi-index DataFrames are only supported " "with pyarrow >= 0.7.0" ) raise ValueError(msg) # Validate index - if not isinstance(df.index, Int64Index): + if not isinstance(df.index, gt.ABCInt64Index): msg = ( "pyarrow < 0.7.0 does not support serializing {} for the " "index; you can .reset_index() to make the index into " diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 486040fa52f35..7323bc9fbb1d9 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -12,9 +12,16 @@ import numpy as np +from pandas._libs import lib, parsers, ops as libops +from pandas._libs.tslibs import parsing + +from pandas.util._decorators import Appender +from pandas.errors import ParserWarning, ParserError, EmptyDataError + from pandas import compat from pandas.compat import (range, lrange, PY3, StringIO, lzip, zip, string_types, map, u) + from pandas.core.dtypes.common import ( is_integer, ensure_object, 
is_list_like, is_integer_dtype, @@ -24,6 +31,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.missing import isna from pandas.core.dtypes.cast import astype_nansafe + from pandas.core.index import (Index, MultiIndex, RangeIndex, ensure_index_from_sequences) from pandas.core.series import Series @@ -31,20 +39,14 @@ from pandas.core.arrays import Categorical from pandas.core import algorithms import pandas.core.common as com +from pandas.core.tools import datetimes as tools + from pandas.io.date_converters import generic_parser -from pandas.errors import ParserWarning, ParserError, EmptyDataError from pandas.io.common import (get_filepath_or_buffer, is_file_like, _validate_header_arg, _get_handle, UnicodeReader, UTF8Recoder, _NA_VALUES, BaseIterator, _infer_compression) -from pandas.core.tools import datetimes as tools -from pandas.util._decorators import Appender - -import pandas._libs.lib as lib -import pandas._libs.parsers as parsers -import pandas._libs.ops as libops -from pandas._libs.tslibs import parsing # BOM character (byte order mark) # This exists at the beginning of a file to indicate endianness diff --git a/pandas/io/sql.py b/pandas/io/sql.py index a582d32741ae9..3f269c12cb1c0 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -6,26 +6,28 @@ from __future__ import print_function, division from datetime import datetime, date, time - +from contextlib import contextmanager import warnings import re + import numpy as np import pandas._libs.lib as lib + +from pandas.compat import (map, zip, raise_with_traceback, + string_types, text_type) + +from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.common import ( is_list_like, is_dict_like, is_datetime64tz_dtype) -from pandas.compat import (map, zip, raise_with_traceback, - string_types, text_type) -from pandas.core.api import DataFrame, Series +from 
pandas.core.api import DataFrame from pandas.core.base import PandasObject from pandas.core.tools.datetimes import to_datetime -from contextlib import contextmanager - class SQLAlchemyRequired(ImportError): pass @@ -439,7 +441,7 @@ def to_sql(frame, name, con, schema=None, if_exists='fail', index=True, pandas_sql = pandasSQL_builder(con, schema=schema) - if isinstance(frame, Series): + if isinstance(frame, ABCSeries): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError("'frame' argument should be either a " diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 0522d7e721b65..17cf043865e14 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -8,9 +8,9 @@ from pandas._libs.tslibs.period import Period from pandas.core.dtypes.generic import ( - ABCPeriodIndex, ABCDatetimeIndex, ABCTimedeltaIndex) + ABCPeriodIndex, ABCDatetimeIndex, ABCTimedeltaIndex, + ABCDateOffset) -from pandas.tseries.offsets import DateOffset import pandas.tseries.frequencies as frequencies from pandas.io.formats.printing import pprint_thing @@ -209,7 +209,7 @@ def _get_freq(ax, series): freq = ax_freq # get the period frequency - if isinstance(freq, DateOffset): + if isinstance(freq, ABCDateOffset): freq = freq.rule_code else: freq = frequencies.get_base_alias(freq) @@ -231,7 +231,7 @@ def _use_dynamic_x(ax, data): if freq is None: return False - if isinstance(freq, DateOffset): + if isinstance(freq, ABCDateOffset): freq = freq.rule_code else: freq = frequencies.get_base_alias(freq) @@ -269,7 +269,7 @@ def _maybe_convert_index(ax, data): if freq is None: freq = getattr(data.index, 'inferred_freq', None) - if isinstance(freq, DateOffset): + if isinstance(freq, ABCDateOffset): freq = freq.rule_code if freq is None: diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py index 33dcf6d64b302..1b2da04d904e5 100644 --- a/pandas/tseries/holiday.py +++ b/pandas/tseries/holiday.py @@ -1,11 +1,13 @@ 
+from datetime import datetime, timedelta import warnings -from pandas import DateOffset, DatetimeIndex, Series, Timestamp -from pandas.compat import add_metaclass -from datetime import datetime, timedelta -from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU # noqa -from pandas.tseries.offsets import Easter, Day import numpy as np +from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU # noqa + +from pandas.compat import add_metaclass + +from pandas import DatetimeIndex, Series, Timestamp +from pandas.tseries.offsets import Easter, Day, DateOffset def next_monday(dt):
Use ABCFoo classes in cases where doing so allow us to avoid a heavier import. Change a few cases of `com.AbstractMethodError` to import from `pandas.errors` Otherwise mostly just rearranging imports in encouraged order.
https://api.github.com/repos/pandas-dev/pandas/pulls/21955
2018-07-17T21:58:11Z
2018-07-18T01:42:16Z
null
2018-07-18T01:42:16Z
DEPR: pd.read_table
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 61119089fdb42..4c1d2e2d446de 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -478,6 +478,7 @@ Deprecations - :meth:`Series.ptp` is deprecated. Use ``numpy.ptp`` instead (:issue:`21614`) - :meth:`Series.compress` is deprecated. Use ``Series[condition]`` instead (:issue:`18262`) - :meth:`Categorical.from_codes` has deprecated providing float values for the ``codes`` argument. (:issue:`21767`) +- :func:`pandas.read_table` is deprecated. Instead, use :func:`pandas.read_csv` passing ``sep='\t'`` if necessary (:issue:`21948`) .. _whatsnew_0240.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ebd35cb1a6a1a..bbe84110fd019 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1594,11 +1594,11 @@ def from_csv(cls, path, header=0, sep=',', index_col=0, parse_dates=True, "for from_csv when changing your function calls", FutureWarning, stacklevel=2) - from pandas.io.parsers import read_table - return read_table(path, header=header, sep=sep, - parse_dates=parse_dates, index_col=index_col, - encoding=encoding, tupleize_cols=tupleize_cols, - infer_datetime_format=infer_datetime_format) + from pandas.io.parsers import read_csv + return read_csv(path, header=header, sep=sep, + parse_dates=parse_dates, index_col=index_col, + encoding=encoding, tupleize_cols=tupleize_cols, + infer_datetime_format=infer_datetime_format) def to_sparse(self, fill_value=None, kind='block'): """ diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 141a5d2389db5..0d564069c681f 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -9,7 +9,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover r""" - Read text from clipboard and pass to read_table. See read_table for the + Read text from clipboard and pass to read_csv. 
See read_csv for the full argument list Parameters @@ -31,7 +31,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover 'reading from clipboard only supports utf-8 encoding') from pandas.io.clipboard import clipboard_get - from pandas.io.parsers import read_table + from pandas.io.parsers import read_csv text = clipboard_get() # try to decode (if needed on PY3) @@ -51,7 +51,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover # that this came from excel and set 'sep' accordingly lines = text[:10000].split('\n')[:-1][:10] - # Need to remove leading white space, since read_table + # Need to remove leading white space, since read_csv # accepts: # a b # 0 1 2 @@ -80,7 +80,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover if kwargs.get('engine') == 'python' and PY2: text = text.encode('utf-8') - return read_table(StringIO(text), sep=sep, **kwargs) + return read_csv(StringIO(text), sep=sep, **kwargs) def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 88358ff392cb6..4b3fa08e5e4af 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -331,6 +331,10 @@ """ % (_parser_params % (_sep_doc.format(default="','"), _engine_doc)) _read_table_doc = """ + +.. deprecated:: 0.24.0 + Use :func:`pandas.read_csv` instead, passing ``sep='\t'`` if necessary. 
+ Read general delimited file into DataFrame %s @@ -540,9 +544,13 @@ def _read(filepath_or_buffer, kwds): } -def _make_parser_function(name, sep=','): +def _make_parser_function(name, default_sep=','): - default_sep = sep + # prepare read_table deprecation + if name == "read_table": + sep = False + else: + sep = default_sep def parser_f(filepath_or_buffer, sep=sep, @@ -611,11 +619,24 @@ def parser_f(filepath_or_buffer, memory_map=False, float_precision=None): + # deprecate read_table GH21948 + if name == "read_table": + if sep is False and delimiter is None: + warnings.warn("read_table is deprecated, use read_csv " + "instead, passing sep='\\t'.", + FutureWarning, stacklevel=2) + else: + warnings.warn("read_table is deprecated, use read_csv " + "instead.", + FutureWarning, stacklevel=2) + if sep is False: + sep = default_sep + # Alias sep -> delimiter. if delimiter is None: delimiter = sep - if delim_whitespace and delimiter is not default_sep: + if delim_whitespace and delimiter != default_sep: raise ValueError("Specified a delimiter with both sep and" " delim_whitespace=True; you can only" " specify one.") @@ -687,10 +708,10 @@ def parser_f(filepath_or_buffer, return parser_f -read_csv = _make_parser_function('read_csv', sep=',') +read_csv = _make_parser_function('read_csv', default_sep=',') read_csv = Appender(_read_csv_doc)(read_csv) -read_table = _make_parser_function('read_table', sep='\t') +read_table = _make_parser_function('read_table', default_sep='\t') read_table = Appender(_read_table_doc)(read_table) diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index 7623587803b41..b0cdbe2b5bedb 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -1,5 +1,5 @@ import pytest -from pandas.io.parsers import read_table +from pandas.io.parsers import read_csv @pytest.fixture @@ -17,7 +17,7 @@ def jsonl_file(datapath): @pytest.fixture def salaries_table(datapath): """DataFrame with the salaries dataset""" - return 
read_table(datapath('io', 'parser', 'data', 'salaries.csv')) + return read_csv(datapath('io', 'parser', 'data', 'salaries.csv'), sep='\t') @pytest.fixture diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 191e3f37f1c37..3218742aa7636 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -21,7 +21,7 @@ import numpy as np import pandas as pd from pandas import (DataFrame, Series, Index, Timestamp, MultiIndex, - date_range, NaT, read_table) + date_range, NaT, read_csv) from pandas.compat import (range, zip, lrange, StringIO, PY3, u, lzip, is_platform_windows, is_platform_32bit) @@ -1225,8 +1225,8 @@ def test_to_string(self): lines = result.split('\n') header = lines[0].strip().split() joined = '\n'.join(re.sub(r'\s+', ' ', x).strip() for x in lines[1:]) - recons = read_table(StringIO(joined), names=header, - header=None, sep=' ') + recons = read_csv(StringIO(joined), names=header, + header=None, sep=' ') tm.assert_series_equal(recons['B'], biggie['B']) assert recons['A'].count() == biggie['A'].count() assert (np.abs(recons['A'].dropna() - diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index f6a31008bca5c..a7cc3ad989ea1 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -12,7 +12,7 @@ import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas import DataFrame -from pandas.io.parsers import read_csv, read_table +from pandas.io.parsers import read_csv from pandas.compat import BytesIO, StringIO @@ -44,7 +44,7 @@ def check_compressed_urls(salaries_table, compression, extension, mode, if mode != 'explicit': compression = mode - url_table = read_table(url, compression=compression, engine=engine) + url_table = read_csv(url, sep='\t', compression=compression, engine=engine) tm.assert_frame_equal(url_table, salaries_table) diff --git 
a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index b6f13039641a2..8535a51657abf 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -70,7 +70,9 @@ def read_table(self, *args, **kwds): kwds = kwds.copy() kwds['engine'] = self.engine kwds['low_memory'] = self.low_memory - return read_table(*args, **kwds) + with tm.assert_produces_warning(FutureWarning): + df = read_table(*args, **kwds) + return df class TestCParserLowMemory(BaseParser, CParserTests): @@ -88,7 +90,9 @@ def read_table(self, *args, **kwds): kwds = kwds.copy() kwds['engine'] = self.engine kwds['low_memory'] = True - return read_table(*args, **kwds) + with tm.assert_produces_warning(FutureWarning): + df = read_table(*args, **kwds) + return df class TestPythonParser(BaseParser, PythonParserTests): @@ -103,7 +107,9 @@ def read_csv(self, *args, **kwds): def read_table(self, *args, **kwds): kwds = kwds.copy() kwds['engine'] = self.engine - return read_table(*args, **kwds) + with tm.assert_produces_warning(FutureWarning): + df = read_table(*args, **kwds) + return df class TestUnsortedUsecols(object): diff --git a/pandas/tests/io/parser/test_unsupported.py b/pandas/tests/io/parser/test_unsupported.py index 3117f6fae55da..1c64c1516077d 100644 --- a/pandas/tests/io/parser/test_unsupported.py +++ b/pandas/tests/io/parser/test_unsupported.py @@ -14,7 +14,7 @@ from pandas.compat import StringIO from pandas.errors import ParserError -from pandas.io.parsers import read_csv, read_table +from pandas.io.parsers import read_csv import pytest @@ -43,24 +43,24 @@ def test_c_engine(self): # specify C engine with unsupported options (raise) with tm.assert_raises_regex(ValueError, msg): - read_table(StringIO(data), engine='c', - sep=None, delim_whitespace=False) + read_csv(StringIO(data), engine='c', + sep=None, delim_whitespace=False) with tm.assert_raises_regex(ValueError, msg): - read_table(StringIO(data), engine='c', sep=r'\s') + 
read_csv(StringIO(data), engine='c', sep=r'\s') with tm.assert_raises_regex(ValueError, msg): - read_table(StringIO(data), engine='c', quotechar=chr(128)) + read_csv(StringIO(data), engine='c', sep='\t', quotechar=chr(128)) with tm.assert_raises_regex(ValueError, msg): - read_table(StringIO(data), engine='c', skipfooter=1) + read_csv(StringIO(data), engine='c', skipfooter=1) # specify C-unsupported options without python-unsupported options with tm.assert_produces_warning(parsers.ParserWarning): - read_table(StringIO(data), sep=None, delim_whitespace=False) + read_csv(StringIO(data), sep=None, delim_whitespace=False) with tm.assert_produces_warning(parsers.ParserWarning): - read_table(StringIO(data), quotechar=chr(128)) + read_csv(StringIO(data), sep=r'\s') with tm.assert_produces_warning(parsers.ParserWarning): - read_table(StringIO(data), sep=r'\s') + read_csv(StringIO(data), sep='\t', quotechar=chr(128)) with tm.assert_produces_warning(parsers.ParserWarning): - read_table(StringIO(data), skipfooter=1) + read_csv(StringIO(data), skipfooter=1) text = """ A B C D E one two three four @@ -70,9 +70,9 @@ def test_c_engine(self): msg = 'Error tokenizing data' with tm.assert_raises_regex(ParserError, msg): - read_table(StringIO(text), sep='\\s+') + read_csv(StringIO(text), sep='\\s+') with tm.assert_raises_regex(ParserError, msg): - read_table(StringIO(text), engine='c', sep='\\s+') + read_csv(StringIO(text), engine='c', sep='\\s+') msg = "Only length-1 thousands markers supported" data = """A|B|C diff --git a/pandas/tests/io/test_common.py b/pandas/tests/io/test_common.py index ceaac9818354a..991b8ee508760 100644 --- a/pandas/tests/io/test_common.py +++ b/pandas/tests/io/test_common.py @@ -131,7 +131,6 @@ def test_iterator(self): @pytest.mark.parametrize('reader, module, error_class, fn_ext', [ (pd.read_csv, 'os', FileNotFoundError, 'csv'), - (pd.read_table, 'os', FileNotFoundError, 'csv'), (pd.read_fwf, 'os', FileNotFoundError, 'txt'), (pd.read_excel, 'xlrd', 
FileNotFoundError, 'xlsx'), (pd.read_feather, 'feather', Exception, 'feather'), @@ -149,9 +148,14 @@ def test_read_non_existant(self, reader, module, error_class, fn_ext): with pytest.raises(error_class): reader(path) + def test_read_non_existant_read_table(self): + path = os.path.join(HERE, 'data', 'does_not_exist.' + 'csv') + with pytest.raises(FileNotFoundError): + with tm.assert_produces_warning(FutureWarning): + pd.read_table(path) + @pytest.mark.parametrize('reader, module, path', [ (pd.read_csv, 'os', ('io', 'data', 'iris.csv')), - (pd.read_table, 'os', ('io', 'data', 'iris.csv')), (pd.read_fwf, 'os', ('io', 'data', 'fixed_width_format.txt')), (pd.read_excel, 'xlrd', ('io', 'data', 'test1.xlsx')), (pd.read_feather, 'feather', ('io', 'data', 'feather-0_3_1.feather')), @@ -170,6 +174,22 @@ def test_read_fspath_all(self, reader, module, path, datapath): mypath = CustomFSPath(path) result = reader(mypath) expected = reader(path) + + if path.endswith('.pickle'): + # categorical + tm.assert_categorical_equal(result, expected) + else: + tm.assert_frame_equal(result, expected) + + def test_read_fspath_all_read_table(self, datapath): + path = datapath('io', 'data', 'iris.csv') + + mypath = CustomFSPath(path) + with tm.assert_produces_warning(FutureWarning): + result = pd.read_table(mypath) + with tm.assert_produces_warning(FutureWarning): + expected = pd.read_table(path) + if path.endswith('.pickle'): # categorical tm.assert_categorical_equal(result, expected) diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 3caee2b44c579..dcfeab55f94fc 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -10,7 +10,7 @@ import numpy as np from pandas.core.index import Index, MultiIndex -from pandas import Panel, DataFrame, Series, notna, isna, Timestamp +from pandas import Panel, DataFrame, Series, notna, isna, Timestamp, read_csv from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype import 
pandas.core.common as com @@ -512,14 +512,13 @@ def f(x): pytest.raises(com.SettingWithCopyError, f, result) def test_xs_level_multiple(self): - from pandas import read_table text = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - df = read_table(StringIO(text), sep=r'\s+', engine='python') + df = read_csv(StringIO(text), sep=r'\s+', engine='python') result = df.xs(('a', 4), level=['one', 'four']) expected = df.xs('a').xs(4, level='four') @@ -547,14 +546,13 @@ def f(x): tm.assert_frame_equal(rs, xp) def test_xs_level0(self): - from pandas import read_table text = """ A B C D E one two three four a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640 a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744 x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838""" - df = read_table(StringIO(text), sep=r'\s+', engine='python') + df = read_csv(StringIO(text), sep=r'\s+', engine='python') result = df.xs('a', level=0) expected = df.xs('a')
`pd.read_table` is deprecated and replaced by `pd.read_csv`. - [x] closes #21948 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21954
2018-07-17T21:20:05Z
2018-08-02T10:49:07Z
2018-08-02T10:49:07Z
2018-08-02T10:49:07Z
BUG: fix df.where(cond) when cond is empty
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 695c4a4e16c9d..7a128f5cde7aa 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -1047,7 +1047,7 @@ Removal of prior version deprecations/changes Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- Slicing Series and Dataframes with an monotonically increasing :class:`CategoricalIndex` +- Slicing Series and DataFrames with an monotonically increasing :class:`CategoricalIndex` is now very fast and has speed comparable to slicing with an ``Int64Index``. The speed increase is both when indexing by label (using .loc) and position(.iloc) (:issue:`20395`) Slicing a monotonically increasing :class:`CategoricalIndex` itself (i.e. ``ci[1000:2000]``) @@ -1150,7 +1150,7 @@ Timezones - Fixed bug where :meth:`DataFrame.describe` and :meth:`Series.describe` on tz-aware datetimes did not show `first` and `last` result (:issue:`21328`) - Bug in :class:`DatetimeIndex` comparisons failing to raise ``TypeError`` when comparing timezone-aware ``DatetimeIndex`` against ``np.datetime64`` (:issue:`22074`) - Bug in ``DataFrame`` assignment with a timezone-aware scalar (:issue:`19843`) -- Bug in :func:`Dataframe.asof` that raised a ``TypeError`` when attempting to compare tz-naive and tz-aware timestamps (:issue:`21194`) +- Bug in :func:`DataFrame.asof` that raised a ``TypeError`` when attempting to compare tz-naive and tz-aware timestamps (:issue:`21194`) - Bug when constructing a :class:`DatetimeIndex` with :class:`Timestamp`s constructed with the ``replace`` method across DST (:issue:`18785`) - Bug when setting a new value with :meth:`DataFrame.loc` with a :class:`DatetimeIndex` with a DST transition (:issue:`18308`, :issue:`20724`) - Bug in :meth:`DatetimeIndex.unique` that did not re-localize tz-aware dates correctly (:issue:`21737`) @@ -1313,6 +1313,7 @@ Reshaping - Bug in :func:`pandas.concat` when joining resampled DataFrames with timezone aware index 
(:issue:`13783`) - Bug in :meth:`Series.combine_first` with ``datetime64[ns, tz]`` dtype which would return tz-naive result (:issue:`21469`) - Bug in :meth:`Series.where` and :meth:`DataFrame.where` with ``datetime64[ns, tz]`` dtype (:issue:`21546`) +- Bug in :meth:`DataFrame.where` with an empty DataFrame and empty ``cond`` having non-bool dtype (:issue:`21947`) - Bug in :meth:`Series.mask` and :meth:`DataFrame.mask` with ``list`` conditionals (:issue:`21891`) - Bug in :meth:`DataFrame.replace` raises RecursionError when converting OutOfBounds ``datetime64[ns, tz]`` (:issue:`20380`) - :func:`pandas.core.groupby.GroupBy.rank` now raises a ``ValueError`` when an invalid value is passed for argument ``na_option`` (:issue:`22124`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 71e4641d20c1b..396b092a286c1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -8142,7 +8142,7 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, # This is a single-dimensional object. if not is_bool_dtype(cond): raise ValueError(msg.format(dtype=cond.dtype)) - else: + elif not cond.empty: for dt in cond.dtypes: if not is_bool_dtype(dt): raise ValueError(msg.format(dtype=dt)) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index ae04ffff37419..2467b2a89472b 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2877,6 +2877,14 @@ def test_where_none(self): 'on mixed-type'): df.where(~isna(df), None, inplace=True) + def test_where_empty_df_and_empty_cond_having_non_bool_dtypes(self): + # see gh-21947 + df = pd.DataFrame(columns=["a"]) + cond = df.applymap(lambda x: x > 0) + + result = df.where(cond) + tm.assert_frame_equal(result, df) + def test_where_align(self): def create():
- when cond is empty, cond.dtypes are objects, which raised `ValueError: Boolean array expected for the condition, not object ` - [ ] closes #xxxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21947
2018-07-17T14:00:22Z
2018-11-06T13:06:19Z
2018-11-06T13:06:19Z
2018-11-06T17:01:47Z
DOC add Python 2.7 warning to recent whatsnew; include 23.3
diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index 3ec812654ee4a..436bbeae5d08f 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -20,6 +20,8 @@ These are new features and improvements of note in each release. .. include:: whatsnew/v0.24.0.txt +.. include:: whatsnew/v0.23.3.txt + .. include:: whatsnew/v0.23.2.txt .. include:: whatsnew/v0.23.1.txt diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index cf60e86553fe3..1a514ba627fcb 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -6,6 +6,11 @@ v0.23.1 (June 12, 2018) This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes and bug fixes. We recommend that all users upgrade to this version. +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. + .. contents:: What's new in v0.23.1 :local: :backlinks: none diff --git a/doc/source/whatsnew/v0.23.2.txt b/doc/source/whatsnew/v0.23.2.txt index 3f68eabdca4c2..7ec6e2632e717 100644 --- a/doc/source/whatsnew/v0.23.2.txt +++ b/doc/source/whatsnew/v0.23.2.txt @@ -11,6 +11,10 @@ and bug fixes. We recommend that all users upgrade to this version. Pandas 0.23.2 is first pandas release that's compatible with Python 3.7 (:issue:`20552`) +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. .. contents:: What's new in v0.23.2 :local: diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index a88c22e3d01f7..ac1ef78fd6fd2 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -6,6 +6,10 @@ v0.23.4 This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes and bug fixes. We recommend that all users upgrade to this version. +.. 
warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. .. contents:: What's new in v0.23.4 :local: diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9e3f7ec73f852..a0076118a28a7 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -3,6 +3,11 @@ v0.24.0 (Month XX, 2018) ------------------------ +.. warning:: + + Starting January 1, 2019, pandas feature releases will support Python 3 only. + See :ref:`install.dropping-27` for more. + .. _whatsnew_0240.enhancements: New features
#18894 laid out: > We should add a big note on the top of each whatsnew that we are planning on dropping 2.7 support as of the end of 2018. So far, this warning only appeared in v0.23.0, and can be easily missed these days. This PR adds them to the recent whatsnews, and also includes v0.23.3 in `whatsnew.rst`
https://api.github.com/repos/pandas-dev/pandas/pulls/21944
2018-07-17T06:09:47Z
2018-07-17T12:01:52Z
2018-07-17T12:01:52Z
2018-08-05T17:27:30Z
apply_frame_axis0 applies function to first group twice
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 5c15c7b6a742f..b871a684fb6df 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -632,6 +632,7 @@ Plotting ^^^^^^^^ - Bug in :func:`DataFrame.plot.scatter` and :func:`DataFrame.plot.hexbin` caused x-axis label and ticklabels to disappear when colorbar was on in IPython inline backend (:issue:`10611`, :issue:`10678`, and :issue:`20455`) +- Bug in ``DataFrameGroupBy.plot.scatter`` resulted in the first group being plotted twice (:issue:`21609`) - Bug in plotting a Series with datetimes using :func:`matplotlib.axes.Axes.scatter` (:issue:`22039`) Groupby/Resample/Rolling diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 96c74f7fd4d75..530e23b336bfd 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -58,6 +58,10 @@ def _gotitem(self, key, ndim, subset=None): # special case to prevent duplicate plots when catching exceptions when # forwarding methods from NDFrames plotting_methods = frozenset(['plot', 'boxplot', 'hist']) +extra_plotting_methods = frozenset(['line', 'bar', 'barh', + 'box', 'kde', 'density', + 'area', 'pie', 'scatter', + 'hexbin']) common_apply_whitelist = frozenset([ 'last', 'first', diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index 3f84fa0f0670e..7944c1e47561f 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -311,6 +311,12 @@ def __getattr__(self, name): def attr(*args, **kwargs): def f(self): return getattr(self.plot, name)(*args, **kwargs) + # GH-21609 + # naming the function in order to later determine + # if it requires skipping during fast_apply + # some functions (e.g. 
scatter) would + # create spurious extra plots if fast path is taken + f.__name__ = name return self._groupby.apply(f) return attr @@ -596,7 +602,6 @@ def curried(x): # exception below if name in base.plotting_methods: return self.apply(curried) - try: return self.apply(curried_with_axis) except Exception: diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index ba04ff3a3d3ee..3c5852104c111 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -177,6 +177,7 @@ def apply(self, f, data, axis=0): # oh boy f_name = com.get_callable_name(f) if (f_name not in base.plotting_methods and + f_name not in base.extra_plotting_methods and hasattr(splitter, 'fast_apply') and axis == 0): try: values, mutated = splitter.fast_apply(f, group_keys) diff --git a/pandas/tests/plotting/test_groupby.py b/pandas/tests/plotting/test_groupby.py index a7c99a06c34e9..0983eee23f126 100644 --- a/pandas/tests/plotting/test_groupby.py +++ b/pandas/tests/plotting/test_groupby.py @@ -11,6 +11,28 @@ from pandas.tests.plotting.common import TestPlotBase +import pandas as pd +import pytest + + +@pytest.mark.parametrize( + 'plotting_method', + ['line', 'bar', 'barh', 'box', + 'area', 'pie', 'scatter', 'hexbin', + pytest.param('density', marks=td.skip_if_no_scipy), + pytest.param('kde', marks=td.skip_if_no_scipy) + ]) +@td.skip_if_no_mpl +def test_no_double_plot_for_first_group(plotting_method): + # GH-21609 + import matplotlib.pyplot as plt + df = pd.DataFrame({'cat': [1, 1, 2, 2], + 'x': [1, 3, 5, 7], 'y': [2, 4, 6, 8]}) + plt.close('all') + df.groupby('cat').plot.__getattr__(plotting_method)(x='x', y='y') + fig_nums = plt.get_fignums() + assert fig_nums == [1, 2] + @td.skip_if_no_mpl class TestDataFrameGroupByPlots(TestPlotBase):
- [x] closes #21609 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21943
2018-07-17T04:01:19Z
2018-11-04T16:11:41Z
null
2018-11-04T16:11:41Z
[CLN] Un-xfail now-passing tests
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index fe4e461b0bd4f..d7bc163584969 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -634,6 +634,11 @@ class ExtensionOpsMixin(object): """ A base class for linking the operators to their dunder names """ + + @classmethod + def _create_arithmetic_method(cls, op): + raise AbstractMethodError(cls) + @classmethod def _add_arithmetic_ops(cls): cls.__add__ = cls._create_arithmetic_method(operator.add) @@ -657,6 +662,10 @@ def _add_arithmetic_ops(cls): cls.__divmod__ = cls._create_arithmetic_method(divmod) cls.__rdivmod__ = cls._create_arithmetic_method(ops.rdivmod) + @classmethod + def _create_comparison_method(cls, op): + raise AbstractMethodError(cls) + @classmethod def _add_comparison_ops(cls): cls.__eq__ = cls._create_comparison_method(operator.eq) diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index f4bdb7ba86aaf..9f948e77f3d49 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -561,7 +561,6 @@ def test_td64series_add_int_series_invalid(self, tdser): with pytest.raises(TypeError): tdser + Series([2, 3, 4]) - @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') def test_td64series_radd_int_series_invalid(self, tdser): with pytest.raises(TypeError): Series([2, 3, 4]) + tdser @@ -570,7 +569,6 @@ def test_td64series_sub_int_series_invalid(self, tdser): with pytest.raises(TypeError): tdser - Series([2, 3, 4]) - @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') def test_td64series_rsub_int_series_invalid(self, tdser): with pytest.raises(TypeError): Series([2, 3, 4]) - tdser @@ -611,9 +609,7 @@ def test_td64series_add_sub_numeric_scalar_invalid(self, scalar, tdser): @pytest.mark.parametrize('vector', [ np.array([1, 2, 3]), pd.Index([1, 2, 3]), - pytest.param(Series([1, 2, 3]), - marks=pytest.mark.xfail(reason='GH#19123 integer ' - 'interpreted 
as nanos')) + Series([1, 2, 3]) ]) def test_td64series_add_sub_numeric_array_invalid(self, vector, dtype, tdser): @@ -777,10 +773,7 @@ def test_td64series_mul_numeric_array(self, vector, dtype, tdser): 'float64', 'float32', 'float16']) @pytest.mark.parametrize('vector', [ np.array([20, 30, 40]), - pytest.param(pd.Index([20, 30, 40]), - marks=pytest.mark.xfail(reason='__mul__ raises ' - 'instead of returning ' - 'NotImplemented')), + pd.Index([20, 30, 40]), Series([20, 30, 40]) ]) def test_td64series_rmul_numeric_array(self, vector, dtype, tdser): @@ -816,12 +809,8 @@ def test_td64series_mul_numeric_scalar(self, one, tdser): @pytest.mark.parametrize('two', [ 2, 2.0, - pytest.param(np.array(2), - marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' - 'incorrectly True.')), - pytest.param(np.array(2.0), - marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' - 'incorrectly True.')), + np.array(2), + np.array(2.0), ]) def test_td64series_div_numeric_scalar(self, two, tdser): # GH#4521 diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 7d5753d03f4fc..6d712b1f85856 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -339,48 +339,3 @@ def make_signature(func): if spec.keywords: args.append('**' + spec.keywords) return args, spec.args - - -class docstring_wrapper(object): - """ - Decorator to wrap a function and provide - a dynamically evaluated doc-string. 
- - Parameters - ---------- - func : callable - creator : callable - return the doc-string - default : str, optional - return this doc-string on error - """ - _attrs = ['__module__', '__name__', - '__qualname__', '__annotations__'] - - def __init__(self, func, creator, default=None): - self.func = func - self.creator = creator - self.default = default - update_wrapper( - self, func, [attr for attr in self._attrs - if hasattr(func, attr)]) - - def __get__(self, instance, cls=None): - - # we are called with a class - if instance is None: - return self - - # we want to return the actual passed instance - return types.MethodType(self, instance) - - def __call__(self, *args, **kwargs): - return self.func(*args, **kwargs) - - @property - def __doc__(self): - try: - return self.creator() - except Exception as exc: - msg = self.default or str(exc) - return msg
Some of these were fixed in #21861, others sometime earlier this year, unclear. Removes un unused util `docstring_wrapper`, closing #19676 - [x] closes #19676 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/21942
2018-07-17T03:44:44Z
2018-07-17T06:06:27Z
null
2020-04-05T17:42:37Z
Add option for alternative correlation types to `DataFrame.corrwith()`
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6380944338010..0814e6ce976ab 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -6891,7 +6891,7 @@ def cov(self, min_periods=None): return self._constructor(baseCov, index=idx, columns=cols) - def corrwith(self, other, axis=0, drop=False): + def corrwith(self, other, axis=0, method='pearson', drop=False): """ Compute pairwise correlation between rows or columns of two DataFrame objects. @@ -6901,6 +6901,10 @@ def corrwith(self, other, axis=0, drop=False): other : DataFrame, Series axis : {0 or 'index', 1 or 'columns'}, default 0 0 or 'index' to compute column-wise, 1 or 'columns' for row-wise + method : {'pearson', 'kendall', 'spearman'} + * pearson : standard correlation coefficient + * kendall : Kendall Tau correlation coefficient + * spearman : Spearman rank correlation drop : boolean, default False Drop missing indices from result, default returns union of all @@ -6912,10 +6916,9 @@ def corrwith(self, other, axis=0, drop=False): this = self._get_numeric_data() if isinstance(other, Series): - return this.apply(other.corr, axis=axis) + return this.apply(other.corr, axis=axis, method=method) other = other._get_numeric_data() - left, right = this.align(other, join='inner', copy=False) # mask missing values @@ -6926,14 +6929,9 @@ def corrwith(self, other, axis=0, drop=False): left = left.T right = right.T - # demeaned data - ldem = left - left.mean() - rdem = right - right.mean() - - num = (ldem * rdem).sum() - dom = (left.count() - 1) * left.std() * right.std() - - correl = num / dom + correl = Series(index=left.columns) + for col in left.columns: + correl[col] = nanops.nancorr(left[col], right[col], method=method) if not drop: raxis = 1 if axis == 0 else 0
- [x] closes #21925 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` This adds a `method` argument to `DataFrame.corrwith()` which calls the corresponding correlation function which already exists within pandas. The default option "pearson" was originally implemented directly in this method, I changed it to match the other options for consistency (and a slight speed improvement).
https://api.github.com/repos/pandas-dev/pandas/pulls/21941
2018-07-17T03:14:59Z
2018-08-05T19:06:12Z
null
2018-08-05T19:06:18Z
[BLD] Fix remaining compile-time warnings
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index 5e4a431caca00..43acab765d0d3 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -12,6 +12,11 @@ from numpy cimport (ndarray, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float32_t, float64_t) +cdef extern from "numpy/npy_math.h": + # Note: apparently npy_isnan has better windows-compat than + # the libc.math.isnan implementation + # See discussion: https://github.com/cython/cython/issues/550 + bint npy_isnan(double x) nogil from util cimport numeric, get_nat @@ -35,7 +40,7 @@ cdef inline float64_t median_linear(float64_t* a, int n) nogil: # count NAs for i in range(n): - if a[i] != a[i]: + if npy_isnan(a[i]): na_count += 1 if na_count: @@ -46,7 +51,7 @@ cdef inline float64_t median_linear(float64_t* a, int n) nogil: j = 0 for i in range(n): - if a[i] == a[i]: + if not npy_isnan(a[i]): tmp[j] = a[i] j += 1 @@ -160,7 +165,7 @@ def group_cumprod_float64(float64_t[:, :] out, continue for j in range(K): val = values[i, j] - if val == val: + if not npy_isnan(val): accum[lab, j] *= val out[i, j] = accum[lab, j] else: @@ -199,7 +204,7 @@ def group_cumsum(numeric[:, :] out, val = values[i, j] if numeric == float32_t or numeric == float64_t: - if val == val: + if not npy_isnan(val): accum[lab, j] += val out[i, j] = accum[lab, j] else: diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index 0062a6c8d31ab..19ad374f72374 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -66,7 +66,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - if val == val: + if not npy_isnan(val): nobs[lab, j] += 1 sumx[lab, j] += val @@ -112,7 +112,7 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - if val == val: + if not npy_isnan(val): nobs[lab, j] += 1 prodx[lab, j] *= val @@ -161,7 +161,7 @@ def 
group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan - if val == val: + if not npy_isnan(val): nobs[lab, j] += 1 oldmean = mean[lab, j] mean[lab, j] += (val - oldmean) / nobs[lab, j] @@ -209,7 +209,7 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for j in range(K): val = values[i, j] # not nan - if val == val: + if not npy_isnan(val): nobs[lab, j] += 1 sumx[lab, j] += val @@ -260,10 +260,10 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, counts[lab] += 1 val = values[i, 0] - if val != val: + if npy_isnan(val): continue - if out[lab, 0] != out[lab, 0]: + if npy_isnan(out[lab, 0]): out[lab, 0] = out[lab, 1] = out[lab, 2] = out[lab, 3] = val else: out[lab, 1] = max(out[lab, 1], val) @@ -340,7 +340,11 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan + {{if c_type.startswith('float')}} + if not npy_isnan(val) and val != {{nan_val}}: + {{else}} if val == val and val != {{nan_val}}: + {{endif}} nobs[lab, j] += 1 resx[lab, j] = val @@ -396,7 +400,11 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, val = values[i, j] # not nan + {{if c_type.startswith('float')}} + if not npy_isnan(val) and val != {{nan_val}}: + {{else}} if val == val and val != {{nan_val}}: + {{endif}} nobs[lab, j] += 1 if nobs[lab, j] == rank: resx[lab, j] = val @@ -651,7 +659,7 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{if name == 'int64'}} if val != {{nan_val}}: {{else}} - if val == val and val != {{nan_val}}: + if not npy_isnan(val) and val != {{nan_val}}: {{endif}} nobs[lab, j] += 1 if val > maxx[lab, j]: @@ -706,7 +714,7 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{if name == 'int64'}} if val != {{nan_val}}: {{else}} - if val == val and val != {{nan_val}}: + if not npy_isnan(val) and val != {{nan_val}}: {{endif}} nobs[lab, j] += 1 if val < minx[lab, j]: @@ -754,7 +762,7 @@ def group_cummin_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, 
out[i, j] = {{nan_val}} else: {{else}} - if val == val: + if not npy_isnan(val): {{endif}} mval = accum[lab, j] if val < mval: @@ -795,7 +803,7 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, out[i, j] = {{nan_val}} else: {{else}} - if val == val: + if not npy_isnan(val): {{endif}} mval = accum[lab, j] if val > mval: diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index b9a72a0c8285f..8b4fb4aaa3667 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -16,6 +16,7 @@ cnp.import_array() cdef extern from "numpy/npy_math.h": double NAN "NPY_NAN" + bint npy_isnan(double x) nogil from khash cimport ( diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index ff6570e2106b2..ed1034fbfa8e3 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -29,7 +29,7 @@ dtypes = [('Float64', 'float64', 'float64_t'), ctypedef struct {{name}}VectorData: {{arg}} *data - size_t n, m + Py_ssize_t n, m {{endif}} @@ -147,7 +147,7 @@ cdef class StringVector: cdef resize(self): cdef: char **orig_data - size_t i, m + Py_ssize_t i, m m = self.data.m self.data.m = max(self.data.m * 4, _INIT_VEC_CAP) @@ -172,7 +172,7 @@ cdef class StringVector: def to_array(self): cdef: ndarray ao - size_t n + Py_ssize_t n object val ao = np.empty(self.data.n, dtype=np.object) @@ -198,7 +198,7 @@ cdef class ObjectVector: cdef: PyObject **data - size_t n, m + Py_ssize_t n, m ndarray ao bint external_view_exists @@ -281,7 +281,7 @@ cdef class {{name}}HashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof({{dtype}}_t) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, {{dtype}}_t val): @@ -389,7 +389,11 @@ cdef class {{name}}HashTable(HashTable): for i in range(n): val = values[i] + {{if dtype == 'float64'}} + if 
npy_isnan(val) or (use_na_value and val == na_value2): + {{else}} if val != val or (use_na_value and val == na_value2): + {{endif}} labels[i] = na_sentinel continue @@ -522,13 +526,13 @@ cdef class StringHashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof(char *) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, object val): cdef: khiter_t k - char *v + const char *v v = util.get_c_string(val) k = kh_get_str(self.table, v) @@ -541,7 +545,7 @@ cdef class StringHashTable(HashTable): cdef: khiter_t k int ret = 0 - char *v + const char *v v = util.get_c_string(val) @@ -560,10 +564,10 @@ cdef class StringHashTable(HashTable): int64_t *resbuf = <int64_t*> labels.data khiter_t k kh_str_t *table = self.table - char *v - char **vecs + const char *v + const char **vecs - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char **> malloc(n * sizeof(char *)) for i in range(n): val = values[i] v = util.get_c_string(val) @@ -589,10 +593,10 @@ cdef class StringHashTable(HashTable): object val ObjectVector uniques khiter_t k - char *v - char **vecs + const char *v + const char **vecs - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char **> malloc(n * sizeof(char *)) uindexer = np.empty(n, dtype=np.int64) for i in range(n): val = values[i] @@ -627,12 +631,12 @@ cdef class StringHashTable(HashTable): Py_ssize_t i, n = len(values) int ret = 0 object val - char *v + const char *v khiter_t k int64_t[:] locs = np.empty(n, dtype=np.int64) # these by-definition *must* be strings - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char**> malloc(n * sizeof(char *)) for i in range(n): val = values[i] @@ -660,12 +664,12 @@ cdef class StringHashTable(HashTable): Py_ssize_t i, n = len(values) int ret = 0 object val - char *v - char **vecs + const char *v + const char**vecs khiter_t k # these by-definition *must* be 
strings - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char**> malloc(n * sizeof(char *)) for i in range(n): val = values[i] @@ -693,8 +697,8 @@ cdef class StringHashTable(HashTable): Py_ssize_t idx, count = count_prior int ret = 0 object val - char *v - char **vecs + const char *v + const char**vecs khiter_t k bint use_na_value @@ -705,7 +709,7 @@ cdef class StringHashTable(HashTable): # pre-filter out missing # and assign pointers - vecs = <char **> malloc(n * sizeof(char *)) + vecs = <const char**> malloc(n * sizeof(char *)) for i in range(n): val = values[i] @@ -769,7 +773,7 @@ cdef class PyObjectHashTable(HashTable): def sizeof(self, deep=False): """ return the size of my table in bytes """ return self.table.n_buckets * (sizeof(PyObject *) + # keys - sizeof(size_t) + # vals + sizeof(Py_ssize_t) + # vals sizeof(uint32_t)) # flags cpdef get_item(self, object val): diff --git a/pandas/_libs/hashtable_func_helper.pxi.in b/pandas/_libs/hashtable_func_helper.pxi.in index 521e564447c59..da4e8f1248985 100644 --- a/pandas/_libs/hashtable_func_helper.pxi.in +++ b/pandas/_libs/hashtable_func_helper.pxi.in @@ -271,7 +271,12 @@ def ismember_{{dtype}}({{scalar}}[:] arr, {{scalar}}[:] values, bint hasnans=0): if k != table.n_buckets: result[i] = 1 else: + {{if dtype == 'float64'}} + result[i] = hasnans and npy_isnan(val) + {{else}} result[i] = hasnans and val != val + {{endif}} + {{endif}} kh_destroy_{{ttype}}(table) diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 1ad8c780ba7a4..104ac93381f86 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -28,6 +28,15 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #define PyInt_AsLong PyLong_AsLong #endif +// Silence "implicit declaration of function" warnings +int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, + const npy_datetimestruct *dts, + npy_datetime *out); +int convert_timedelta_to_timedeltastruct(NPY_DATETIMEUNIT base, + npy_timedelta td, + pandas_timedeltastruct *out); + + const npy_datetimestruct _NS_MIN_DTS = { 1677, 9, 21, 0, 12, 43, 145225, 0, 0}; const npy_datetimestruct _NS_MAX_DTS = { diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index 25eede6c286dc..a18d12616a802 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -262,7 +262,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { ("\n\nmake_stream_space: nbytes = %zu. grow_buffer(self->stream...)\n", nbytes)) self->stream = (char *)grow_buffer((void *)self->stream, self->stream_len, - (size_t*)&self->stream_cap, nbytes * 2, + (int64_t*)&self->stream_cap, nbytes * 2, sizeof(char), &status); TRACE( ("make_stream_space: self->stream=%p, self->stream_len = %zu, " @@ -289,7 +289,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { cap = self->words_cap; self->words = (char **)grow_buffer((void *)self->words, self->words_len, - (size_t*)&self->words_cap, nbytes, + (int64_t*)&self->words_cap, nbytes, sizeof(char *), &status); TRACE( ("make_stream_space: grow_buffer(self->self->words, %zu, %zu, %zu, " @@ -320,7 +320,7 @@ static int make_stream_space(parser_t *self, size_t nbytes) { cap = self->lines_cap; self->line_start = (int64_t *)grow_buffer((void *)self->line_start, self->lines + 1, - (size_t*)&self->lines_cap, nbytes, + (int64_t*)&self->lines_cap, nbytes, sizeof(int64_t), &status); TRACE(( "make_stream_space: grow_buffer(self->line_start, %zu, %zu, %zu, %d)\n", diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd index 7ce2181f32553..7285bc6399b49 100644 --- a/pandas/_libs/src/util.pxd +++ 
b/pandas/_libs/src/util.pxd @@ -70,7 +70,7 @@ cdef extern from "numpy_helper.h": int assign_value_1d(ndarray, Py_ssize_t, object) except -1 cnp.int64_t get_nat() object get_value_1d(ndarray, Py_ssize_t) - char *get_c_string(object) except NULL + const char *get_c_string(object) except NULL object char_to_string(char*) ctypedef fused numeric: diff --git a/pandas/io/msgpack/_unpacker.pyx b/pandas/io/msgpack/_unpacker.pyx index 04bb330e595dd..7031563fcd37a 100644 --- a/pandas/io/msgpack/_unpacker.pyx +++ b/pandas/io/msgpack/_unpacker.pyx @@ -139,7 +139,7 @@ def unpackb(object packed, object object_hook=None, object list_hook=None, ret = unpack_construct(&ctx, buf, buf_len, &off) if ret == 1: obj = unpack_data(&ctx) - if off < buf_len: + if <Py_ssize_t>off < buf_len: raise ExtraData(obj, PyBytes_FromStringAndSize( buf + off, buf_len - off)) return obj @@ -367,9 +367,11 @@ cdef class Unpacker(object): self.buf_tail = tail + _buf_len cdef read_from_file(self): + # Assume self.max_buffer_size - (self.buf_tail - self.buf_head) >= 0 next_bytes = self.file_like_read( min(self.read_size, - self.max_buffer_size - (self.buf_tail - self.buf_head))) + <Py_ssize_t>(self.max_buffer_size - + (self.buf_tail - self.buf_head)))) if next_bytes: self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) @@ -417,7 +419,9 @@ cdef class Unpacker(object): def read_bytes(self, Py_ssize_t nbytes): """Read a specified number of raw bytes from the stream""" cdef size_t nread - nread = min(self.buf_tail - self.buf_head, nbytes) + + # Assume that self.buf_tail - self.buf_head >= 0 + nread = min(<Py_ssize_t>(self.buf_tail - self.buf_head), nbytes) ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) self.buf_head += nread if len(ret) < nbytes and self.file_like is not None:
With the exception of Numpy-Deprecated-API-1.7 warnings that _any_ cython code produces, this fixes all remaining compiler warnings (... on py27, there's still a whole mess of them in py37). Based on a little bit of profiling it looked like `npy_isnan(x)` gives a 40% perf improvement over `x != x`. But profiling is hard, so who knows. In the comment where npy_nan is imported there is a link to a discussion about it.
https://api.github.com/repos/pandas-dev/pandas/pulls/21940
2018-07-17T01:56:41Z
2018-07-19T02:56:28Z
null
2020-04-05T17:42:44Z
CLN: Remove PeriodIndex.tz_convert, tz_localize
diff --git a/doc/source/api.rst b/doc/source/api.rst index fff944651588e..beded99318a5e 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1870,8 +1870,6 @@ Methods PeriodIndex.asfreq PeriodIndex.strftime PeriodIndex.to_timestamp - PeriodIndex.tz_convert - PeriodIndex.tz_localize Scalars ------- diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9e3f7ec73f852..c7c24a284fea7 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -304,7 +304,7 @@ Other API Changes a ``KeyError`` (:issue:`21678`). - Invalid construction of ``IntervalDtype`` will now always raise a ``TypeError`` rather than a ``ValueError`` if the subdtype is invalid (:issue:`21185`) - Trying to reindex a ``DataFrame`` with a non unique ``MultiIndex`` now raises a ``ValueError`` instead of an ``Exception`` (:issue:`21770`) -- +- :meth:`PeriodIndex.tz_convert` and :meth:`PeriodIndex.tz_localize` have been removed (:issue:`21781`) .. _whatsnew_0240.deprecations: diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a8e0c7f1aaa6a..f97f93d975af2 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -140,8 +140,6 @@ class PeriodIndex(PeriodArrayMixin, DatelikeOps, DatetimeIndexOpsMixin, asfreq strftime to_timestamp - tz_convert - tz_localize Examples -------- @@ -805,50 +803,6 @@ def __setstate__(self, state): _unpickle_compat = __setstate__ - def tz_convert(self, tz): - """ - Convert tz-aware DatetimeIndex from one time zone to another (using - pytz/dateutil) - - Parameters - ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time. Corresponding timestamps would be converted to - time zone of the TimeSeries. - None will remove timezone holding UTC time. 
- - Returns - ------- - normalized : DatetimeIndex - - Notes - ----- - Not currently implemented for PeriodIndex - """ - raise NotImplementedError("Not yet implemented for PeriodIndex") - - def tz_localize(self, tz, ambiguous='raise'): - """ - Localize tz-naive DatetimeIndex to given time zone (using - pytz/dateutil), or remove timezone from tz-aware DatetimeIndex - - Parameters - ---------- - tz : string, pytz.timezone, dateutil.tz.tzfile or None - Time zone for time. Corresponding timestamps would be converted to - time zone of the TimeSeries. - None will remove timezone holding local time. - - Returns - ------- - localized : DatetimeIndex - - Notes - ----- - Not currently implemented for PeriodIndex - """ - raise NotImplementedError("Not yet implemented for PeriodIndex") - PeriodIndex._add_comparison_methods() PeriodIndex._add_numeric_methods_disabled() diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index fb9bd74d9876d..b1d9d362d1402 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -747,7 +747,6 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): def test_frame_to_period(self): K = 5 - from pandas.core.indexes.period import period_range dr = date_range('1/1/2000', '1/1/2001') pr = period_range('1/1/2000', '1/1/2001') @@ -776,14 +775,6 @@ def test_frame_to_period(self): @pytest.mark.parametrize("fn", ['tz_localize', 'tz_convert']) def test_tz_convert_and_localize(self, fn): l0 = date_range('20140701', periods=5, freq='D') - - # TODO: l1 should be a PeriodIndex for testing - # after GH2106 is addressed - with pytest.raises(NotImplementedError): - period_range('20140701', periods=1).tz_convert('UTC') - with pytest.raises(NotImplementedError): - period_range('20140701', periods=1).tz_localize('UTC') - # l1 = period_range('20140701', periods=5, freq='D') l1 = date_range('20140701', periods=5, freq='D') int_idx = Index(range(5))
- [x] closes #21781 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - Removed PeriodIndex.tz_convert and PeriodIndex.tz_localize from PeriodIndex. - Removed references to those methods in test_tz_convert_and_localize within test_timeseries - Removed methods from the PeriodIndex class doc and updated whatsnew test_tz_convert_and_localize also contains a TODO referencing changing the test to use PeriodIndex for a timezone test, which if it is never gaining timezone info should probably be removed. I just wanted to check that this was a good idea before doing it.
https://api.github.com/repos/pandas-dev/pandas/pulls/21935
2018-07-16T17:22:39Z
2018-07-18T01:00:19Z
2018-07-18T01:00:19Z
2018-07-19T19:45:33Z
BUG: Properly handle lists for .mask
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index a0076118a28a7..01a0c35117ea3 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -160,7 +160,7 @@ For situations where you need an ``ndarray`` of ``Interval`` objects, use :meth:`numpy.asarray` or ``idx.astype(object)``. .. ipython:: python - + np.asarray(idx) idx.values.astype(object) @@ -487,6 +487,7 @@ Reshaping - Bug in :func:`pandas.concat` when joining resampled DataFrames with timezone aware index (:issue:`13783`) - Bug in :meth:`Series.combine_first` with ``datetime64[ns, tz]`` dtype which would return tz-naive result (:issue:`21469`) - Bug in :meth:`Series.where` and :meth:`DataFrame.where` with ``datetime64[ns, tz]`` dtype (:issue:`21546`) +- Bug in :meth:`Series.mask` and :meth:`DataFrame.mask` with ``list`` conditionals (:issue:`21891`) - - diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7305da4f56506..b682f5e65f876 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7941,6 +7941,10 @@ def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None, inplace = validate_bool_kwarg(inplace, 'inplace') cond = com._apply_if_callable(cond, self) + # see gh-21891 + if not hasattr(cond, "__invert__"): + cond = np.array(cond) + return self.where(~cond, other=other, inplace=inplace, axis=axis, level=level, try_cast=try_cast, errors=errors) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 9ca2b7e3c8a6a..2eed6b47df9e3 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2966,6 +2966,13 @@ def test_mask(self): assert_frame_equal(rs, df.mask(df <= 0, other)) assert_frame_equal(rs, df.mask(~cond, other)) + # see gh-21891 + df = DataFrame([1, 2]) + res = df.mask([[True], [False]]) + + exp = DataFrame([np.nan, 2]) + tm.assert_frame_equal(res, exp) + def test_mask_inplace(self): # GH8801 df = DataFrame(np.random.randn(5, 3)) 
diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index bd54d5f57d12d..e2a9b3586648d 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -617,6 +617,13 @@ def test_mask(): expected = Series([1, 2, np.nan, np.nan]) assert_series_equal(result, expected) + # see gh-21891 + s = Series([1, 2]) + res = s.mask([True, False]) + + exp = Series([np.nan, 2]) + tm.assert_series_equal(res, exp) + def test_mask_inplace(): s = Series(np.random.randn(5))
Title is self-explanatory. Closes #21891.
https://api.github.com/repos/pandas-dev/pandas/pulls/21934
2018-07-16T16:37:41Z
2018-07-17T19:40:40Z
2018-07-17T19:40:39Z
2018-07-17T19:47:31Z
ENH: add iso-format support to to_timedelta (#21877)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index ed4022d422b4d..04e56c373edc0 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -84,6 +84,7 @@ Other Enhancements - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) +- :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) - .. _whatsnew_0240.api_breaking: diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index b9405b15a0980..f7a6cf0c6dafc 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -183,7 +183,11 @@ cpdef convert_to_timedelta64(object ts, object unit): ts = cast_from_unit(ts, unit) ts = np.timedelta64(ts) elif is_string_object(ts): - ts = np.timedelta64(parse_timedelta_string(ts)) + if len(ts) > 0 and ts[0] == 'P': + ts = parse_iso_format_string(ts) + else: + ts = parse_timedelta_string(ts) + ts = np.timedelta64(ts) elif hasattr(ts, 'delta'): ts = np.timedelta64(delta_to_nanoseconds(ts), 'ns') diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py index 68dc0003e2312..447e2b40050f6 100644 --- a/pandas/tests/indexes/timedeltas/test_construction.py +++ b/pandas/tests/indexes/timedeltas/test_construction.py @@ -44,6 +44,13 @@ def test_constructor(self): tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit='ms'), expected) + def test_constructor_iso(self): + # GH #21877 + expected = 
timedelta_range('1s', periods=9, freq='s') + durations = ['P0DT0H0M{}S'.format(i) for i in range(1, 10)] + result = to_timedelta(durations) + tm.assert_index_equal(result, expected) + def test_constructor_coverage(self): rng = timedelta_range('1 days', periods=10.5) exp = timedelta_range('1 days', periods=10) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 6472bd4245622..017606dc42d59 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -233,6 +233,11 @@ def check(value): assert tup.microseconds == 999 assert tup.nanoseconds == 0 + def test_iso_conversion(self): + # GH #21877 + expected = Timedelta(1, unit='s') + assert to_timedelta('P0DT0H0M1S') == expected + def test_nat_converters(self): assert to_timedelta('nat', box=False).astype('int64') == iNaT assert to_timedelta('nan', box=False).astype('int64') == iNaT
- [x] closes #21877 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21933
2018-07-16T15:38:53Z
2018-07-20T12:42:01Z
2018-07-20T12:42:01Z
2018-07-20T12:49:14Z
DEPR: Series.compress
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 137fd5aafe5bd..65dad1304d780 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -382,7 +382,7 @@ Deprecations - :meth:`DataFrame.to_stata`, :meth:`read_stata`, :class:`StataReader` and :class:`StataWriter` have deprecated the ``encoding`` argument. The encoding of a Stata dta file is determined by the file type and cannot be changed (:issue:`21244`). - :meth:`MultiIndex.to_hierarchical` is deprecated and will be removed in a future version (:issue:`21613`) - :meth:`Series.ptp` is deprecated. Use ``numpy.ptp`` instead (:issue:`21614`) -- +- :meth:`Series.compress` is deprecated. Use ``Series[condition]`` instead (:issue:`18262`) .. _whatsnew_0240.prior_deprecations: diff --git a/pandas/core/series.py b/pandas/core/series.py index 3571e908fc6a7..c53caac980790 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -510,10 +510,15 @@ def compress(self, condition, *args, **kwargs): """ Return selected slices of an array along given axis as a Series + .. deprecated:: 0.24.0 + See also -------- numpy.ndarray.compress """ + msg = ("Series.compress(condition) is deprecated. " + "Use Series[condition] instead.") + warnings.warn(msg, FutureWarning, stacklevel=2) nv.validate_compress(args, kwargs) return self[condition] diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 8c0f4b11149fe..69969bd090b9b 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -585,7 +585,9 @@ def test_compress(self): index=list('abcde'), name='foo') expected = Series(s.values.compress(cond), index=list('ac'), name='foo') - tm.assert_series_equal(s.compress(cond), expected) + with tm.assert_produces_warning(FutureWarning): + result = s.compress(cond) + tm.assert_series_equal(result, expected) def test_numpy_compress(self): cond = [True, False, True, False, False]
xref #18262
https://api.github.com/repos/pandas-dev/pandas/pulls/21930
2018-07-16T12:59:18Z
2018-07-25T10:31:37Z
2018-07-25T10:31:37Z
2018-07-26T13:16:43Z
CLN: Address MulitIndex Test Follow Ups in Issue #21918
diff --git a/pandas/tests/indexes/multi/test_analytics.py b/pandas/tests/indexes/multi/test_analytics.py index 072356e4923a6..4cc0504417801 100644 --- a/pandas/tests/indexes/multi/test_analytics.py +++ b/pandas/tests/indexes/multi/test_analytics.py @@ -1,4 +1,11 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pandas as pd +import pandas.util.testing as tm import pytest +from pandas import Index, MultiIndex, date_range, period_range +from pandas.compat import lrange def test_shift(idx): @@ -6,3 +13,316 @@ def test_shift(idx): # GH8083 test the base class for shift pytest.raises(NotImplementedError, idx.shift, 1) pytest.raises(NotImplementedError, idx.shift, 1, 2) + + +def test_bounds(idx): + idx._bounds + + +def test_groupby(idx): + groups = idx.groupby(np.array([1, 1, 1, 2, 2, 2])) + labels = idx.get_values().tolist() + exp = {1: labels[:3], 2: labels[3:]} + tm.assert_dict_equal(groups, exp) + + # GH5620 + groups = idx.groupby(idx) + exp = {key: [key] for key in idx} + tm.assert_dict_equal(groups, exp) + + +def test_truncate(): + major_axis = Index(lrange(4)) + minor_axis = Index(lrange(2)) + + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + + index = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels]) + + result = index.truncate(before=1) + assert 'foo' not in result.levels[0] + assert 1 in result.levels[0] + + result = index.truncate(after=1) + assert 2 not in result.levels[0] + assert 1 in result.levels[0] + + result = index.truncate(before=1, after=2) + assert len(result.levels[0]) == 2 + + # after < before + pytest.raises(ValueError, index.truncate, 3, 1) + + +def test_where(): + i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) + + def f(): + i.where(True) + + pytest.raises(NotImplementedError, f) + + +def test_where_array_like(): + i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) + klasses = [list, tuple, np.array, pd.Series] + cond = [False, True] + + for klass in klasses: + 
def f(): + return i.where(klass(cond)) + pytest.raises(NotImplementedError, f) + +# TODO: reshape + + +def test_reorder_levels(idx): + # this blows up + tm.assert_raises_regex(IndexError, '^Too many levels', + idx.reorder_levels, [2, 1, 0]) + + +def test_numpy_repeat(): + reps = 2 + numbers = [1, 2, 3] + names = np.array(['foo', 'bar']) + + m = MultiIndex.from_product([ + numbers, names], names=names) + expected = MultiIndex.from_product([ + numbers, names.repeat(reps)], names=names) + tm.assert_index_equal(np.repeat(m, reps), expected) + + msg = "the 'axis' parameter is not supported" + tm.assert_raises_regex( + ValueError, msg, np.repeat, m, reps, axis=1) + + +def test_append_mixed_dtypes(): + # GH 13660 + dti = date_range('2011-01-01', freq='M', periods=3, ) + dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern') + pi = period_range('2011-01', freq='M', periods=3) + + mi = MultiIndex.from_arrays([[1, 2, 3], + [1.1, np.nan, 3.3], + ['a', 'b', 'c'], + dti, dti_tz, pi]) + assert mi.nlevels == 6 + + res = mi.append(mi) + exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3], + [1.1, np.nan, 3.3, 1.1, np.nan, 3.3], + ['a', 'b', 'c', 'a', 'b', 'c'], + dti.append(dti), + dti_tz.append(dti_tz), + pi.append(pi)]) + tm.assert_index_equal(res, exp) + + other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'], + ['x', 'y', 'z'], ['x', 'y', 'z'], + ['x', 'y', 'z'], ['x', 'y', 'z']]) + + res = mi.append(other) + exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'], + [1.1, np.nan, 3.3, 'x', 'y', 'z'], + ['a', 'b', 'c', 'x', 'y', 'z'], + dti.append(pd.Index(['x', 'y', 'z'])), + dti_tz.append(pd.Index(['x', 'y', 'z'])), + pi.append(pd.Index(['x', 'y', 'z']))]) + tm.assert_index_equal(res, exp) + + +def test_take(idx): + indexer = [4, 3, 0, 2] + result = idx.take(indexer) + expected = idx[indexer] + assert result.equals(expected) + + # TODO: Remove Commented Code + # if not isinstance(idx, + # (DatetimeIndex, PeriodIndex, TimedeltaIndex)): + # GH 10791 + 
with pytest.raises(AttributeError): + idx.freq + + +def test_take_invalid_kwargs(idx): + idx = idx + indices = [1, 2] + + msg = r"take\(\) got an unexpected keyword argument 'foo'" + tm.assert_raises_regex(TypeError, msg, idx.take, + indices, foo=2) + + msg = "the 'out' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, out=indices) + + msg = "the 'mode' parameter is not supported" + tm.assert_raises_regex(ValueError, msg, idx.take, + indices, mode='clip') + + +def test_take_fill_value(): + # GH 12631 + vals = [['A', 'B'], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] + idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) + + result = idx.take(np.array([1, 0, -1])) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + ('B', pd.Timestamp('2011-01-02'))] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + # fill_value + result = idx.take(np.array([1, 0, -1]), fill_value=True) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + (np.nan, pd.NaT)] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + # allow_fill=False + result = idx.take(np.array([1, 0, -1]), allow_fill=False, + fill_value=True) + exp_vals = [('A', pd.Timestamp('2011-01-02')), + ('A', pd.Timestamp('2011-01-01')), + ('B', pd.Timestamp('2011-01-02'))] + expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) + tm.assert_index_equal(result, expected) + + msg = ('When allow_fill=True and fill_value is not None, ' + 'all indices must be >= -1') + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -2]), fill_value=True) + with tm.assert_raises_regex(ValueError, msg): + idx.take(np.array([1, 0, -5]), fill_value=True) + + with pytest.raises(IndexError): + idx.take(np.array([1, -5])) + + +def test_iter(idx): + result = 
list(idx) + expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'), + ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] + assert result == expected + + +def test_sub(idx): + + first = idx + + # - now raises (previously was set op difference) + with pytest.raises(TypeError): + first - idx[-3:] + with pytest.raises(TypeError): + idx[-3:] - first + with pytest.raises(TypeError): + idx[-3:] - first.tolist() + with pytest.raises(TypeError): + first.tolist() - idx[-3:] + + +def test_map(idx): + # callable + index = idx + + # we don't infer UInt64 + if isinstance(index, pd.UInt64Index): + expected = index.astype('int64') + else: + expected = index + + result = index.map(lambda x: x) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + "mapper", + [ + lambda values, idx: {i: e for e, i in zip(values, idx)}, + lambda values, idx: pd.Series(values, idx)]) +def test_map_dictlike(idx, mapper): + + if isinstance(idx, (pd.CategoricalIndex, pd.IntervalIndex)): + pytest.skip("skipping tests for {}".format(type(idx))) + + identity = mapper(idx.values, idx) + + # we don't infer to UInt64 for a dict + if isinstance(idx, pd.UInt64Index) and isinstance(identity, dict): + expected = idx.astype('int64') + else: + expected = idx + + result = idx.map(identity) + tm.assert_index_equal(result, expected) + + # empty mappable + expected = pd.Index([np.nan] * len(idx)) + result = idx.map(mapper(expected, idx)) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize('func', [ + np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10, + np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin, + np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, + np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, + np.rad2deg +]) +def test_numpy_ufuncs(func): + # test ufuncs of numpy 1.9.2. 
see: + # http://docs.scipy.org/doc/numpy/reference/ufuncs.html + + # some functions are skipped because it may return different result + # for unicode input depending on numpy version + + # copy and paste from idx fixture as pytest doesn't support + # parameters and fixtures at the same time. + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + index_names = ['first', 'second'] + + idx = MultiIndex( + levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=index_names, + verify_integrity=False + ) + + with pytest.raises(Exception): + with np.errstate(all='ignore'): + func(idx) + + +@pytest.mark.parametrize('func', [ + np.isfinite, np.isinf, np.isnan, np.signbit +]) +def test_numpy_type_funcs(func): + # for func in [np.isfinite, np.isinf, np.isnan, np.signbit]: + # copy and paste from idx fixture as pytest doesn't support + # parameters and fixtures at the same time. 
+ major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + major_labels = np.array([0, 0, 1, 2, 3, 3]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + index_names = ['first', 'second'] + + idx = MultiIndex( + levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=index_names, + verify_integrity=False + ) + + with pytest.raises(Exception): + func(idx) diff --git a/pandas/tests/indexes/multi/test_astype.py b/pandas/tests/indexes/multi/test_astype.py new file mode 100644 index 0000000000000..e0e23609290e5 --- /dev/null +++ b/pandas/tests/indexes/multi/test_astype.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +import numpy as np +import pandas.util.testing as tm +import pytest +from pandas.util.testing import assert_copy +from pandas.core.dtypes.dtypes import CategoricalDtype + + +def test_astype(idx): + expected = idx.copy() + actual = idx.astype('O') + assert_copy(actual.levels, expected.levels) + assert_copy(actual.labels, expected.labels) + assert [level.name for level in actual.levels] == list(expected.names) + + with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"): + idx.astype(np.dtype(int)) + + +@pytest.mark.parametrize('ordered', [True, False]) +def test_astype_category(idx, ordered): + # GH 18630 + msg = '> 1 ndim Categorical are not supported at this time' + with tm.assert_raises_regex(NotImplementedError, msg): + idx.astype(CategoricalDtype(ordered=ordered)) + + if ordered is False: + # dtype='category' defaults to ordered=False, so only test once + with tm.assert_raises_regex(NotImplementedError, msg): + idx.astype('category') diff --git a/pandas/tests/indexes/multi/test_constructor.py b/pandas/tests/indexes/multi/test_constructor.py index 9577662bda366..4b8d0553886b2 100644 --- a/pandas/tests/indexes/multi/test_constructor.py +++ b/pandas/tests/indexes/multi/test_constructor.py @@ -234,29 +234,30 @@ def test_from_arrays_empty(): tm.assert_index_equal(result, expected) -def 
test_from_arrays_invalid_input(): +@pytest.mark.parametrize('invalid_array', [ + (1), + ([1]), + ([1, 2]), + ([[1], 2]), + ('a'), + (['a']), + (['a', 'b']), + ([['a'], 'b']), +]) +def test_from_arrays_invalid_input(invalid_array): invalid_inputs = [1, [1], [1, 2], [[1], 2], 'a', ['a'], ['a', 'b'], [['a'], 'b']] for i in invalid_inputs: pytest.raises(TypeError, MultiIndex.from_arrays, arrays=i) -def test_from_arrays_different_lengths(): +@pytest.mark.parametrize('idx1, idx2', [ + ([1, 2, 3], ['a', 'b']), + ([], ['a', 'b']), + ([1, 2, 3], []) +]) +def test_from_arrays_different_lengths(idx1, idx2): # see gh-13599 - idx1 = [1, 2, 3] - idx2 = ['a', 'b'] - tm.assert_raises_regex(ValueError, '^all arrays must ' - 'be same length$', - MultiIndex.from_arrays, [idx1, idx2]) - - idx1 = [] - idx2 = ['a', 'b'] - tm.assert_raises_regex(ValueError, '^all arrays must ' - 'be same length$', - MultiIndex.from_arrays, [idx1, idx2]) - - idx1 = [1, 2, 3] - idx2 = [] tm.assert_raises_regex(ValueError, '^all arrays must ' 'be same length$', MultiIndex.from_arrays, [idx1, idx2]) @@ -305,66 +306,87 @@ def test_from_tuples_index_values(idx): assert (result.values == idx.values).all() -def test_from_product_empty(): +def test_from_product_empty_zero_levels(): # 0 levels with tm.assert_raises_regex( ValueError, "Must pass non-zero number of levels/labels"): MultiIndex.from_product([]) - # 1 level + +def test_from_product_empty_one_level(): result = MultiIndex.from_product([[]], names=['A']) expected = pd.Index([], name='A') tm.assert_index_equal(result.levels[0], expected) - # 2 levels - l1 = [[], ['foo', 'bar', 'baz'], []] - l2 = [[], [], ['a', 'b', 'c']] + +@pytest.mark.parametrize('first, second', [ + ([], []), + (['foo', 'bar', 'baz'], []), + ([], ['a', 'b', 'c']), +]) +def test_from_product_empty_two_levels(first, second): names = ['A', 'B'] - for first, second in zip(l1, l2): - result = MultiIndex.from_product([first, second], names=names) - expected = MultiIndex(levels=[first, 
second], - labels=[[], []], names=names) - tm.assert_index_equal(result, expected) + result = MultiIndex.from_product([first, second], names=names) + expected = MultiIndex(levels=[first, second], + labels=[[], []], names=names) + tm.assert_index_equal(result, expected) + +@pytest.mark.parametrize('N', list(range(4))) +def test_from_product_empty_three_levels(N): # GH12258 names = ['A', 'B', 'C'] - for N in range(4): - lvl2 = lrange(N) - result = MultiIndex.from_product([[], lvl2, []], names=names) - expected = MultiIndex(levels=[[], lvl2, []], - labels=[[], [], []], names=names) - tm.assert_index_equal(result, expected) + lvl2 = lrange(N) + result = MultiIndex.from_product([[], lvl2, []], names=names) + expected = MultiIndex(levels=[[], lvl2, []], + labels=[[], [], []], names=names) + tm.assert_index_equal(result, expected) -def test_from_product_invalid_input(): - invalid_inputs = [1, [1], [1, 2], [[1], 2], - 'a', ['a'], ['a', 'b'], [['a'], 'b']] - for i in invalid_inputs: - pytest.raises(TypeError, MultiIndex.from_product, iterables=i) +@pytest.mark.parametrize('invalid_input', [ + 1, + [1], + [1, 2], + [[1], 2], + 'a', + ['a'], + ['a', 'b'], + [['a'], 'b'], +]) +def test_from_product_invalid_input(invalid_input): + pytest.raises(TypeError, MultiIndex.from_product, iterables=invalid_input) def test_from_product_datetimeindex(): dt_index = date_range('2000-01-01', periods=2) mi = pd.MultiIndex.from_product([[1, 2], dt_index]) - etalon = construct_1d_object_array_from_listlike([(1, pd.Timestamp( - '2000-01-01')), (1, pd.Timestamp('2000-01-02')), (2, pd.Timestamp( - '2000-01-01')), (2, pd.Timestamp('2000-01-02'))]) + etalon = construct_1d_object_array_from_listlike([ + (1, pd.Timestamp('2000-01-01')), + (1, pd.Timestamp('2000-01-02')), + (2, pd.Timestamp('2000-01-01')), + (2, pd.Timestamp('2000-01-02')), + ]) tm.assert_numpy_array_equal(mi.values, etalon) -def test_from_product_index_series_categorical(): +@pytest.mark.parametrize('ordered', [False, True]) 
+@pytest.mark.parametrize('f', [ + lambda x: x, + lambda x: pd.Series(x), + lambda x: x.values +]) +def test_from_product_index_series_categorical(ordered, f): # GH13743 first = ['foo', 'bar'] - for ordered in [False, True]: - idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), - ordered=ordered) - expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"), - categories=list("bac"), - ordered=ordered) - for arr in [idx, pd.Series(idx), idx.values]: - result = pd.MultiIndex.from_product([first, arr]) - tm.assert_index_equal(result.get_level_values(1), expected) + idx = pd.CategoricalIndex(list("abcaab"), categories=list("bac"), + ordered=ordered) + expected = pd.CategoricalIndex(list("abcaab") + list("abcaab"), + categories=list("bac"), + ordered=ordered) + + result = pd.MultiIndex.from_product([first, f(idx)]) + tm.assert_index_equal(result.get_level_values(1), expected) def test_from_product(): @@ -409,19 +431,28 @@ def test_create_index_existing_name(idx): index = idx index.names = ['foo', 'bar'] result = pd.Index(index) - tm.assert_index_equal( - result, Index(Index([('foo', 'one'), ('foo', 'two'), - ('bar', 'one'), ('baz', 'two'), - ('qux', 'one'), ('qux', 'two')], - dtype='object'), - names=['foo', 'bar'])) + expected = Index( + Index([ + ('foo', 'one'), ('foo', 'two'), + ('bar', 'one'), ('baz', 'two'), + ('qux', 'one'), ('qux', 'two')], + dtype='object' + ), + names=['foo', 'bar'] + ) + tm.assert_index_equal(result, expected) result = pd.Index(index, names=['A', 'B']) - tm.assert_index_equal( - result, - Index(Index([('foo', 'one'), ('foo', 'two'), ('bar', 'one'), - ('baz', 'two'), ('qux', 'one'), ('qux', 'two')], - dtype='object'), names=['A', 'B'])) + expected = Index( + Index([ + ('foo', 'one'), ('foo', 'two'), + ('bar', 'one'), ('baz', 'two'), + ('qux', 'one'), ('qux', 'two')], + dtype='object' + ), + names=['A', 'B'] + ) + tm.assert_index_equal(result, expected) def test_tuples_with_name_string(): diff --git 
a/pandas/tests/indexes/multi/test_contains.py b/pandas/tests/indexes/multi/test_contains.py index aaed4467816da..7b91a1d14d7e8 100644 --- a/pandas/tests/indexes/multi/test_contains.py +++ b/pandas/tests/indexes/multi/test_contains.py @@ -43,8 +43,10 @@ def test_isin_nan_pypy(): def test_isin(): values = [('foo', 2), ('bar', 3), ('quux', 4)] - idx = MultiIndex.from_arrays([['qux', 'baz', 'foo', 'bar'], np.arange( - 4)]) + idx = MultiIndex.from_arrays([ + ['qux', 'baz', 'foo', 'bar'], + np.arange(4) + ]) result = idx.isin(values) expected = np.array([False, False, True, True]) tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/multi/test_copy.py b/pandas/tests/indexes/multi/test_copy.py index 282f2fa84efe0..f6c5c0c5eb346 100644 --- a/pandas/tests/indexes/multi/test_copy.py +++ b/pandas/tests/indexes/multi/test_copy.py @@ -3,8 +3,8 @@ from copy import copy, deepcopy import pandas.util.testing as tm -from pandas import (CategoricalIndex, IntervalIndex, MultiIndex, PeriodIndex, - RangeIndex, Series, compat) +import pytest +from pandas import MultiIndex def assert_multiindex_copied(copy, original): @@ -41,84 +41,46 @@ def test_view(idx): assert_multiindex_copied(i_view, idx) -def test_copy_name(idx): - # gh-12309: Check that the "name" argument - # passed at initialization is honored. - - # TODO: Remove or refactor MultiIndex not tested. - for name, index in compat.iteritems({'idx': idx}): - if isinstance(index, MultiIndex): - continue - - first = index.__class__(index, copy=True, name='mario') - second = first.__class__(first, copy=False) - - # Even though "copy=False", we want a new object. - assert first is not second - - # Not using tm.assert_index_equal() since names differ. 
- assert index.equals(first) - - assert first.name == 'mario' - assert second.name == 'mario' - - s1 = Series(2, index=first) - s2 = Series(3, index=second[:-1]) - - if not isinstance(index, CategoricalIndex): - # See gh-13365 - s3 = s1 * s2 - assert s3.index.name == 'mario' - - -def test_ensure_copied_data(idx): - # Check the "copy" argument of each Index.__new__ is honoured - # GH12309 - # TODO: REMOVE THIS TEST. MultiIndex is tested seperately as noted below. - - for name, index in compat.iteritems({'idx': idx}): - init_kwargs = {} - if isinstance(index, PeriodIndex): - # Needs "freq" specification: - init_kwargs['freq'] = index.freq - elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)): - # RangeIndex cannot be initialized from data - # MultiIndex and CategoricalIndex are tested separately - continue - - index_type = index.__class__ - result = index_type(index.values, copy=True, **init_kwargs) - tm.assert_index_equal(index, result) - tm.assert_numpy_array_equal(index.values, result.values, - check_same='copy') - - if isinstance(index, PeriodIndex): - # .values an object array of Period, thus copied - result = index_type(ordinal=index.asi8, copy=False, - **init_kwargs) - tm.assert_numpy_array_equal(index._ndarray_values, - result._ndarray_values, - check_same='same') - elif isinstance(index, IntervalIndex): - # checked in test_interval.py - pass - else: - result = index_type(index.values, copy=False, **init_kwargs) - tm.assert_numpy_array_equal(index.values, result.values, - check_same='same') - tm.assert_numpy_array_equal(index._ndarray_values, - result._ndarray_values, - check_same='same') - - -def test_copy_and_deepcopy(indices): - - if isinstance(indices, MultiIndex): - return - for func in (copy, deepcopy): - idx_copy = func(indices) - assert idx_copy is not indices - assert idx_copy.equals(indices) - - new_copy = indices.copy(deep=True, name="banana") - assert new_copy.name == "banana" +@pytest.mark.parametrize('func', [copy, deepcopy]) +def 
test_copy_and_deepcopy(func): + + idx = MultiIndex( + levels=[['foo', 'bar'], ['fizz', 'buzz']], + labels=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=['first', 'second'] + ) + idx_copy = func(idx) + assert idx_copy is not idx + assert idx_copy.equals(idx) + + +@pytest.mark.parametrize('deep', [True, False]) +def test_copy_method(deep): + idx = MultiIndex( + levels=[['foo', 'bar'], ['fizz', 'buzz']], + labels=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=['first', 'second'] + ) + idx_copy = idx.copy(deep=deep) + assert idx_copy.equals(idx) + + +@pytest.mark.parametrize('deep', [True, False]) +@pytest.mark.parametrize('kwarg, value', [ + ('names', ['thrid', 'fourth']), + ('levels', [['foo2', 'bar2'], ['fizz2', 'buzz2']]), + ('labels', [[1, 0, 0, 0], [1, 1, 0, 0]]) +]) +def test_copy_method_kwargs(deep, kwarg, value): + # gh-12309: Check that the "name" argument as well other kwargs are honored + idx = MultiIndex( + levels=[['foo', 'bar'], ['fizz', 'buzz']], + labels=[[0, 0, 0, 1], [0, 0, 1, 1]], + names=['first', 'second'] + ) + + idx_copy = idx.copy(**{kwarg: value, 'deep': deep}) + if kwarg == 'names': + assert getattr(idx_copy, kwarg) == value + else: + assert list(list(i) for i in getattr(idx_copy, kwarg)) == value diff --git a/pandas/tests/indexes/multi/test_equivalence.py b/pandas/tests/indexes/multi/test_equivalence.py index 0bebe3165e2e8..7770ee96bbfb3 100644 --- a/pandas/tests/indexes/multi/test_equivalence.py +++ b/pandas/tests/indexes/multi/test_equivalence.py @@ -4,29 +4,25 @@ import numpy as np import pandas as pd import pandas.util.testing as tm -from pandas import Index, MultiIndex, RangeIndex, Series, compat +from pandas import Index, MultiIndex, Series from pandas.compat import lrange, lzip, range def test_equals(idx): - # TODO: Remove or Refactor. MultiIndex not tested. 
- for name, idx in compat.iteritems({'idx': idx}): - assert idx.equals(idx) - assert idx.equals(idx.copy()) - assert idx.equals(idx.astype(object)) - - assert not idx.equals(list(idx)) - assert not idx.equals(np.array(idx)) - - # Cannot pass in non-int64 dtype to RangeIndex - if not isinstance(idx, RangeIndex): - same_values = Index(idx, dtype=object) - assert idx.equals(same_values) - assert same_values.equals(idx) - - if idx.nlevels == 1: - # do not test MultiIndex - assert not idx.equals(pd.Series(idx)) + assert idx.equals(idx) + assert idx.equals(idx.copy()) + assert idx.equals(idx.astype(object)) + + assert not idx.equals(list(idx)) + assert not idx.equals(np.array(idx)) + + same_values = Index(idx, dtype=object) + assert idx.equals(same_values) + assert same_values.equals(idx) + + if idx.nlevels == 1: + # do not test MultiIndex + assert not idx.equals(pd.Series(idx)) def test_equals_op(idx): diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py index 21e8a199cadd9..63936a74b6b8c 100644 --- a/pandas/tests/indexes/multi/test_format.py +++ b/pandas/tests/indexes/multi/test_format.py @@ -100,11 +100,6 @@ def test_repr_roundtrip(): tm.assert_index_equal(result, mi_u, exact=True) -def test_str(): - # tested elsewhere - pass - - def test_unicode_string_with_unicode(): d = {"a": [u("\u05d0"), 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]} idx = pd.DataFrame(d).set_index(["a", "b"]).index diff --git a/pandas/tests/indexes/multi/test_get_set.py b/pandas/tests/indexes/multi/test_get_set.py index 56fd4c04cb96e..30be5b546f7c7 100644 --- a/pandas/tests/indexes/multi/test_get_set.py +++ b/pandas/tests/indexes/multi/test_get_set.py @@ -9,6 +9,16 @@ from pandas.compat import range +def assert_matching(actual, expected, check_dtype=False): + # avoid specifying internal representation + # as much as possible + assert len(actual) == len(expected) + for act, exp in zip(actual, expected): + act = np.asarray(act) + exp = np.asarray(exp) + 
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype) + + def test_get_level_number_integer(idx): idx.names = [1, 0] assert idx._get_level_number(1) == 0 @@ -164,15 +174,6 @@ def test_set_levels(idx): levels = idx.levels new_levels = [[lev + 'a' for lev in level] for level in levels] - def assert_matching(actual, expected, check_dtype=False): - # avoid specifying internal representation - # as much as possible - assert len(actual) == len(expected) - for act, exp in zip(actual, expected): - act = np.asarray(act) - exp = np.asarray(exp) - tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype) - # level changing [w/o mutation] ind2 = idx.set_levels(new_levels) assert_matching(ind2.levels, new_levels) @@ -254,15 +255,6 @@ def test_set_labels(idx): minor_labels = [(x + 1) % 1 for x in minor_labels] new_labels = [major_labels, minor_labels] - def assert_matching(actual, expected): - # avoid specifying internal representation - # as much as possible - assert len(actual) == len(expected) - for act, exp in zip(actual, expected): - act = np.asarray(act) - exp = np.asarray(exp, dtype=np.int8) - tm.assert_numpy_array_equal(act, exp) - # label changing [w/o mutation] ind2 = idx.set_labels(new_labels) assert_matching(ind2.labels, new_labels) @@ -389,21 +381,22 @@ def test_set_names_with_nlevel_1(inplace): tm.assert_index_equal(result, expected) -def test_set_levels_categorical(): +@pytest.mark.parametrize('ordered', [True, False]) +def test_set_levels_categorical(ordered): # GH13854 index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]]) - for ordered in [False, True]: - cidx = CategoricalIndex(list("bac"), ordered=ordered) - result = index.set_levels(cidx, 0) - expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], - labels=index.labels) - tm.assert_index_equal(result, expected) - - result_lvl = result.get_level_values(0) - expected_lvl = CategoricalIndex(list("bacb"), - categories=cidx.categories, - ordered=cidx.ordered) - tm.assert_index_equal(result_lvl, 
expected_lvl) + + cidx = CategoricalIndex(list("bac"), ordered=ordered) + result = index.set_levels(cidx, 0) + expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]], + labels=index.labels) + tm.assert_index_equal(result, expected) + + result_lvl = result.get_level_values(0) + expected_lvl = CategoricalIndex(list("bacb"), + categories=cidx.categories, + ordered=cidx.ordered) + tm.assert_index_equal(result_lvl, expected_lvl) def test_set_value_keeps_names(): diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 0b528541e5eb6..ebd50909bae98 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -109,31 +109,6 @@ def test_slice_locs_not_contained(): assert result == (0, len(index)) -def test_insert_base(idx): - - result = idx[1:4] - - # test 0th element - assert idx[0:4].equals(result.insert(0, idx[0])) - - -def test_delete_base(idx): - - expected = idx[1:] - result = idx.delete(0) - assert result.equals(expected) - assert result.name == expected.name - - expected = idx[:-1] - result = idx.delete(-1) - assert result.equals(expected) - assert result.name == expected.name - - with pytest.raises((IndexError, ValueError)): - # either depending on numpy version - result = idx.delete(len(idx)) - - def test_putmask_with_wrong_mask(idx): # GH18368 diff --git a/pandas/tests/indexes/multi/test_join.py b/pandas/tests/indexes/multi/test_join.py index 4a386c6e8dbe4..ac3958956bae7 100644 --- a/pandas/tests/indexes/multi/test_join.py +++ b/pandas/tests/indexes/multi/test_join.py @@ -8,10 +8,11 @@ from pandas import Index, MultiIndex -@pytest.mark.parametrize('other', - [Index(['three', 'one', 'two']), - Index(['one']), - Index(['one', 'three'])]) +@pytest.mark.parametrize('other', [ + Index(['three', 'one', 'two']), + Index(['one']), + Index(['one', 'three']), +]) def test_join_level(idx, other, join_type): join_index, lidx, ridx = other.join(idx, how=join_type, level='second', 
diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py index 01465ea4c2f3b..79fcff965e725 100644 --- a/pandas/tests/indexes/multi/test_missing.py +++ b/pandas/tests/indexes/multi/test_missing.py @@ -4,7 +4,7 @@ import pandas as pd import pandas.util.testing as tm import pytest -from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index, isna +from pandas import Int64Index, MultiIndex, PeriodIndex, UInt64Index from pandas._libs.tslib import iNaT from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin @@ -78,27 +78,9 @@ def test_nulls(idx): # this is really a smoke test for the methods # as these are adequately tested for function elsewhere - # TODO: Remove or Refactor. MultiIndex not Implemeted. - for name, index in [('idx', idx), ]: - if len(index) == 0: - tm.assert_numpy_array_equal( - index.isna(), np.array([], dtype=bool)) - elif isinstance(index, MultiIndex): - idx = index.copy() - msg = "isna is not defined for MultiIndex" - with tm.assert_raises_regex(NotImplementedError, msg): - idx.isna() - else: - - if not index.hasnans: - tm.assert_numpy_array_equal( - index.isna(), np.zeros(len(index), dtype=bool)) - tm.assert_numpy_array_equal( - index.notna(), np.ones(len(index), dtype=bool)) - else: - result = isna(index) - tm.assert_numpy_array_equal(index.isna(), result) - tm.assert_numpy_array_equal(index.notna(), ~result) + msg = "isna is not defined for MultiIndex" + with tm.assert_raises_regex(NotImplementedError, msg): + idx.isna() @pytest.mark.xfail diff --git a/pandas/tests/indexes/multi/test_operations.py b/pandas/tests/indexes/multi/test_operations.py deleted file mode 100644 index d38cb28039595..0000000000000 --- a/pandas/tests/indexes/multi/test_operations.py +++ /dev/null @@ -1,448 +0,0 @@ -# -*- coding: utf-8 -*- - -import numpy as np -import pandas as pd -import pandas.util.testing as tm -import pytest -from pandas import (DatetimeIndex, Float64Index, Index, Int64Index, MultiIndex, - 
PeriodIndex, TimedeltaIndex, UInt64Index, date_range, - period_range) -from pandas.compat import lrange, range -from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin -from pandas.util.testing import assert_copy - - -def check_level_names(index, names): - assert [level.name for level in index.levels] == list(names) - - -def test_insert(idx): - # key contained in all levels - new_index = idx.insert(0, ('bar', 'two')) - assert new_index.equal_levels(idx) - assert new_index[0] == ('bar', 'two') - - # key not contained in all levels - new_index = idx.insert(0, ('abc', 'three')) - - exp0 = Index(list(idx.levels[0]) + ['abc'], name='first') - tm.assert_index_equal(new_index.levels[0], exp0) - - exp1 = Index(list(idx.levels[1]) + ['three'], name='second') - tm.assert_index_equal(new_index.levels[1], exp1) - assert new_index[0] == ('abc', 'three') - - # key wrong length - msg = "Item must have length equal to number of levels" - with tm.assert_raises_regex(ValueError, msg): - idx.insert(0, ('foo2',)) - - left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]], - columns=['1st', '2nd', '3rd']) - left.set_index(['1st', '2nd'], inplace=True) - ts = left['3rd'].copy(deep=True) - - left.loc[('b', 'x'), '3rd'] = 2 - left.loc[('b', 'a'), '3rd'] = -1 - left.loc[('b', 'b'), '3rd'] = 3 - left.loc[('a', 'x'), '3rd'] = 4 - left.loc[('a', 'w'), '3rd'] = 5 - left.loc[('a', 'a'), '3rd'] = 6 - - ts.loc[('b', 'x')] = 2 - ts.loc['b', 'a'] = -1 - ts.loc[('b', 'b')] = 3 - ts.loc['a', 'x'] = 4 - ts.loc[('a', 'w')] = 5 - ts.loc['a', 'a'] = 6 - - right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2], - ['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4], - ['a', 'w', 5], ['a', 'a', 6]], - columns=['1st', '2nd', '3rd']) - right.set_index(['1st', '2nd'], inplace=True) - # FIXME data types changes to float because - # of intermediate nan insertion; - tm.assert_frame_equal(left, right, check_dtype=False) - tm.assert_series_equal(ts, 
right['3rd']) - - # GH9250 - idx = [('test1', i) for i in range(5)] + \ - [('test2', i) for i in range(6)] + \ - [('test', 17), ('test', 18)] - - left = pd.Series(np.linspace(0, 10, 11), - pd.MultiIndex.from_tuples(idx[:-2])) - - left.loc[('test', 17)] = 11 - left.loc[('test', 18)] = 12 - - right = pd.Series(np.linspace(0, 12, 13), - pd.MultiIndex.from_tuples(idx)) - - tm.assert_series_equal(left, right) - - -def test_bounds(idx): - idx._bounds - - -def test_append(idx): - result = idx[:3].append(idx[3:]) - assert result.equals(idx) - - foos = [idx[:1], idx[1:3], idx[3:]] - result = foos[0].append(foos[1:]) - assert result.equals(idx) - - # empty - result = idx.append([]) - assert result.equals(idx) - - -def test_groupby(idx): - groups = idx.groupby(np.array([1, 1, 1, 2, 2, 2])) - labels = idx.get_values().tolist() - exp = {1: labels[:3], 2: labels[3:]} - tm.assert_dict_equal(groups, exp) - - # GH5620 - groups = idx.groupby(idx) - exp = {key: [key] for key in idx} - tm.assert_dict_equal(groups, exp) - - -def test_truncate(): - major_axis = Index(lrange(4)) - minor_axis = Index(lrange(2)) - - major_labels = np.array([0, 0, 1, 2, 3, 3]) - minor_labels = np.array([0, 1, 0, 1, 0, 1]) - - index = MultiIndex(levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels]) - - result = index.truncate(before=1) - assert 'foo' not in result.levels[0] - assert 1 in result.levels[0] - - result = index.truncate(after=1) - assert 2 not in result.levels[0] - assert 1 in result.levels[0] - - result = index.truncate(before=1, after=2) - assert len(result.levels[0]) == 2 - - # after < before - pytest.raises(ValueError, index.truncate, 3, 1) - - -def test_where(): - i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - - def f(): - i.where(True) - - pytest.raises(NotImplementedError, f) - - -def test_where_array_like(): - i = MultiIndex.from_tuples([('A', 1), ('A', 2)]) - klasses = [list, tuple, np.array, pd.Series] - cond = [False, True] - - for klass in klasses: - def f(): - 
return i.where(klass(cond)) - pytest.raises(NotImplementedError, f) - - -def test_reorder_levels(idx): - # this blows up - tm.assert_raises_regex(IndexError, '^Too many levels', - idx.reorder_levels, [2, 1, 0]) - - -def test_astype(idx): - expected = idx.copy() - actual = idx.astype('O') - assert_copy(actual.levels, expected.levels) - assert_copy(actual.labels, expected.labels) - check_level_names(actual, expected.names) - - with tm.assert_raises_regex(TypeError, "^Setting.*dtype.*object"): - idx.astype(np.dtype(int)) - - -@pytest.mark.parametrize('ordered', [True, False]) -def test_astype_category(idx, ordered): - # GH 18630 - msg = '> 1 ndim Categorical are not supported at this time' - with tm.assert_raises_regex(NotImplementedError, msg): - idx.astype(CategoricalDtype(ordered=ordered)) - - if ordered is False: - # dtype='category' defaults to ordered=False, so only test once - with tm.assert_raises_regex(NotImplementedError, msg): - idx.astype('category') - - -def test_repeat(): - reps = 2 - numbers = [1, 2, 3] - names = np.array(['foo', 'bar']) - - m = MultiIndex.from_product([ - numbers, names], names=names) - expected = MultiIndex.from_product([ - numbers, names.repeat(reps)], names=names) - tm.assert_index_equal(m.repeat(reps), expected) - - with tm.assert_produces_warning(FutureWarning): - result = m.repeat(n=reps) - tm.assert_index_equal(result, expected) - - -def test_numpy_repeat(): - reps = 2 - numbers = [1, 2, 3] - names = np.array(['foo', 'bar']) - - m = MultiIndex.from_product([ - numbers, names], names=names) - expected = MultiIndex.from_product([ - numbers, names.repeat(reps)], names=names) - tm.assert_index_equal(np.repeat(m, reps), expected) - - msg = "the 'axis' parameter is not supported" - tm.assert_raises_regex( - ValueError, msg, np.repeat, m, reps, axis=1) - - -def test_append_mixed_dtypes(): - # GH 13660 - dti = date_range('2011-01-01', freq='M', periods=3, ) - dti_tz = date_range('2011-01-01', freq='M', periods=3, tz='US/Eastern') - pi = 
period_range('2011-01', freq='M', periods=3) - - mi = MultiIndex.from_arrays([[1, 2, 3], - [1.1, np.nan, 3.3], - ['a', 'b', 'c'], - dti, dti_tz, pi]) - assert mi.nlevels == 6 - - res = mi.append(mi) - exp = MultiIndex.from_arrays([[1, 2, 3, 1, 2, 3], - [1.1, np.nan, 3.3, 1.1, np.nan, 3.3], - ['a', 'b', 'c', 'a', 'b', 'c'], - dti.append(dti), - dti_tz.append(dti_tz), - pi.append(pi)]) - tm.assert_index_equal(res, exp) - - other = MultiIndex.from_arrays([['x', 'y', 'z'], ['x', 'y', 'z'], - ['x', 'y', 'z'], ['x', 'y', 'z'], - ['x', 'y', 'z'], ['x', 'y', 'z']]) - - res = mi.append(other) - exp = MultiIndex.from_arrays([[1, 2, 3, 'x', 'y', 'z'], - [1.1, np.nan, 3.3, 'x', 'y', 'z'], - ['a', 'b', 'c', 'x', 'y', 'z'], - dti.append(pd.Index(['x', 'y', 'z'])), - dti_tz.append(pd.Index(['x', 'y', 'z'])), - pi.append(pd.Index(['x', 'y', 'z']))]) - tm.assert_index_equal(res, exp) - - -def test_take(idx): - indexer = [4, 3, 0, 2] - result = idx.take(indexer) - expected = idx[indexer] - assert result.equals(expected) - - if not isinstance(idx, - (DatetimeIndex, PeriodIndex, TimedeltaIndex)): - # GH 10791 - with pytest.raises(AttributeError): - idx.freq - - -def test_take_invalid_kwargs(idx): - idx = idx - indices = [1, 2] - - msg = r"take\(\) got an unexpected keyword argument 'foo'" - tm.assert_raises_regex(TypeError, msg, idx.take, - indices, foo=2) - - msg = "the 'out' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, out=indices) - - msg = "the 'mode' parameter is not supported" - tm.assert_raises_regex(ValueError, msg, idx.take, - indices, mode='clip') - - -def test_take_fill_value(): - # GH 12631 - vals = [['A', 'B'], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]] - idx = pd.MultiIndex.from_product(vals, names=['str', 'dt']) - - result = idx.take(np.array([1, 0, -1])) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - ('B', pd.Timestamp('2011-01-02'))] - expected = 
pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - # fill_value - result = idx.take(np.array([1, 0, -1]), fill_value=True) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - (np.nan, pd.NaT)] - expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - # allow_fill=False - result = idx.take(np.array([1, 0, -1]), allow_fill=False, - fill_value=True) - exp_vals = [('A', pd.Timestamp('2011-01-02')), - ('A', pd.Timestamp('2011-01-01')), - ('B', pd.Timestamp('2011-01-02'))] - expected = pd.MultiIndex.from_tuples(exp_vals, names=['str', 'dt']) - tm.assert_index_equal(result, expected) - - msg = ('When allow_fill=True and fill_value is not None, ' - 'all indices must be >= -1') - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -2]), fill_value=True) - with tm.assert_raises_regex(ValueError, msg): - idx.take(np.array([1, 0, -5]), fill_value=True) - - with pytest.raises(IndexError): - idx.take(np.array([1, -5])) - - -def test_iter(idx): - result = list(idx) - expected = [('foo', 'one'), ('foo', 'two'), ('bar', 'one'), - ('baz', 'two'), ('qux', 'one'), ('qux', 'two')] - assert result == expected - - -def test_sub(idx): - - first = idx - - # - now raises (previously was set op difference) - with pytest.raises(TypeError): - first - idx[-3:] - with pytest.raises(TypeError): - idx[-3:] - first - with pytest.raises(TypeError): - idx[-3:] - first.tolist() - with pytest.raises(TypeError): - first.tolist() - idx[-3:] - - -def test_argsort(idx): - result = idx.argsort() - expected = idx.values.argsort() - tm.assert_numpy_array_equal(result, expected) - - -def test_map(idx): - # callable - index = idx - - # we don't infer UInt64 - if isinstance(index, pd.UInt64Index): - expected = index.astype('int64') - else: - expected = index - - result = index.map(lambda x: x) - tm.assert_index_equal(result, expected) - - 
-@pytest.mark.parametrize( - "mapper", - [ - lambda values, idx: {i: e for e, i in zip(values, idx)}, - lambda values, idx: pd.Series(values, idx)]) -def test_map_dictlike(idx, mapper): - - if isinstance(idx, (pd.CategoricalIndex, pd.IntervalIndex)): - pytest.skip("skipping tests for {}".format(type(idx))) - - identity = mapper(idx.values, idx) - - # we don't infer to UInt64 for a dict - if isinstance(idx, pd.UInt64Index) and isinstance(identity, dict): - expected = idx.astype('int64') - else: - expected = idx - - result = idx.map(identity) - tm.assert_index_equal(result, expected) - - # empty mappable - expected = pd.Index([np.nan] * len(idx)) - result = idx.map(mapper(expected, idx)) - tm.assert_index_equal(result, expected) - - -def test_numpy_ufuncs(idx): - # test ufuncs of numpy 1.9.2. see: - # http://docs.scipy.org/doc/numpy/reference/ufuncs.html - - # some functions are skipped because it may return different result - # for unicode input depending on numpy version - - for func in [np.exp, np.exp2, np.expm1, np.log, np.log2, np.log10, - np.log1p, np.sqrt, np.sin, np.cos, np.tan, np.arcsin, - np.arccos, np.arctan, np.sinh, np.cosh, np.tanh, - np.arcsinh, np.arccosh, np.arctanh, np.deg2rad, - np.rad2deg]: - if isinstance(idx, DatetimeIndexOpsMixin): - # raise TypeError or ValueError (PeriodIndex) - # PeriodIndex behavior should be changed in future version - with pytest.raises(Exception): - with np.errstate(all='ignore'): - func(idx) - elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)): - # coerces to float (e.g. 
np.sin) - with np.errstate(all='ignore'): - result = func(idx) - exp = Index(func(idx.values), name=idx.name) - - tm.assert_index_equal(result, exp) - assert isinstance(result, pd.Float64Index) - else: - # raise AttributeError or TypeError - if len(idx) == 0: - continue - else: - with pytest.raises(Exception): - with np.errstate(all='ignore'): - func(idx) - - for func in [np.isfinite, np.isinf, np.isnan, np.signbit]: - if isinstance(idx, DatetimeIndexOpsMixin): - # raise TypeError or ValueError (PeriodIndex) - with pytest.raises(Exception): - func(idx) - elif isinstance(idx, (Float64Index, Int64Index, UInt64Index)): - # Results in bool array - result = func(idx) - assert isinstance(result, np.ndarray) - assert not isinstance(result, Index) - else: - if len(idx) == 0: - continue - else: - with pytest.raises(Exception): - func(idx) diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py new file mode 100644 index 0000000000000..85eec6a232180 --- /dev/null +++ b/pandas/tests/indexes/multi/test_reshape.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- + + +import numpy as np +import pytest + +import pandas as pd +import pandas.util.testing as tm +from pandas import Index, MultiIndex + + +def test_insert(idx): + # key contained in all levels + new_index = idx.insert(0, ('bar', 'two')) + assert new_index.equal_levels(idx) + assert new_index[0] == ('bar', 'two') + + # key not contained in all levels + new_index = idx.insert(0, ('abc', 'three')) + + exp0 = Index(list(idx.levels[0]) + ['abc'], name='first') + tm.assert_index_equal(new_index.levels[0], exp0) + + exp1 = Index(list(idx.levels[1]) + ['three'], name='second') + tm.assert_index_equal(new_index.levels[1], exp1) + assert new_index[0] == ('abc', 'three') + + # key wrong length + msg = "Item must have length equal to number of levels" + with tm.assert_raises_regex(ValueError, msg): + idx.insert(0, ('foo2',)) + + left = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1]], + 
columns=['1st', '2nd', '3rd']) + left.set_index(['1st', '2nd'], inplace=True) + ts = left['3rd'].copy(deep=True) + + left.loc[('b', 'x'), '3rd'] = 2 + left.loc[('b', 'a'), '3rd'] = -1 + left.loc[('b', 'b'), '3rd'] = 3 + left.loc[('a', 'x'), '3rd'] = 4 + left.loc[('a', 'w'), '3rd'] = 5 + left.loc[('a', 'a'), '3rd'] = 6 + + ts.loc[('b', 'x')] = 2 + ts.loc['b', 'a'] = -1 + ts.loc[('b', 'b')] = 3 + ts.loc['a', 'x'] = 4 + ts.loc[('a', 'w')] = 5 + ts.loc['a', 'a'] = 6 + + right = pd.DataFrame([['a', 'b', 0], ['b', 'd', 1], ['b', 'x', 2], + ['b', 'a', -1], ['b', 'b', 3], ['a', 'x', 4], + ['a', 'w', 5], ['a', 'a', 6]], + columns=['1st', '2nd', '3rd']) + right.set_index(['1st', '2nd'], inplace=True) + # FIXME data types changes to float because + # of intermediate nan insertion; + tm.assert_frame_equal(left, right, check_dtype=False) + tm.assert_series_equal(ts, right['3rd']) + + # GH9250 + idx = [('test1', i) for i in range(5)] + \ + [('test2', i) for i in range(6)] + \ + [('test', 17), ('test', 18)] + + left = pd.Series(np.linspace(0, 10, 11), + pd.MultiIndex.from_tuples(idx[:-2])) + + left.loc[('test', 17)] = 11 + left.loc[('test', 18)] = 12 + + right = pd.Series(np.linspace(0, 12, 13), + pd.MultiIndex.from_tuples(idx)) + + tm.assert_series_equal(left, right) + + +def test_append(idx): + result = idx[:3].append(idx[3:]) + assert result.equals(idx) + + foos = [idx[:1], idx[1:3], idx[3:]] + result = foos[0].append(foos[1:]) + assert result.equals(idx) + + # empty + result = idx.append([]) + assert result.equals(idx) + + +def test_repeat(): + reps = 2 + numbers = [1, 2, 3] + names = np.array(['foo', 'bar']) + + m = MultiIndex.from_product([ + numbers, names], names=names) + expected = MultiIndex.from_product([ + numbers, names.repeat(reps)], names=names) + tm.assert_index_equal(m.repeat(reps), expected) + + with tm.assert_produces_warning(FutureWarning): + result = m.repeat(n=reps) + tm.assert_index_equal(result, expected) + + +def test_insert_base(idx): + + result = 
idx[1:4] + + # test 0th element + assert idx[0:4].equals(result.insert(0, idx[0])) + + +def test_delete_base(idx): + + expected = idx[1:] + result = idx.delete(0) + assert result.equals(expected) + assert result.name == expected.name + + expected = idx[:-1] + result = idx.delete(-1) + assert result.equals(expected) + assert result.name == expected.name + + with pytest.raises((IndexError, ValueError)): + # either depending on numpy version + result = idx.delete(len(idx)) diff --git a/pandas/tests/indexes/multi/test_set_ops.py b/pandas/tests/indexes/multi/test_set_ops.py index 79a3837aac7f8..3f61cf2b6ff3f 100644 --- a/pandas/tests/indexes/multi/test_set_ops.py +++ b/pandas/tests/indexes/multi/test_set_ops.py @@ -1,11 +1,9 @@ # -*- coding: utf-8 -*- - import numpy as np import pandas as pd import pandas.util.testing as tm -from pandas import (CategoricalIndex, DatetimeIndex, MultiIndex, PeriodIndex, - Series, TimedeltaIndex) +from pandas import MultiIndex, Series def test_setops_errorcases(idx): @@ -27,29 +25,18 @@ def test_intersection_base(idx): second = idx[:3] intersect = first.intersection(second) - if isinstance(idx, CategoricalIndex): - pass - else: - assert tm.equalContents(intersect, second) + assert tm.equalContents(intersect, second) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - if isinstance(idx, PeriodIndex): - msg = "can only call with other PeriodIndex-ed objects" - with tm.assert_raises_regex(ValueError, msg): - result = first.intersection(case) - elif isinstance(idx, CategoricalIndex): - pass - else: - result = first.intersection(case) - assert tm.equalContents(result, second) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with tm.assert_raises_regex(TypeError, msg): - result = first.intersection([1, 2, 3]) + result = first.intersection(case) + assert tm.equalContents(result, second) + + msg = "other must be a MultiIndex or a list of tuples" + with 
tm.assert_raises_regex(TypeError, msg): + result = first.intersection([1, 2, 3]) def test_union_base(idx): @@ -63,20 +50,12 @@ def test_union_base(idx): cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - if isinstance(idx, PeriodIndex): - msg = "can only call with other PeriodIndex-ed objects" - with tm.assert_raises_regex(ValueError, msg): - result = first.union(case) - elif isinstance(idx, CategoricalIndex): - pass - else: - result = first.union(case) - assert tm.equalContents(result, everything) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with tm.assert_raises_regex(TypeError, msg): - result = first.union([1, 2, 3]) + result = first.union(case) + assert tm.equalContents(result, everything) + + msg = "other must be a MultiIndex or a list of tuples" + with tm.assert_raises_regex(TypeError, msg): + result = first.union([1, 2, 3]) def test_difference_base(idx): @@ -85,63 +64,37 @@ def test_difference_base(idx): answer = idx[4:] result = first.difference(second) - if isinstance(idx, CategoricalIndex): - pass - else: - assert tm.equalContents(result, answer) + assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - if isinstance(idx, PeriodIndex): - msg = "can only call with other PeriodIndex-ed objects" - with tm.assert_raises_regex(ValueError, msg): - result = first.difference(case) - elif isinstance(idx, CategoricalIndex): - pass - elif isinstance(idx, (DatetimeIndex, TimedeltaIndex)): - assert result.__class__ == answer.__class__ - tm.assert_numpy_array_equal(result.sort_values().asi8, - answer.sort_values().asi8) - else: - result = first.difference(case) - assert tm.equalContents(result, answer) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with tm.assert_raises_regex(TypeError, msg): - result = first.difference([1, 2, 3]) + result = 
first.difference(case) + assert tm.equalContents(result, answer) + + msg = "other must be a MultiIndex or a list of tuples" + with tm.assert_raises_regex(TypeError, msg): + result = first.difference([1, 2, 3]) def test_symmetric_difference(idx): first = idx[1:] second = idx[:-1] - if isinstance(idx, CategoricalIndex): - pass - else: - answer = idx[[0, -1]] - result = first.symmetric_difference(second) - assert tm.equalContents(result, answer) + answer = idx[[0, -1]] + result = first.symmetric_difference(second) + assert tm.equalContents(result, answer) # GH 10149 cases = [klass(second.values) for klass in [np.array, Series, list]] for case in cases: - if isinstance(idx, PeriodIndex): - msg = "can only call with other PeriodIndex-ed objects" - with tm.assert_raises_regex(ValueError, msg): - result = first.symmetric_difference(case) - elif isinstance(idx, CategoricalIndex): - pass - else: - result = first.symmetric_difference(case) - assert tm.equalContents(result, answer) - - if isinstance(idx, MultiIndex): - msg = "other must be a MultiIndex or a list of tuples" - with tm.assert_raises_regex(TypeError, msg): - first.symmetric_difference([1, 2, 3]) + result = first.symmetric_difference(case) + assert tm.equalContents(result, answer) + + msg = "other must be a MultiIndex or a list of tuples" + with tm.assert_raises_regex(TypeError, msg): + first.symmetric_difference([1, 2, 3]) def test_empty(idx): diff --git a/pandas/tests/indexes/multi/test_sorting.py b/pandas/tests/indexes/multi/test_sorting.py index d6165c17c6717..ee29ea1be8aea 100644 --- a/pandas/tests/indexes/multi/test_sorting.py +++ b/pandas/tests/indexes/multi/test_sorting.py @@ -215,7 +215,8 @@ def test_reconstruct_remove_unused(): @pytest.mark.parametrize('first_type,second_type', [ ('int64', 'int64'), - ('datetime64[D]', 'str')]) + ('datetime64[D]', 'str') +]) def test_remove_unused_levels_large(first_type, second_type): # GH16556 @@ -254,3 +255,9 @@ def test_remove_unused_nan(level0, level1): 
tm.assert_index_equal(result, mi) for level in 0, 1: assert('unused' not in result.levels[level]) + + +def test_argsort(idx): + result = idx.argsort() + expected = idx.values.argsort() + tm.assert_numpy_array_equal(result, expected)
- [x] closes #21918 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry - N/A Addresses most of the issues. Left blowing away the old pickles for another issue which is still open.
https://api.github.com/repos/pandas-dev/pandas/pulls/21928
2018-07-16T03:25:41Z
2018-07-24T22:01:57Z
2018-07-24T22:01:57Z
2018-07-24T22:02:05Z
Docstrings, de-duplicate EAMixin/DatetimeLikeIndex __new__ code
diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index fe4e461b0bd4f..aadfbdd4303c4 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -634,6 +634,7 @@ class ExtensionOpsMixin(object): """ A base class for linking the operators to their dunder names """ + @classmethod def _add_arithmetic_ops(cls): cls.__add__ = cls._create_arithmetic_method(operator.add) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ec430e4bf17b1..7bb1c45998eb2 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -5,6 +5,7 @@ import numpy as np from pandas._libs import lib, iNaT, NaT +from pandas._libs.tslibs import timezones from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds, Timedelta from pandas._libs.tslibs.period import ( DIFFERENT_FREQ_INDEX, IncompatibleFrequency) @@ -13,7 +14,7 @@ from pandas import compat from pandas.tseries import frequencies -from pandas.tseries.offsets import Tick +from pandas.tseries.offsets import Tick, DateOffset from pandas.core.dtypes.common import ( needs_i8_conversion, @@ -23,10 +24,13 @@ is_timedelta64_dtype, is_object_dtype) from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame, ABCIndexClass +from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas.core.common as com from pandas.core.algorithms import checked_add_with_arr +from .base import ExtensionOpsMixin + def _make_comparison_op(op, cls): # TODO: share code with indexes.base version? 
Main difference is that @@ -87,7 +91,7 @@ def _shallow_copy(self, values=None, **kwargs): return self._simple_new(values, **attributes) -class DatetimeLikeArrayMixin(AttributesMixin): +class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin): """ Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray @@ -464,7 +468,10 @@ def _addsub_offset_array(self, other, op): "{cls} not vectorized" .format(cls=type(self).__name__), PerformanceWarning) - res_values = op(self.astype('O').values, np.array(other)) + # For EA self.astype('O') returns a numpy array, not an Index + left = lib.values_from_object(self.astype('O')) + + res_values = op(left, np.array(other)) kwargs = {} if not is_period_dtype(self): kwargs['freq'] = 'infer' @@ -551,3 +558,96 @@ def validate_periods(periods): raise TypeError('periods must be a number, got {periods}' .format(periods=periods)) return periods + + +def validate_endpoints(closed): + """ + Check that the `closed` argument is among [None, "left", "right"] + + Parameters + ---------- + closed : {None, "left", "right"} + + Returns + ------- + left_closed : bool + right_closed : bool + + Raises + ------ + ValueError : if argument is not among valid values + """ + left_closed = False + right_closed = False + + if closed is None: + left_closed = True + right_closed = True + elif closed == "left": + left_closed = True + elif closed == "right": + right_closed = True + else: + raise ValueError("Closed has to be either 'left', 'right' or None") + + return left_closed, right_closed + + +def maybe_infer_freq(freq): + """ + Comparing a DateOffset to the string "infer" raises, so we need to + be careful about comparisons. Make a dummy variable `freq_infer` to + signify the case where the given freq is "infer" and set freq to None + to avoid comparison trouble later on. 
+ + Parameters + ---------- + freq : {DateOffset, None, str} + + Returns + ------- + freq : {DateOffset, None} + freq_infer : bool + """ + freq_infer = False + if not isinstance(freq, DateOffset): + # if a passed freq is None, don't infer automatically + if freq != 'infer': + freq = frequencies.to_offset(freq) + else: + freq_infer = True + freq = None + return freq, freq_infer + + +def validate_tz_from_dtype(dtype, tz): + """ + If the given dtype is a DatetimeTZDtype, extract the implied + tzinfo object from it and check that it does not conflict with the given + tz. + + Parameters + ---------- + dtype : dtype, str + tz : None, tzinfo + + Returns + ------- + tz : consensus tzinfo + + Raises + ------ + ValueError : on tzinfo mismatch + """ + if dtype is not None: + try: + dtype = DatetimeTZDtype.construct_from_string(dtype) + dtz = getattr(dtype, 'tz', None) + if dtz is not None: + if tz is not None and not timezones.tz_compare(tz, dtz): + raise ValueError("cannot supply both a tz and a dtype" + " with a tz") + tz = dtz + except TypeError: + pass + return tz diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index c5e85cb5892f4..78e6d1f222160 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -29,7 +29,7 @@ import pandas.core.common as com from pandas.core.algorithms import checked_add_with_arr -from pandas.tseries.frequencies import to_offset, DateOffset +from pandas.tseries.frequencies import to_offset from pandas.tseries.offsets import Tick from pandas.core.arrays import datetimelike as dtl @@ -84,10 +84,11 @@ def f(self): return property(f) -def _dt_array_cmp(opname, cls): +def _dt_array_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """ + opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False def wrapper(self, other): @@ -181,12 +182,10 @@ def __new__(cls, values, freq=None, tz=None): # e.g. 
DatetimeIndex tz = values.tz - if (freq is not None and not isinstance(freq, DateOffset) and - freq != 'infer'): - freq = to_offset(freq) + freq, freq_infer = dtl.maybe_infer_freq(freq) result = cls._simple_new(values, freq=freq, tz=tz) - if freq == 'infer': + if freq_infer: inferred = result.inferred_freq if inferred: result.freq = to_offset(inferred) @@ -289,17 +288,7 @@ def __iter__(self): # ----------------------------------------------------------------- # Comparison Methods - @classmethod - def _add_comparison_methods(cls): - """add in comparison methods""" - cls.__eq__ = _dt_array_cmp('__eq__', cls) - cls.__ne__ = _dt_array_cmp('__ne__', cls) - cls.__lt__ = _dt_array_cmp('__lt__', cls) - cls.__gt__ = _dt_array_cmp('__gt__', cls) - cls.__le__ = _dt_array_cmp('__le__', cls) - cls.__ge__ = _dt_array_cmp('__ge__', cls) - # TODO: Some classes pass __eq__ while others pass operator.eq; - # standardize this. + _create_comparison_method = classmethod(_dt_array_cmp) def _has_same_tz(self, other): zzone = self._timezone @@ -441,14 +430,7 @@ def _local_timestamps(self): This is used to calculate time-of-day information as if the timestamps were timezone-naive. 
""" - values = self.asi8 - indexer = values.argsort() - result = conversion.tz_convert(values.take(indexer), utc, self.tz) - - n = len(indexer) - reverse = np.empty(n, dtype=np.int_) - reverse.put(indexer, np.arange(n)) - return result.take(reverse) + return conversion.tz_convert(self.asi8, utc, self.tz) def tz_convert(self, tz): """ @@ -1102,4 +1084,4 @@ def to_julian_date(self): ) / 24.0) -DatetimeArrayMixin._add_comparison_methods() +DatetimeArrayMixin._add_comparison_ops() diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 66b1fb8db25c0..cb5afa34add2a 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -40,10 +40,11 @@ def f(self): return property(f) -def _period_array_cmp(opname, cls): +def _period_array_cmp(cls, op): """ Wrap comparison operations to convert Period-like to PeriodDtype """ + opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False def wrapper(self, other): @@ -268,6 +269,8 @@ def asfreq(self, freq=None, how='E'): # ------------------------------------------------------------------ # Arithmetic Methods + _create_comparison_method = classmethod(_period_array_cmp) + def _sub_datelike(self, other): assert other is not NaT return NotImplemented @@ -381,18 +384,8 @@ def _maybe_convert_timedelta(self, other): raise IncompatibleFrequency(msg.format(cls=type(self).__name__, freqstr=self.freqstr)) - @classmethod - def _add_comparison_methods(cls): - """ add in comparison methods """ - cls.__eq__ = _period_array_cmp('__eq__', cls) - cls.__ne__ = _period_array_cmp('__ne__', cls) - cls.__lt__ = _period_array_cmp('__lt__', cls) - cls.__gt__ = _period_array_cmp('__gt__', cls) - cls.__le__ = _period_array_cmp('__le__', cls) - cls.__ge__ = _period_array_cmp('__ge__', cls) - -PeriodArrayMixin._add_comparison_methods() +PeriodArrayMixin._add_comparison_ops() # ------------------------------------------------------------------- diff --git 
a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index a28f7fc9c32fa..efa7c0b0e44d4 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -17,7 +17,7 @@ import pandas.core.common as com -from pandas.tseries.offsets import Tick, DateOffset +from pandas.tseries.offsets import Tick from pandas.tseries.frequencies import to_offset from . import datetimelike as dtl @@ -54,10 +54,11 @@ def f(self): return property(f) -def _td_array_cmp(opname, cls): +def _td_array_cmp(cls, op): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ + opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False def wrapper(self, other): @@ -126,25 +127,23 @@ def _simple_new(cls, values, freq=None, **kwargs): def __new__(cls, values, freq=None, start=None, end=None, periods=None, closed=None): - if (freq is not None and not isinstance(freq, DateOffset) and - freq != 'infer'): - freq = to_offset(freq) - periods = dtl.validate_periods(periods) + freq, freq_infer = dtl.maybe_infer_freq(freq) if values is None: + # TODO: Remove this block and associated kwargs; GH#20535 if freq is None and com._any_none(periods, start, end): raise ValueError('Must provide freq argument if no data is ' 'supplied') - else: - return cls._generate_range(start, end, periods, freq, - closed=closed) + periods = dtl.validate_periods(periods) + return cls._generate_range(start, end, periods, freq, + closed=closed) result = cls._simple_new(values, freq=freq) - if freq == 'infer': + if freq_infer: inferred = result.inferred_freq if inferred: - result._freq = to_offset(inferred) + result.freq = to_offset(inferred) return result @@ -161,23 +160,12 @@ def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): if end is not None: end = Timedelta(end) - left_closed = False - right_closed = False - if start is None and end is None: if closed is not None: raise ValueError("Closed has to be None if 
not both of start" "and end are defined") - if closed is None: - left_closed = True - right_closed = True - elif closed == "left": - left_closed = True - elif closed == "right": - right_closed = True - else: - raise ValueError("Closed has to be either 'left', 'right' or None") + left_closed, right_closed = dtl.validate_endpoints(closed) if freq is not None: index = _generate_regular_range(start, end, periods, freq) @@ -197,6 +185,8 @@ def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): # ---------------------------------------------------------------- # Arithmetic Methods + _create_comparison_method = classmethod(_td_array_cmp) + def _add_offset(self, other): assert not isinstance(other, Tick) raise TypeError("cannot add the type {typ} to a {cls}" @@ -266,19 +256,6 @@ def _evaluate_with_timedelta_like(self, other, op): return NotImplemented - # ---------------------------------------------------------------- - # Comparison Methods - - @classmethod - def _add_comparison_methods(cls): - """add in comparison methods""" - cls.__eq__ = _td_array_cmp('__eq__', cls) - cls.__ne__ = _td_array_cmp('__ne__', cls) - cls.__lt__ = _td_array_cmp('__lt__', cls) - cls.__gt__ = _td_array_cmp('__gt__', cls) - cls.__le__ = _td_array_cmp('__le__', cls) - cls.__ge__ = _td_array_cmp('__ge__', cls) - # ---------------------------------------------------------------- # Conversion Methods - Vectorized analogues of Timedelta methods @@ -392,7 +369,7 @@ def f(x): return result -TimedeltaArrayMixin._add_comparison_methods() +TimedeltaArrayMixin._add_comparison_ops() # --------------------------------------------------------------------- diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 3ae5eb3a8dbf5..8f05a9a887830 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -789,9 +789,8 @@ def shift(self, n, freq=None): start = self[0] + n * self.freq end = self[-1] + n * self.freq attribs = 
self._get_attributes_dict() - attribs['start'] = start - attribs['end'] = end - return type(self)(**attribs) + return self._generate_range(start=start, end=end, periods=None, + **attribs) def repeat(self, repeats, *args, **kwargs): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 7257be421c3e1..6ed752d3a213d 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -27,7 +27,6 @@ pandas_dtype, ensure_int64) from pandas.core.dtypes.generic import ABCSeries -from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat @@ -41,7 +40,7 @@ from pandas.core.indexes.datetimelike import ( DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin) from pandas.tseries.offsets import ( - DateOffset, generate_range, Tick, CDay, prefix_mapping) + generate_range, Tick, CDay, prefix_mapping) from pandas.core.tools.timedeltas import to_timedelta from pandas.util._decorators import ( @@ -84,10 +83,12 @@ def func(self, *args, **kwargs): return func -def _dt_index_cmp(opname, cls): +def _dt_index_cmp(cls, op): """ Wrap comparison operations to convert datetime-like to datetime64 """ + opname = '__{name}__'.format(name=op.__name__) + def wrapper(self, other): result = getattr(DatetimeArrayMixin, opname)(self, other) if is_bool_dtype(result): @@ -238,12 +239,12 @@ def _join_i8_wrapper(joinf, **kwargs): @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ - cls.__eq__ = _dt_index_cmp('__eq__', cls) - cls.__ne__ = _dt_index_cmp('__ne__', cls) - cls.__lt__ = _dt_index_cmp('__lt__', cls) - cls.__gt__ = _dt_index_cmp('__gt__', cls) - cls.__le__ = _dt_index_cmp('__le__', cls) - cls.__ge__ = _dt_index_cmp('__ge__', cls) + cls.__eq__ = _dt_index_cmp(cls, operator.eq) + cls.__ne__ = _dt_index_cmp(cls, operator.ne) + cls.__lt__ = _dt_index_cmp(cls, operator.lt) + cls.__gt__ = _dt_index_cmp(cls, operator.gt) + cls.__le__ = 
_dt_index_cmp(cls, operator.le) + cls.__ge__ = _dt_index_cmp(cls, operator.ge) _engine_type = libindex.DatetimeEngine @@ -289,39 +290,20 @@ def __new__(cls, data=None, if name is None and hasattr(data, 'name'): name = data.name - freq_infer = False - if not isinstance(freq, DateOffset): - - # if a passed freq is None, don't infer automatically - if freq != 'infer': - freq = to_offset(freq) - else: - freq_infer = True - freq = None - - periods = dtl.validate_periods(periods) + freq, freq_infer = dtl.maybe_infer_freq(freq) # if dtype has an embedded tz, capture it - if dtype is not None: - try: - dtype = DatetimeTZDtype.construct_from_string(dtype) - dtz = getattr(dtype, 'tz', None) - if dtz is not None: - if tz is not None and str(tz) != str(dtz): - raise ValueError("cannot supply both a tz and a dtype" - " with a tz") - tz = dtz - except TypeError: - pass + tz = dtl.validate_tz_from_dtype(dtype, tz) if data is None: + # TODO: Remove this block and associated kwargs; GH#20535 if freq is None and com._any_none(periods, start, end): - msg = 'Must provide freq argument if no data is supplied' - raise ValueError(msg) - else: - return cls._generate_range(start, end, periods, name, freq, - tz=tz, normalize=normalize, - closed=closed, ambiguous=ambiguous) + raise ValueError('Must provide freq argument if no data is ' + 'supplied') + periods = dtl.validate_periods(periods) + return cls._generate_range(start, end, periods, name, freq, + tz=tz, normalize=normalize, + closed=closed, ambiguous=ambiguous) if not isinstance(data, (np.ndarray, Index, ABCSeries)): if is_scalar(data): @@ -407,23 +389,12 @@ def _generate_range(cls, start, end, periods, name, freq, tz=None, if end is not None: end = Timestamp(end) - left_closed = False - right_closed = False - if start is None and end is None: if closed is not None: raise ValueError("Closed has to be None if not both of start" "and end are defined") - if closed is None: - left_closed = True - right_closed = True - elif closed == 
"left": - left_closed = True - elif closed == "right": - right_closed = True - else: - raise ValueError("Closed has to be either 'left', 'right' or None") + left_closed, right_closed = dtl.validate_endpoints(closed) try: inferred_tz = timezones.infer_tzinfo(start, end) @@ -540,12 +511,6 @@ def _convert_for_op(self, value): return _to_m8(value) raise ValueError('Passed item and index have different timezone') - def _local_timestamps(self): - if self.is_monotonic: - return conversion.tz_convert(self.asi8, utc, self.tz) - else: - return DatetimeArrayMixin._local_timestamps(self) - @classmethod def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None, **kwargs): @@ -1744,7 +1709,6 @@ def _generate_regular_range(cls, start, end, periods, freq): "if a 'period' is given.") data = np.arange(b, e, stride, dtype=np.int64) - # TODO: Do we need to use _simple_new here? just return data.view? data = cls._simple_new(data.view(_NS_DTYPE), None, tz=tz) else: if isinstance(start, Timestamp): diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4d8e57820f29d..350c609acbb4f 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -804,7 +804,7 @@ def __setstate__(self, state): _unpickle_compat = __setstate__ -PeriodIndex._add_comparison_methods() +PeriodIndex._add_comparison_ops() PeriodIndex._add_numeric_methods_disabled() PeriodIndex._add_logical_methods_disabled() PeriodIndex._add_datetimelike_methods() diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index dc26c9cc0c248..af34ec8b22824 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,4 +1,5 @@ """ implement the TimedeltaIndex """ +import operator import numpy as np from pandas.core.dtypes.common import ( @@ -34,7 +35,6 @@ TimelikeOps, DatetimeIndexOpsMixin) from pandas.core.tools.timedeltas import ( to_timedelta, _coerce_scalar_to_timedelta_type) -from pandas.tseries.offsets import 
DateOffset from pandas._libs import (lib, index as libindex, join as libjoin, Timedelta, NaT, iNaT) @@ -51,10 +51,12 @@ def f(self): return property(f) -def _td_index_cmp(opname, cls): +def _td_index_cmp(cls, op): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ + opname = '__{name}__'.format(name=op.__name__) + def wrapper(self, other): result = getattr(TimedeltaArrayMixin, opname)(self, other) if is_bool_dtype(result): @@ -155,12 +157,12 @@ def _join_i8_wrapper(joinf, **kwargs): @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ - cls.__eq__ = _td_index_cmp('__eq__', cls) - cls.__ne__ = _td_index_cmp('__ne__', cls) - cls.__lt__ = _td_index_cmp('__lt__', cls) - cls.__gt__ = _td_index_cmp('__gt__', cls) - cls.__le__ = _td_index_cmp('__le__', cls) - cls.__ge__ = _td_index_cmp('__ge__', cls) + cls.__eq__ = _td_index_cmp(cls, operator.eq) + cls.__ne__ = _td_index_cmp(cls, operator.ne) + cls.__lt__ = _td_index_cmp(cls, operator.lt) + cls.__gt__ = _td_index_cmp(cls, operator.gt) + cls.__le__ = _td_index_cmp(cls, operator.le) + cls.__ge__ = _td_index_cmp(cls, operator.ge) _engine_type = libindex.TimedeltaEngine @@ -181,25 +183,16 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, else: return data._shallow_copy() - freq_infer = False - if not isinstance(freq, DateOffset): - - # if a passed freq is None, don't infer automatically - if freq != 'infer': - freq = to_offset(freq) - else: - freq_infer = True - freq = None - - periods = dtl.validate_periods(periods) + freq, freq_infer = dtl.maybe_infer_freq(freq) if data is None: + # TODO: Remove this block and associated kwargs; GH#20535 if freq is None and com._any_none(periods, start, end): - msg = 'Must provide freq argument if no data is supplied' - raise ValueError(msg) - else: - return cls._generate_range(start, end, periods, name, freq, - closed=closed) + raise ValueError('Must provide freq argument if no data is ' + 'supplied') + periods = 
dtl.validate_periods(periods) + return cls._generate_range(start, end, periods, name, freq, + closed=closed) if unit is not None: data = to_timedelta(data, unit=unit, box=False) @@ -226,7 +219,6 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, inferred = subarr.inferred_freq if inferred: subarr.freq = to_offset(inferred) - return subarr return subarr diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index 387a70fe37253..59cd4743f857b 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -250,7 +250,8 @@ def infer_freq(index, warn=True): if is_period_arraylike(index): raise TypeError("PeriodIndex given. Check the `freq` attribute " "instead of using infer_freq.") - elif isinstance(index, pd.TimedeltaIndex): + elif is_timedelta64_dtype(index): + # Allow TimedeltaIndex and TimedeltaArray inferer = _TimedeltaFrequencyInferer(index, warn=warn) return inferer.get_freq()
There's a lot of duplication in the constructors and constructor-helpers. This starts to whittle that down, writes some docstrings long the way. Also use `ExtensionOpsMixin` to define comparison operators on the EAMixin classes. We determined that the DatetimeIndex._local_timestamps method had an unecessary monotonicy check, so took that out.
https://api.github.com/repos/pandas-dev/pandas/pulls/21926
2018-07-16T00:26:50Z
2018-07-20T20:43:38Z
2018-07-20T20:43:38Z
2018-07-20T21:03:21Z
move rename functionality out of internals
diff --git a/.coveragerc b/.coveragerc index f5c8b701a79a8..13baa100b84b7 100644 --- a/.coveragerc +++ b/.coveragerc @@ -17,6 +17,7 @@ exclude_lines = # Don't complain if tests don't hit defensive assertion code: raise AssertionError raise NotImplementedError + AbstractMethodError # Don't complain if non-runnable code isn't run: if 0: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 85bd6065314f4..c5ca6eafdb427 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -289,10 +289,7 @@ def set_axis(a, i): for i, a in cls._AXIS_NAMES.items(): set_axis(a, i) - # addtl parms - if isinstance(ns, dict): - for k, v in ns.items(): - setattr(cls, k, v) + assert not isinstance(ns, dict) def _construct_axes_dict(self, axes=None, **kwargs): """Return an axes dictionary for myself.""" @@ -3406,8 +3403,10 @@ def add_prefix(self, prefix): 2 3 5 3 4 6 """ - new_data = self._data.add_prefix(prefix) - return self._constructor(new_data).__finalize__(self) + f = functools.partial('{prefix}{}'.format, prefix=prefix) + + mapper = {self._info_axis_name: f} + return self.rename(**mapper) def add_suffix(self, suffix): """ @@ -3463,8 +3462,10 @@ def add_suffix(self, suffix): 2 3 5 3 4 6 """ - new_data = self._data.add_suffix(suffix) - return self._constructor(new_data).__finalize__(self) + f = functools.partial('{}{suffix}'.format, suffix=suffix) + + mapper = {self._info_axis_name: f} + return self.rename(**mapper) _shared_docs['sort_values'] = """ Sort by the values along either axis @@ -3980,6 +3981,7 @@ def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, return self._constructor(new_data).__finalize__(self) + # TODO: unused; remove? 
def _reindex_axis(self, new_index, fill_method, axis, copy): new_data = self._data.reindex_axis(new_index, axis=axis, method=fill_method, copy=copy) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index e64ba44bb8a92..63738594799f5 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -176,20 +176,11 @@ def rename_axis(self, mapper, axis, copy=True, level=None): axis : int copy : boolean, default True level : int, default None - """ obj = self.copy(deep=copy) obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) return obj - def add_prefix(self, prefix): - f = partial('{prefix}{}'.format, prefix=prefix) - return self.rename_axis(f, axis=0) - - def add_suffix(self, suffix): - f = partial('{}{suffix}'.format, suffix=suffix) - return self.rename_axis(f, axis=0) - @property def _is_single_block(self): if self.ndim == 1: @@ -222,12 +213,10 @@ def _rebuild_blknos_and_blklocs(self): self._blknos = new_blknos self._blklocs = new_blklocs - # make items read only for now - def _get_items(self): + @property + def items(self): return self.axes[0] - items = property(fget=_get_items) - def _get_counts(self, f): """ return a dict of the counts of the function in BlockManager """ self._consolidate_inplace()
- [ ] <s>closes #16045</s><b>update</b>Not anymore - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` @jorisvandenbossche can you confirm this is what you had in mind in that issue?
https://api.github.com/repos/pandas-dev/pandas/pulls/21924
2018-07-15T20:24:21Z
2018-09-08T02:46:54Z
2018-09-08T02:46:54Z
2018-09-08T03:10:23Z
[BUG] change types to Py_ssize_t to fix #21905
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index d0090852fa5af..fae855f5495f0 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -527,7 +527,7 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): cdef: ndarray[int64_t] trans, deltas int64_t delta, local_val - Py_ssize_t posn + Py_ssize_t pos assert obj.tzinfo is None @@ -782,7 +782,6 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): cdef: ndarray[int64_t] utc_dates, tt, result, trans, deltas Py_ssize_t i, j, pos, n = len(vals) - ndarray[Py_ssize_t] posn int64_t v, offset, delta npy_datetimestruct dts @@ -1124,7 +1123,8 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): cdef: Py_ssize_t n = len(stamps) ndarray[int64_t] result = np.empty(n, dtype=np.int64) - ndarray[int64_t] trans, deltas, pos + ndarray[int64_t] trans, deltas + Py_ssize_t[:] pos npy_datetimestruct dts if is_utc(tz): diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index ebd8402c6fdf7..b8965288a878b 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -934,7 +934,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, cdef: Py_ssize_t n = len(stamps) ndarray[int64_t] result = np.empty(n, dtype=np.int64) - ndarray[int64_t] trans, deltas, pos + ndarray[int64_t] trans, deltas + Py_ssize_t[:] pos npy_datetimestruct dts int64_t local_val diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index e8eb27fd4544b..688b12005921d 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -74,7 +74,8 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): cdef: Py_ssize_t n = len(stamps) int reso = RESO_DAY, curr_reso - ndarray[int64_t] trans, deltas, pos + ndarray[int64_t] trans, deltas + Py_ssize_t[:] pos npy_datetimestruct dts int64_t local_val
May close #21905, will need to check with OP.
https://api.github.com/repos/pandas-dev/pandas/pulls/21923
2018-07-15T20:16:15Z
2018-07-17T00:37:13Z
2018-07-17T00:37:13Z
2020-04-05T17:42:33Z
Concatenation of series of differing types should lead to object
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 9e3f7ec73f852..a0966dcf8bdd9 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -257,6 +257,7 @@ ExtensionType Changes - Bug in :meth:`Series.get` for ``Series`` using ``ExtensionArray`` and integer index (:issue:`21257`) - :meth:`Series.combine()` works correctly with :class:`~pandas.api.extensions.ExtensionArray` inside of :class:`Series` (:issue:`20825`) - :meth:`Series.combine()` with scalar argument now works for any function type (:issue:`21248`) +- Bug in :func:`concat` that lead to inconsistent behaviour for ExtensionArrays where the scalar representation was int or float (:issue:`21792`) - .. _whatsnew_0240.api.incompatibilities: diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 4a41b14cee071..9a5a775df6186 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -180,6 +180,8 @@ def is_nonempty(x): extensions = [is_extension_array_dtype(x) for x in to_concat] if any(extensions) and axis == 1: to_concat = [np.atleast_2d(x.astype('object')) for x in to_concat] + elif any(extensions): + to_concat = [x.astype('object') for x in to_concat] if not nonempty: # we have all empties, but may need to coerce the result dtype to
- [x] closes #21792 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I see no simple solution on how to make a test for this without introducing a whole new ExtensionArray class that is actually of scalar type `'i'`. cc @jreback I'm wondering a bit why this has not yet been a problem for https://github.com/pandas-dev/pandas/pull/21160 I have check that this fixes the problems I'm seeing in `fletcher` on `int` and `float` dtypes.
https://api.github.com/repos/pandas-dev/pandas/pulls/21922
2018-07-15T18:49:01Z
2018-12-23T23:14:52Z
null
2018-12-23T23:14:52Z
BUG:Clip with a list-like threshold with a nan is broken (GH19992)
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index ac1ef78fd6fd2..f1cedf139f7d6 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -62,3 +62,7 @@ Bug Fixes - - + +**Missing** + +- Bug in :func:`Series.clip` and :func:`DataFrame.clip` cannot accept list-like threshold containing ``NaN`` (:issue:`19992`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index b682f5e65f876..610bcf5d1d6c4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6520,9 +6520,11 @@ def clip(self, lower=None, upper=None, axis=None, inplace=False, # GH 17276 # numpy doesn't like NaN as a clip value # so ignore - if np.any(pd.isnull(lower)): + # GH 19992 + # numpy doesn't drop a list-like bound containing NaN + if not is_list_like(lower) and np.any(pd.isnull(lower)): lower = None - if np.any(pd.isnull(upper)): + if not is_list_like(upper) and np.any(pd.isnull(upper)): upper = None # GH 2747 (arguments were reversed) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index a399fa2b68680..b48395efaf5c8 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1859,13 +1859,23 @@ def test_clip_with_na_args(self): """Should process np.nan argument as None """ # GH # 17276 tm.assert_frame_equal(self.frame.clip(np.nan), self.frame) - tm.assert_frame_equal(self.frame.clip(upper=[1, 2, np.nan]), - self.frame) - tm.assert_frame_equal(self.frame.clip(lower=[1, np.nan, 3]), - self.frame) tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan), self.frame) + # GH #19992 + df = DataFrame({'col_0': [1, 2, 3], 'col_1': [4, 5, 6], + 'col_2': [7, 8, 9]}) + + result = df.clip(lower=[4, 5, np.nan], axis=0) + expected = DataFrame({'col_0': [4, 5, np.nan], 'col_1': [4, 5, np.nan], + 'col_2': [7, 8, np.nan]}) + tm.assert_frame_equal(result, expected) + + result = df.clip(lower=[4, 5, np.nan], axis=1) + expected = DataFrame({'col_0': 
[4, 4, 4], 'col_1': [5, 5, 6], + 'col_2': [np.nan, np.nan, np.nan]}) + tm.assert_frame_equal(result, expected) + # Matrix-like def test_dot(self): a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 28a77bbb1d3fa..8c0f4b11149fe 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -942,11 +942,15 @@ def test_clip_with_na_args(self): s = Series([1, 2, 3]) assert_series_equal(s.clip(np.nan), Series([1, 2, 3])) - assert_series_equal(s.clip(upper=[1, 1, np.nan]), Series([1, 2, 3])) - assert_series_equal(s.clip(lower=[1, np.nan, 1]), Series([1, 2, 3])) assert_series_equal(s.clip(upper=np.nan, lower=np.nan), Series([1, 2, 3])) + # GH #19992 + assert_series_equal(s.clip(lower=[0, 4, np.nan]), + Series([1, 4, np.nan])) + assert_series_equal(s.clip(upper=[1, np.nan, 1]), + Series([1, np.nan, 1])) + def test_clip_against_series(self): # GH #6966
- fix bug #19992 - 2 tests amended in frame/test_analytics.py and series/test_analytics.py - whatsnew entry added
https://api.github.com/repos/pandas-dev/pandas/pulls/21921
2018-07-15T15:18:30Z
2018-07-18T10:23:30Z
2018-07-18T10:23:30Z
2018-07-18T10:23:47Z
[BUG][BLD] revert DEF component of #21878
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index f45b4320b4d3d..98eca92fd1ab2 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -30,6 +30,23 @@ PANDAS_INLINE PyObject* get_value_1d(PyArrayObject* ap, Py_ssize_t i) { return PyArray_Scalar(item, PyArray_DESCR(ap), (PyObject*)ap); } +// returns ASCII or UTF8 (py3) view on python str +// python object owns memory, should not be freed +PANDAS_INLINE const char* get_c_string(PyObject* obj) { +#if PY_VERSION_HEX >= 0x03000000 + return PyUnicode_AsUTF8(obj); +#else + return PyString_AsString(obj); +#endif +} + +PANDAS_INLINE PyObject* char_to_string(const char* data) { +#if PY_VERSION_HEX >= 0x03000000 + return PyUnicode_FromString(data); +#else + return PyString_FromString(data); +#endif +} void set_array_not_contiguous(PyArrayObject* ao) { ao->flags &= ~(NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS); diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd index 728eb63dc836c..7ce2181f32553 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/src/util.pxd @@ -4,9 +4,7 @@ cnp.import_array() cimport cpython from cpython cimport PyTypeObject -from cpython.string cimport PyString_FromString, PyString_AsString -DEF PY3 = bytes != str cdef extern from "Python.h": # Note: importing extern-style allows us to declare these as nogil @@ -17,8 +15,6 @@ cdef extern from "Python.h": bint PyFloat_Check(object obj) nogil bint PyComplex_Check(object obj) nogil bint PyObject_TypeCheck(object obj, PyTypeObject* type) nogil - char* PyUnicode_AsUTF8(object unicode) - object PyUnicode_FromString(const char* u) nogil cdef extern from "numpy/arrayobject.h": @@ -74,6 +70,8 @@ cdef extern from "numpy_helper.h": int assign_value_1d(ndarray, Py_ssize_t, object) except -1 cnp.int64_t get_nat() object get_value_1d(ndarray, Py_ssize_t) + char *get_c_string(object) except NULL + object char_to_string(char*) ctypedef fused numeric: cnp.int8_t @@ -104,26 +102,6 @@ 
cdef extern from "headers/stdint.h": enum: INT64_MIN -cdef inline const char* get_c_string(object obj) except NULL: - """ - returns ASCII or UTF8 (py3) view on python str - python object owns memory, should not be freed - """ - # TODO: this docstring is copied verbatim from version that was - # directly in numpy_helper.C; is it still accurate? - IF PY3: - return PyUnicode_AsUTF8(obj) - ELSE: - return PyString_AsString(obj) - - -cdef inline object char_to_string(const char* data): - IF PY3: - return PyUnicode_FromString(data) - ELSE: - return PyString_FromString(data) - - cdef inline object get_value_at(ndarray arr, object loc): cdef: Py_ssize_t i, sz diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 576b3ecc1f8e2..b8f97dcf2d599 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -53,7 +53,7 @@ from tslibs.timestamps cimport (create_timestamp_from_ts, from tslibs.timestamps import Timestamp -DEF PY2 = str == bytes +cdef bint PY2 = str == bytes cdef inline object create_datetime_from_ts( @@ -555,9 +555,8 @@ cpdef array_to_datetime(ndarray[object] values, errors='raise', if len(val) == 0 or val in nat_strings: iresult[i] = NPY_NAT continue - if PY2: - if PyUnicode_Check(val): - val = val.encode('utf-8') + if PyUnicode_Check(val) and PY2: + val = val.encode('utf-8') try: _string_to_dts(val, &dts, &out_local, &out_tzoffset) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index ebd8402c6fdf7..266d312aca0ae 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -52,7 +52,7 @@ from nattype cimport _nat_scalar_rules, NPY_NAT, is_null_datetimelike from offsets cimport to_offset from offsets import _Tick -DEF PY2 = str == bytes +cdef bint PY2 = str == bytes cdef extern from "period_helper.h": @@ -728,7 +728,7 @@ cdef object _period_strftime(int64_t value, int freq, object fmt): result = result.replace(str_extra_fmts[i], repl) - IF PY2: + if PY2: result = result.decode('utf-8', 'ignore') 
return result
#21878 introduced a subtle build problem that is not caught by the CI. Running `python setup.py build_ext --inplace` followed by `python3 setup.py build_ext --inplace` causes compile-time errors in py3 (or if running these in the opposite order, errors in py2). These are fixed by running `python setup.py clean` in between. This reverts the relevant changes. For anyone curious: cython supports syntax: ``` DEF foo = [...] IF foo: [...] ELSE: [...] ``` and these IF/ELSE conditions get evaluated at compile-time. #21878 incorrectly assumed that "compile-time" in this context meant ".c -> .so" time, not ".pyx -> .c" time. In this we used: ``` DEF PY2 = str == bytes IF PY2: [...] ELSE: [...] ``` so after running `setup.py build_ext --inplace` in py2, the ".c" file we end up with has already gotten rid of the PY3 branches. When we run `python3 setup.py build_ext --inplace` cython uses the existing .c file, tries to compile it to .so, and breaks.
https://api.github.com/repos/pandas-dev/pandas/pulls/21919
2018-07-15T03:41:31Z
2018-07-15T17:59:25Z
2018-07-15T17:59:25Z
2018-07-15T17:59:47Z
DOC: Updated the DataFrame.assign docstring
diff --git a/ci/doctests.sh b/ci/doctests.sh index e7fe80e60eb6d..48774a1e4d00d 100755 --- a/ci/doctests.sh +++ b/ci/doctests.sh @@ -21,7 +21,7 @@ if [ "$DOCTEST" ]; then # DataFrame / Series docstrings pytest --doctest-modules -v pandas/core/frame.py \ - -k"-assign -axes -combine -isin -itertuples -join -nlargest -nsmallest -nunique -pivot_table -quantile -query -reindex -reindex_axis -replace -round -set_index -stack -to_dict -to_stata" + -k"-axes -combine -isin -itertuples -join -nlargest -nsmallest -nunique -pivot_table -quantile -query -reindex -reindex_axis -replace -round -set_index -stack -to_dict -to_stata" if [ $? -ne "0" ]; then RET=1 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 959b0a4fd1890..a28a8939d9a2d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3273,7 +3273,7 @@ def assign(self, **kwargs): Parameters ---------- - kwargs : keyword, value pairs + **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not @@ -3283,7 +3283,7 @@ def assign(self, **kwargs): Returns ------- - df : DataFrame + DataFrame A new DataFrame with the new columns in addition to all the existing columns. @@ -3303,48 +3303,34 @@ def assign(self, **kwargs): Examples -------- - >>> df = pd.DataFrame({'A': range(1, 11), 'B': np.random.randn(10)}) + >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, + ... 
index=['Portland', 'Berkeley']) + >>> df + temp_c + Portland 17.0 + Berkeley 25.0 Where the value is a callable, evaluated on `df`: - - >>> df.assign(ln_A = lambda x: np.log(x.A)) - A B ln_A - 0 1 0.426905 0.000000 - 1 2 -0.780949 0.693147 - 2 3 -0.418711 1.098612 - 3 4 -0.269708 1.386294 - 4 5 -0.274002 1.609438 - 5 6 -0.500792 1.791759 - 6 7 1.649697 1.945910 - 7 8 -1.495604 2.079442 - 8 9 0.549296 2.197225 - 9 10 -0.758542 2.302585 - - Where the value already exists and is inserted: - - >>> newcol = np.log(df['A']) - >>> df.assign(ln_A=newcol) - A B ln_A - 0 1 0.426905 0.000000 - 1 2 -0.780949 0.693147 - 2 3 -0.418711 1.098612 - 3 4 -0.269708 1.386294 - 4 5 -0.274002 1.609438 - 5 6 -0.500792 1.791759 - 6 7 1.649697 1.945910 - 7 8 -1.495604 2.079442 - 8 9 0.549296 2.197225 - 9 10 -0.758542 2.302585 - - Where the keyword arguments depend on each other - - >>> df = pd.DataFrame({'A': [1, 2, 3]}) - - >>> df.assign(B=df.A, C=lambda x:x['A']+ x['B']) - A B C - 0 1 1 2 - 1 2 2 4 - 2 3 3 6 + >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) + temp_c temp_f + Portland 17.0 62.6 + Berkeley 25.0 77.0 + + Alternatively, the same behavior can be achieved by directly + referencing an existing Series or sequence: + >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) + temp_c temp_f + Portland 17.0 62.6 + Berkeley 25.0 77.0 + + In Python 3.6+, you can create multiple columns within the same assign + where one of the columns depends on another one defined within the same + assign: + >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, + ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) + temp_c temp_f temp_k + Portland 17.0 62.6 290.15 + Berkeley 25.0 77.0 298.15 """ data = self.copy()
Updated the DataFrame.assign docstring example to use np.arange instead of np.random.randn to pass the validation test.
https://api.github.com/repos/pandas-dev/pandas/pulls/21917
2018-07-14T23:11:49Z
2018-09-22T23:36:23Z
2018-09-22T23:36:23Z
2018-09-27T12:53:25Z
Added links to useful discussions of groupby and SettingWithCopyWarning
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 45e449d081fb0..4003478b3faf4 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -1366,3 +1366,6 @@ column index name will be used as the name of the inserted column: result result.stack() + +Additional discussion can be found at `https://pythonforbiologists.com/when-to-use-aggregatefiltertransform-in-pandas/ +<https://pythonforbiologists.com/when-to-use-aggregatefiltertransform-in-pandas/>`_. diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 1c63acce6e3fa..b710ab595fbb9 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1919,3 +1919,6 @@ This will **not** work at all, and so should be avoided: The chained assignment warnings / exceptions are aiming to inform the user of a possibly invalid assignment. There may be false positives; situations where a chained assignment is inadvertently reported. + +For additional discussion and explanation, see `https://www.dataquest.io/blog/settingwithcopywarning/ +<https://www.dataquest.io/blog/settingwithcopywarning/>`_.
Added links to useful discussions of `groupby` and `SettingWithCopyWarning`. Did not run tests as only docs changed, but `python make.py --single groupby` and `python make.py --single indexing` successfully create the respective html files. - [x] closes #17505 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21916
2018-07-14T21:10:28Z
2018-07-17T18:03:53Z
null
2018-07-17T18:03:53Z
[BUG] Fix interpolation for datetimelike dtypes
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index a17bf7c8bd6e9..9c24e31de4fd5 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -371,6 +371,7 @@ Datetimelike - Fixed bug where two :class:`DateOffset` objects with different ``normalize`` attributes could evaluate as equal (:issue:`21404`) - Fixed bug where :meth:`Timestamp.resolution` incorrectly returned 1-microsecond ``timedelta`` instead of 1-nanosecond :class:`Timedelta` (:issue:`21336`,:issue:`21365`) +- Fixed bug in :meth:`DataFrame.interpolate` and :meth:`Series.interpolate` where null values were not filled for dtypes of ``datetime64[ns]``, ``datetime64[ns, tz]``, ``timedelta64[ns]`` (:issue:`21915`) Timedelta ^^^^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8da678e0adec0..8488584d21f65 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6097,8 +6097,11 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, raise ValueError("Only `method=linear` interpolation is supported " "on MultiIndexes.") - if _maybe_transposed_self._data.get_dtype_counts().get( - 'object') == len(_maybe_transposed_self.T): + dtype_counts = _maybe_transposed_self._data.get_dtype_counts() + if ('object' in dtype_counts and + dtype_counts.get('object') == len(_maybe_transposed_self.T)): + # Try to short-circuit tranposing to avoid superfluous dimension + # errors GH#13287, GH#17539, GH#19197 raise TypeError("Cannot interpolate with all NaNs.") # create/use the index diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 208d7b8bcf8a7..159e31ab55425 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -15,6 +15,7 @@ from pandas.core.base import PandasObject +import pandas.core.dtypes.common as ct from pandas.core.dtypes.dtypes import ( ExtensionDtype, DatetimeTZDtype, PandasExtensionDtype, @@ -1158,20 +1159,19 @@ def check_int_bool(self, inplace): try: m = 
missing.clean_interp_method(method, **kwargs) except: - m = None + raise ValueError("invalid method '{0}' to interpolate." + .format(method)) - if m is not None: - r = check_int_bool(self, inplace) - if r is not None: - return r - return self._interpolate(method=m, index=index, values=values, - axis=axis, limit=limit, - limit_direction=limit_direction, - limit_area=limit_area, - fill_value=fill_value, inplace=inplace, - downcast=downcast, mgr=mgr, **kwargs) + r = check_int_bool(self, inplace) + if r is not None: + return r + return self._interpolate(method=m, index=index, values=values, + axis=axis, limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + fill_value=fill_value, inplace=inplace, + downcast=downcast, mgr=mgr, **kwargs) - raise ValueError("invalid method '{0}' to interpolate.".format(method)) def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None, fill_value=None, coerce=False, @@ -1199,6 +1199,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, blocks = [self.make_block_same_class(values, ndim=self.ndim)] return self._maybe_downcast(blocks, downcast) + # TODO: ignoring `values`? def _interpolate(self, method=None, index=None, values=None, fill_value=None, axis=0, limit=None, limit_direction='forward', limit_area=None, @@ -1206,13 +1207,27 @@ def _interpolate(self, method=None, index=None, values=None, """ interpolate using scipy wrappers """ inplace = validate_bool_kwarg(inplace, 'inplace') - data = self.values if inplace else self.values.copy() # only deal with floats - if not self.is_float: + if ct.needs_i8_conversion(self.dtype): + if ct.is_period_dtype(self.dtype): + raise NotImplementedError("PeriodDtype columns/Series don't " + "exist yet, but will soon. 
" + "When they do, test them!") + mask = isna(self.values) + values = self.values + + # DatetimeTZBlock.values is DatetimeIndex, need to cast/shape + values = getattr(values, 'values', values).reshape(self.shape) + data = values.astype(np.float64) + data[mask.reshape(self.shape)] = np.nan + elif not self.is_float: if not self.is_integer: return self - data = data.astype(np.float64) + data = self.values.astype(np.float64) + else: + # avoid making a copy if possible + data = self.values if inplace else self.values.copy() if fill_value is None: fill_value = self.fill_value @@ -1224,7 +1239,6 @@ def _interpolate(self, method=None, index=None, values=None, # process 1-d slices in the axis direction def func(x): - # process a 1-d slice, returning it # should the axis argument be handled below in apply_along_axis? # i.e. not an arg to missing.interpolate_1d @@ -1236,6 +1250,20 @@ def func(x): # interp each column independently interp_values = np.apply_along_axis(func, axis, data) + if ct.needs_i8_conversion(self.dtype): + # convert remaining NaNs back to NaT and cast back to own dtype + mask = isna(interp_values) + interp_values[mask] = fill_value # TODO: or self.fill_value? 
+ + # Note: we need to get to a numpy dtype (M8[ns] or m8[ns]) and + # not a pandas tz-aware dtype (for now) + dtype = self.dtype.base + assert isinstance(dtype, np.dtype) + interp_values = interp_values.astype(dtype) + if is_datetimetz(self): + # squeeze() since we expanded dimension above + held = self._holder(interp_values.squeeze(), tz='UTC') + interp_values = held.tz_convert(self.dtype.tz) blocks = [self.make_block_same_class(interp_values)] return self._maybe_downcast(blocks, downcast) diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 9567c08781856..b4416432764cf 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -360,7 +360,10 @@ def test_fillna_categorical_nan(self): cat = Categorical([np.nan, 2, np.nan]) val = Categorical([np.nan, np.nan, np.nan]) df = DataFrame({"cats": cat, "vals": val}) - res = df.fillna(df.median()) + with tm.assert_produces_warning(RuntimeWarning): + # RuntimeWarning: All-NaN slice encountered + res = df.fillna(df.median()) + v_exp = [np.nan, np.nan, np.nan] df_exp = DataFrame({"cats": [2, 2, 2], "vals": v_exp}, dtype='category') @@ -855,3 +858,71 @@ def test_interp_ignore_all_good(self): # all good result = df[['B', 'D']].interpolate(downcast=None) assert_frame_equal(result, df[['B', 'D']]) + + @pytest.mark.parametrize('use_idx', [True, False]) + @pytest.mark.parametrize('tz', [None, 'US/Central']) + def test_interpolate_dt64_values(self, tz, use_idx): + # GH#21915 + dti = pd.date_range('2016-01-01', periods=10, tz=tz) + index = dti if use_idx else None + + # Copy to avoid corrupting dti, see GH#21907 + ser = pd.Series(dti, index=index).copy() + ser[::3] = pd.NaT + + expected = pd.Series(dti, index=index) + expected.iloc[0] = pd.NaT + expected.iloc[-1] = expected.iloc[-2] + + df = ser.to_frame() + expected = expected.to_frame() + + result = df.interpolate(method='linear') + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize('use_idx', 
[True, False]) + def test_interpolate_td64_values(self, use_idx): + # GH#21915 + tdi = pd.timedelta_range('1D', periods=10) + index = tdi if use_idx else None + + ser = pd.Series(tdi, index=index) + ser[::3] = pd.NaT + + expected = pd.Series(tdi, index=index) + expected.iloc[0] = pd.NaT + expected.iloc[-1] = expected.iloc[-2] + + df = ser.to_frame() + expected = expected.to_frame() + + result = df.interpolate(method='linear') + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize('use_idx', [True, False]) + def test_interpolate_datetimelike_and_object(self, use_idx): + # GH#21915 + # Check that dt64/td64 with more than one column doesn't get + # screwed up by .transpose() with an object column present. + dti_tz = pd.date_range('2016-01-01', periods=10, tz='US/Central') + dti_naive = pd.date_range('2016-01-01', periods=10, tz=None) + tdi = pd.timedelta_range('1D', periods=10) + objcol = list('ABCDEFGHIJ') + + index = tdi if use_idx else None + + df = pd.DataFrame({'aware': dti_tz, + 'naive': dti_naive, + 'tdi': tdi, + 'obj': objcol}, + columns=['naive', 'aware', 'tdi', 'obj'], + index=index) + + expected = df.copy() + expected.iloc[0, :-1] = pd.NaT + expected.iloc[-1, :-1] = df.iloc[-2, :-1] + + df.iloc[::3, :-1] = pd.NaT + + result = df.interpolate(method='linear') + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 2bc44cb1c683f..96f4a60a8c53f 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -1317,3 +1317,38 @@ def test_series_interpolate_intraday(self): result = ts.reindex(new_index).interpolate(method='time') tm.assert_numpy_array_equal(result.values, exp.values) + + # TODO: De-duplicate with similar tests in test.frame.test_missing? 
+ @pytest.mark.parametrize('use_idx', [True, False]) + @pytest.mark.parametrize('tz', [None, 'US/Central']) + def test_interpolate_dt64_values(self, tz, use_idx): + # GH#21915 + dti = pd.date_range('2016-01-01', periods=10, tz=tz) + index = dti if use_idx else None + + # Copy to avoid corrupting dti, see GH#21907 + ser = pd.Series(dti, index=index).copy() + ser[::3] = pd.NaT + + expected = pd.Series(dti, index=index) + expected.iloc[0] = pd.NaT + expected.iloc[-1] = expected.iloc[-2] + + result = ser.interpolate(method='linear') + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('use_idx', [True, False]) + def test_interpolate_td64_values(self, use_idx): + # GH#21915 + tdi = pd.timedelta_range('1D', periods=10) + index = tdi if use_idx else None + + ser = pd.Series(tdi, index=index) + ser[::3] = pd.NaT + + expected = pd.Series(tdi, index=index) + expected.iloc[0] = pd.NaT + expected.iloc[-1] = expected.iloc[-2] + + result = ser.interpolate(method='linear') + tm.assert_series_equal(result, expected)
- [x] closes #11312, #11701, #19199, - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Two bugs here, one fixed, one avoided. First is in `Block._interpolate` where datetimelike values were not cast correctly. Second is that `DataFrame.transpose` will raise in some conditions (see #19198, also motivated #21908). This adds dtype-handling code in `Block._interpolate`. An alternative would be to override `_interpolate` in subclasses. Either way works for me. NB: This is only implemented for `method='linear'`. I made that explicit in the tests as a reminder to follow-up with others. (#19199 is marked as a duplicate but it includes a bug report for the TZ-aware case that is separate bug.)
https://api.github.com/repos/pandas-dev/pandas/pulls/21915
2018-07-14T20:13:23Z
2018-07-17T06:05:49Z
null
2022-11-16T18:07:52Z
Use the Agg backend for docs builds
diff --git a/doc/make.py b/doc/make.py index 4d54a2415a194..d85747458148d 100755 --- a/doc/make.py +++ b/doc/make.py @@ -363,6 +363,10 @@ def main(): sys.path.append(args.python_path) globals()['pandas'] = importlib.import_module('pandas') + # Set the matplotlib backend to the non-interactive Agg backend for all + # child processes. + os.environ['MPLBACKEND'] = 'module://matplotlib.backends.backend_agg' + builder = DocBuilder(args.num_jobs, not args.no_api, args.single, args.verbosity) getattr(builder, args.command)()
This uses a non-interactive Agg matplotlib backend to build docs, which avoids trying to use the default MacOS backend, which can fail in some environments. Closes #21913.
https://api.github.com/repos/pandas-dev/pandas/pulls/21914
2018-07-14T16:56:55Z
2018-07-17T12:18:53Z
2018-07-17T12:18:53Z
2018-07-17T12:19:03Z
Add statsmodels to optional dependencies
diff --git a/ci/requirements-optional-conda.txt b/ci/requirements-optional-conda.txt index 9e4e8e99b5205..18aac30f04aea 100644 --- a/ci/requirements-optional-conda.txt +++ b/ci/requirements-optional-conda.txt @@ -22,6 +22,7 @@ s3fs scipy seaborn sqlalchemy +statsmodels xarray xlrd xlsxwriter diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt index 3cce3f5339883..28dafc43b09c0 100644 --- a/ci/requirements-optional-pip.txt +++ b/ci/requirements-optional-pip.txt @@ -24,6 +24,7 @@ s3fs scipy seaborn sqlalchemy +statsmodels xarray xlrd xlsxwriter
Some of the documentation uses methods from statsmodels, which isn't included in the optional dependency list. Fixes #21911. - [x] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21912
2018-07-14T16:27:07Z
2018-07-15T02:14:57Z
2018-07-15T02:14:57Z
2018-07-15T02:14:57Z
Change ._data to ._parent for accessors
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index eebdfe8a54a9d..003ba7608dea4 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2401,7 +2401,7 @@ class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): def __init__(self, data): self._validate(data) - self.categorical = data.values + self._parent = data.values self.index = data.index self.name = data.name self._freeze() @@ -2413,19 +2413,19 @@ def _validate(data): "'category' dtype") def _delegate_property_get(self, name): - return getattr(self.categorical, name) + return getattr(self._parent, name) def _delegate_property_set(self, name, new_values): - return setattr(self.categorical, name, new_values) + return setattr(self._parent, name, new_values) @property def codes(self): from pandas import Series - return Series(self.categorical.codes, index=self.index) + return Series(self._parent.codes, index=self.index) def _delegate_method(self, name, *args, **kwargs): from pandas import Series - method = getattr(self.categorical, name) + method = getattr(self._parent, name) res = method(*args, **kwargs) if res is not None: return Series(res, index=self.index, name=self.name) diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index d7b4ea63cd48c..6ab8c4659c31e 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -27,14 +27,14 @@ def __init__(self, data, orig): raise TypeError("cannot convert an object of type {0} to a " "datetimelike index".format(type(data))) - self.values = data + self._parent = data self.orig = orig self.name = getattr(data, 'name', None) self.index = getattr(data, 'index', None) self._freeze() def _get_values(self): - data = self.values + data = self._parent if is_datetime64_dtype(data.dtype): return DatetimeIndex(data, copy=False, name=self.name) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 
6deec52811aff..b9dfc3b8fc69f 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -927,7 +927,7 @@ def str_extract(arr, pat, flags=0, expand=True): if expand: return _str_extract_frame(arr._orig, pat, flags=flags) else: - result, name = _str_extract_noexpand(arr._data, pat, flags=flags) + result, name = _str_extract_noexpand(arr._parent, pat, flags=flags) return arr._wrap_result(result, name=name, expand=expand) @@ -1721,7 +1721,7 @@ def str_encode(arr, encoding, errors="strict"): def _noarg_wrapper(f, docstring=None, **kargs): def wrapper(self): - result = _na_map(f, self._data, **kargs) + result = _na_map(f, self._parent, **kargs) return self._wrap_result(result) wrapper.__name__ = f.__name__ @@ -1735,15 +1735,15 @@ def wrapper(self): def _pat_wrapper(f, flags=False, na=False, **kwargs): def wrapper1(self, pat): - result = f(self._data, pat) + result = f(self._parent, pat) return self._wrap_result(result) def wrapper2(self, pat, flags=0, **kwargs): - result = f(self._data, pat, flags=flags, **kwargs) + result = f(self._parent, pat, flags=flags, **kwargs) return self._wrap_result(result) def wrapper3(self, pat, na=np.nan): - result = f(self._data, pat, na=na) + result = f(self._parent, pat, na=na) return self._wrap_result(result) wrapper = wrapper3 if na else wrapper2 if flags else wrapper1 @@ -1783,7 +1783,7 @@ def __init__(self, data): self._is_categorical = is_categorical_dtype(data) # .values.categories works for both Series/Index - self._data = data.values.categories if self._is_categorical else data + self._parent = data.values.categories if self._is_categorical else data # save orig to blow up categoricals to the right type self._orig = data self._freeze() @@ -2334,14 +2334,14 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): 'side': 'beginning', 'method': 'split'}) def split(self, pat=None, n=-1, expand=False): - result = str_split(self._data, pat, n=n) + result = str_split(self._parent, pat, n=n) return 
self._wrap_result(result, expand=expand) @Appender(_shared_docs['str_split'] % { 'side': 'end', 'method': 'rsplit'}) def rsplit(self, pat=None, n=-1, expand=False): - result = str_rsplit(self._data, pat, n=n) + result = str_rsplit(self._parent, pat, n=n) return self._wrap_result(result, expand=expand) _shared_docs['str_partition'] = (""" @@ -2432,7 +2432,7 @@ def rsplit(self, pat=None, n=-1, expand=False): }) def partition(self, pat=' ', expand=True): f = lambda x: x.partition(pat) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result, expand=expand) @Appender(_shared_docs['str_partition'] % { @@ -2443,45 +2443,45 @@ def partition(self, pat=' ', expand=True): }) def rpartition(self, pat=' ', expand=True): f = lambda x: x.rpartition(pat) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result, expand=expand) @copy(str_get) def get(self, i): - result = str_get(self._data, i) + result = str_get(self._parent, i) return self._wrap_result(result) @copy(str_join) def join(self, sep): - result = str_join(self._data, sep) + result = str_join(self._parent, sep) return self._wrap_result(result) @copy(str_contains) def contains(self, pat, case=True, flags=0, na=np.nan, regex=True): - result = str_contains(self._data, pat, case=case, flags=flags, na=na, + result = str_contains(self._parent, pat, case=case, flags=flags, na=na, regex=regex) return self._wrap_result(result) @copy(str_match) def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=None): - result = str_match(self._data, pat, case=case, flags=flags, na=na, + result = str_match(self._parent, pat, case=case, flags=flags, na=na, as_indexer=as_indexer) return self._wrap_result(result) @copy(str_replace) def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True): - result = str_replace(self._data, pat, repl, n=n, case=case, + result = str_replace(self._parent, pat, repl, n=n, case=case, flags=flags, regex=regex) 
return self._wrap_result(result) @copy(str_repeat) def repeat(self, repeats): - result = str_repeat(self._data, repeats) + result = str_repeat(self._parent, repeats) return self._wrap_result(result) @copy(str_pad) def pad(self, width, side='left', fillchar=' '): - result = str_pad(self._data, width, side=side, fillchar=fillchar) + result = str_pad(self._parent, width, side=side, fillchar=fillchar) return self._wrap_result(result) _shared_docs['str_pad'] = (""" @@ -2574,27 +2574,27 @@ def zfill(self, width): 4 NaN dtype: object """ - result = str_pad(self._data, width, side='left', fillchar='0') + result = str_pad(self._parent, width, side='left', fillchar='0') return self._wrap_result(result) @copy(str_slice) def slice(self, start=None, stop=None, step=None): - result = str_slice(self._data, start, stop, step) + result = str_slice(self._parent, start, stop, step) return self._wrap_result(result) @copy(str_slice_replace) def slice_replace(self, start=None, stop=None, repl=None): - result = str_slice_replace(self._data, start, stop, repl) + result = str_slice_replace(self._parent, start, stop, repl) return self._wrap_result(result) @copy(str_decode) def decode(self, encoding, errors="strict"): - result = str_decode(self._data, encoding, errors) + result = str_decode(self._parent, encoding, errors) return self._wrap_result(result) @copy(str_encode) def encode(self, encoding, errors="strict"): - result = str_encode(self._data, encoding, errors) + result = str_encode(self._parent, encoding, errors) return self._wrap_result(result) _shared_docs['str_strip'] = (r""" @@ -2663,38 +2663,38 @@ def encode(self, encoding, errors="strict"): @Appender(_shared_docs['str_strip'] % dict(side='left and right sides', method='strip')) def strip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='both') + result = str_strip(self._parent, to_strip, side='both') return self._wrap_result(result) @Appender(_shared_docs['str_strip'] % dict(side='left side', 
method='lstrip')) def lstrip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='left') + result = str_strip(self._parent, to_strip, side='left') return self._wrap_result(result) @Appender(_shared_docs['str_strip'] % dict(side='right side', method='rstrip')) def rstrip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='right') + result = str_strip(self._parent, to_strip, side='right') return self._wrap_result(result) @copy(str_wrap) def wrap(self, width, **kwargs): - result = str_wrap(self._data, width, **kwargs) + result = str_wrap(self._parent, width, **kwargs) return self._wrap_result(result) @copy(str_get_dummies) def get_dummies(self, sep='|'): # we need to cast to Series of strings as only that has all # methods available for making the dummies... - data = self._orig.astype(str) if self._is_categorical else self._data + data = self._orig.astype(str) if self._is_categorical else self._parent result, name = str_get_dummies(data, sep) return self._wrap_result(result, use_codes=(not self._is_categorical), name=name, expand=True) @copy(str_translate) def translate(self, table, deletechars=None): - result = str_translate(self._data, table, deletechars) + result = str_translate(self._parent, table, deletechars) return self._wrap_result(result) count = _pat_wrapper(str_count, flags=True) @@ -2737,14 +2737,15 @@ def extractall(self, pat, flags=0): dict(side='lowest', method='find', also='rfind : Return highest indexes in each strings')) def find(self, sub, start=0, end=None): - result = str_find(self._data, sub, start=start, end=end, side='left') + result = str_find(self._parent, sub, start=start, end=end, side='left') return self._wrap_result(result) @Appender(_shared_docs['find'] % dict(side='highest', method='rfind', also='find : Return lowest indexes in each strings')) def rfind(self, sub, start=0, end=None): - result = str_find(self._data, sub, start=start, end=end, side='right') + result = str_find(self._parent, sub, + 
start=start, end=end, side='right') return self._wrap_result(result) def normalize(self, form): @@ -2763,7 +2764,7 @@ def normalize(self, form): """ import unicodedata f = lambda x: unicodedata.normalize(form, compat.u_safe(x)) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result) _shared_docs['index'] = (""" @@ -2794,14 +2795,16 @@ def normalize(self, form): dict(side='lowest', similar='find', method='index', also='rindex : Return highest indexes in each strings')) def index(self, sub, start=0, end=None): - result = str_index(self._data, sub, start=start, end=end, side='left') + result = str_index(self._parent, sub, + start=start, end=end, side='left') return self._wrap_result(result) @Appender(_shared_docs['index'] % dict(side='highest', similar='rfind', method='rindex', also='index : Return lowest indexes in each strings')) def rindex(self, sub, start=0, end=None): - result = str_index(self._data, sub, start=start, end=end, side='right') + result = str_index(self._parent, sub, + start=start, end=end, side='right') return self._wrap_result(result) _shared_docs['len'] = (""" diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 7ce4c23f81ad6..e81b162645b94 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2727,7 +2727,7 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, class BasePlotMethods(PandasObject): def __init__(self, data): - self._data = data + self._parent = data # can be Series or DataFrame def __call__(self, *args, **kwargs): raise NotImplementedError @@ -2755,7 +2755,7 @@ def __call__(self, kind='line', ax=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, label=None, secondary_y=False, **kwds): - return plot_series(self._data, kind=kind, ax=ax, figsize=figsize, + return plot_series(self._parent, kind=kind, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style, logx=logx, 
logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, @@ -2954,7 +2954,7 @@ def __call__(self, x=None, y=None, kind='line', ax=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False, **kwds): - return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax, + return plot_frame(self._parent, kind=kind, x=x, y=y, ax=ax, subplots=subplots, sharex=sharex, sharey=sharey, layout=layout, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style,
The idea is to reduce the number of distinct meanings `._data` has. With this it is down to just `Index._data` and `NDFrame._data`, I think.
https://api.github.com/repos/pandas-dev/pandas/pulls/21906
2018-07-13T22:41:36Z
2018-08-08T10:52:34Z
2018-08-08T10:52:34Z
2018-08-08T15:50:33Z
BUG: issues with hash-function for Float64HashTable (GH21866)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 8fe3023e9537c..b723e9cc6dca8 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -471,6 +471,7 @@ Numeric - Bug in :class:`Series` ``__rmatmul__`` doesn't support matrix vector multiplication (:issue:`21530`) - Bug in :func:`factorize` fails with read-only array (:issue:`12813`) +- Fixed bug in :func:`unique` handled signed zeros inconsistently: for some inputs 0.0 and -0.0 were treated as equal and for some inputs as different. Now they are treated as equal for all inputs (:issue:`21866`) - - diff --git a/pandas/_libs/src/klib/khash_python.h b/pandas/_libs/src/klib/khash_python.h index dd75ae5ec7e28..e9fb49e8a5e42 100644 --- a/pandas/_libs/src/klib/khash_python.h +++ b/pandas/_libs/src/klib/khash_python.h @@ -19,7 +19,20 @@ khint64_t PANDAS_INLINE asint64(double key) { memcpy(&val, &key, sizeof(double)); return val; } -#define kh_float64_hash_func(key) (khint32_t)((asint64(key))>>33^(asint64(key))^(asint64(key))<<11) + +// correct for all inputs but not -0.0 and NaNs +#define kh_float64_hash_func_0_NAN(key) (khint32_t)((asint64(key))>>33^(asint64(key))^(asint64(key))<<11) + +// correct for all inputs but not NaNs +#define kh_float64_hash_func_NAN(key) ((key) == 0.0 ? \ + kh_float64_hash_func_0_NAN(0.0) : \ + kh_float64_hash_func_0_NAN(key)) + +// correct for all +#define kh_float64_hash_func(key) ((key) != (key) ? 
\ + kh_float64_hash_func_NAN(Py_NAN) : \ + kh_float64_hash_func_NAN(key)) + #define kh_float64_hash_equal(a, b) ((a) == (b) || ((b) != (b) && (a) != (a))) #define KHASH_MAP_INIT_FLOAT64(name, khval_t) \ diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 25e64aa82cc36..3e754355bcb26 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -7,6 +7,7 @@ from numpy import nan from datetime import datetime from itertools import permutations +import struct from pandas import (Series, Categorical, CategoricalIndex, Timestamp, DatetimeIndex, Index, IntervalIndex) import pandas as pd @@ -500,6 +501,25 @@ def test_obj_none_preservation(self): tm.assert_numpy_array_equal(result, expected, strict_nan=True) + def test_signed_zero(self): + # GH 21866 + a = np.array([-0.0, 0.0]) + result = pd.unique(a) + expected = np.array([-0.0]) # 0.0 and -0.0 are equivalent + tm.assert_numpy_array_equal(result, expected) + + def test_different_nans(self): + # GH 21866 + # create different nans from bit-patterns: + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + a = np.array([NAN1, NAN2]) # NAN1 and NAN2 are equivalent + result = pd.unique(a) + expected = np.array([np.nan]) + tm.assert_numpy_array_equal(result, expected) + class TestIsin(object): @@ -1087,6 +1107,31 @@ def test_lookup_nan(self, writable): tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.int64)) + def test_add_signed_zeros(self): + # GH 21866 inconsistent hash-function for float64 + # default hash-function would lead to different hash-buckets + # for 0.0 and -0.0 if there are more than 2^30 hash-buckets + # but this would mean 16GB + N = 4 # 12 * 10**8 would trigger the error, if you have enough memory + m = ht.Float64HashTable(N) + m.set_item(0.0, 0) + m.set_item(-0.0, 0) + assert len(m) == 1 # 0.0 and -0.0 are equivalent + + def 
test_add_different_nans(self): + # GH 21866 inconsistent hash-function for float64 + # create different nans from bit-patterns: + NAN1 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000000))[0] + NAN2 = struct.unpack("d", struct.pack("=Q", 0x7ff8000000000001))[0] + assert NAN1 != NAN1 + assert NAN2 != NAN2 + # default hash function would lead to different hash-buckets + # for NAN1 and NAN2 even if there are only 4 buckets: + m = ht.Float64HashTable() + m.set_item(NAN1, 0) + m.set_item(NAN2, 0) + assert len(m) == 1 # NAN1 and NAN2 are equivalent + def test_lookup_overflow(self, writable): xs = np.array([1, 2, 2**63], dtype=np.uint64) # GH 21688 ensure we can deal with readonly memory views
The following issues 1) hash(0.0) != hash(-0.0) 2) hash(x) != hash(y) for different x,y which are nans are solved by setting: 1) hash(-0.0):=hash(0.0) 2) hash(x):=hash(np.nan) for every x which is nan - [x] closes #21866 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21904
2018-07-13T22:09:40Z
2018-07-25T19:13:29Z
2018-07-25T19:13:29Z
2018-08-09T19:34:37Z
[REF] implement internals as dir
diff --git a/pandas/core/internals.py b/pandas/core/internals/__init__.py similarity index 99% rename from pandas/core/internals.py rename to pandas/core/internals/__init__.py index 5a87a8368dc88..fde3aaa14ac5d 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals/__init__.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- import warnings import copy from warnings import catch_warnings diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 7fbf7ec05e91e..39418fb72bf4a 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -13,11 +13,12 @@ from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex, Series, Categorical, TimedeltaIndex, SparseArray) from pandas.compat import OrderedDict, lrange -from pandas.core.internals import (BlockPlacement, SingleBlockManager, +from pandas.core.internals import (SingleBlockManager, make_block, BlockManager) import pandas.core.algorithms as algos import pandas.util.testing as tm import pandas as pd +from pandas._libs.internals import BlockPlacement from pandas.util.testing import (assert_almost_equal, assert_frame_equal, randn, assert_series_equal) from pandas.compat import zip, u
In the name of a) cleaning up internals and b) isolating BlockManager from everything else, this separates core.internals into `internals.managers`, `internals.blocks`, `internals.concat`.
https://api.github.com/repos/pandas-dev/pandas/pulls/21903
2018-07-13T22:06:01Z
2018-07-21T17:32:23Z
2018-07-21T17:32:23Z
2018-07-23T11:33:29Z
TST/CLN: correctly skip in indexes/common; add test for duplicated
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index bb82d5578481b..56f59851d6d04 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -35,10 +35,6 @@ def verify_pickle(self, indices): assert indices.equals(unpickled) def test_pickle_compat_construction(self): - # this is testing for pickle compat - if self._holder is None: - return - # need an object to create with pytest.raises(TypeError, self._holder) @@ -236,7 +232,7 @@ def test_set_name_methods(self, indices): # don't tests a MultiIndex here (as its tested separated) if isinstance(indices, MultiIndex): - return + pytest.skip('Skip check for MultiIndex') original_name = indices.name new_ind = indices.set_names([new_name]) assert new_ind.name == new_name @@ -333,7 +329,8 @@ def test_copy_and_deepcopy(self, indices): from copy import copy, deepcopy if isinstance(indices, MultiIndex): - return + pytest.skip('Skip check for MultiIndex') + for func in (copy, deepcopy): idx_copy = func(indices) assert idx_copy is not indices @@ -342,20 +339,50 @@ def test_copy_and_deepcopy(self, indices): new_copy = indices.copy(deep=True, name="banana") assert new_copy.name == "banana" - def test_duplicates(self, indices): + def test_has_duplicates(self, indices): if type(indices) is not self._holder: - return + pytest.skip('Can only check if we have the correct type') if not len(indices) or isinstance(indices, MultiIndex): - return + # MultiIndex tested separately in: + # tests/indexes/multi/test_unique_and_duplicates + pytest.skip('Skip check for empty Index and MultiIndex') + idx = self._holder([indices[0]] * 5) assert not idx.is_unique assert idx.has_duplicates + @pytest.mark.parametrize('keep', ['first', 'last', False]) + def test_duplicated(self, indices, keep): + if type(indices) is not self._holder: + pytest.skip('Can only check if we know the index type') + if not len(indices) or isinstance(indices, MultiIndex): + # MultiIndex tested separately in: + # 
tests/indexes/multi/test_unique_and_duplicates + pytest.skip('Skip check for empty Index and MultiIndex') + + idx = self._holder(indices) + if idx.has_duplicates: + # We are testing the duplicated-method here, so we need to know + # exactly which indices are duplicate and how (for the result). + # This is not possible if "idx" has duplicates already, which we + # therefore remove. This is seemingly circular, as drop_duplicates + # invokes duplicated, but in the end, it all works out because we + # cross-check with Series.duplicated, which is tested separately. + idx = idx.drop_duplicates() + + n, k = len(idx), 10 + duplicated_selection = np.random.choice(n, k * n) + expected = pd.Series(duplicated_selection).duplicated(keep=keep).values + idx = self._holder(idx.values[duplicated_selection]) + + result = idx.duplicated(keep=keep) + tm.assert_numpy_array_equal(result, expected) + def test_unique(self, indices): # don't test a MultiIndex here (as its tested separated) # don't test a CategoricalIndex because categories change (GH 18291) if isinstance(indices, (MultiIndex, CategoricalIndex)): - return + pytest.skip('Skip check for MultiIndex/CategoricalIndex') # GH 17896 expected = indices.drop_duplicates() @@ -375,7 +402,7 @@ def test_unique_na(self): def test_get_unique_index(self, indices): # MultiIndex tested separately if not len(indices) or isinstance(indices, MultiIndex): - return + pytest.skip('Skip check for empty Index and MultiIndex') idx = indices[[0] * 5] idx_unique = indices[[0]] @@ -394,7 +421,7 @@ def test_get_unique_index(self, indices): # nans: if not indices._can_hold_na: - return + pytest.skip('Skip na-check if index cannot hold na') if needs_i8_conversion(indices): vals = indices.asi8[[0] * 5] @@ -423,7 +450,7 @@ def test_sort(self, indices): def test_mutability(self, indices): if not len(indices): - return + pytest.skip('Skip check for empty Index') pytest.raises(TypeError, indices.__setitem__, 0, indices[0]) def test_view(self, indices): @@ -761,7 
+788,7 @@ def test_equals_op(self): # GH9947, GH10637 index_a = self.create_index() if isinstance(index_a, PeriodIndex): - return + pytest.skip('Skip check for PeriodIndex') n = len(index_a) index_b = index_a[0:-1] @@ -989,11 +1016,11 @@ def test_searchsorted_monotonic(self, indices): # not implemented for tuple searches in MultiIndex # or Intervals searches in IntervalIndex if isinstance(indices, (MultiIndex, IntervalIndex)): - return + pytest.skip('Skip check for MultiIndex/IntervalIndex') # nothing to test if the index is empty if indices.empty: - return + pytest.skip('Skip check for empty Index') value = indices[0] # determine the expected results (handle dupes for 'right') diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index a2a4170256088..2221fd023b561 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -590,12 +590,15 @@ def test_is_unique(self, values, expected): ci = CategoricalIndex(values) assert ci.is_unique is expected - def test_duplicates(self): + def test_has_duplicates(self): idx = CategoricalIndex([0, 0, 0], name='foo') assert not idx.is_unique assert idx.has_duplicates + def test_drop_duplicates(self): + + idx = CategoricalIndex([0, 0, 0], name='foo') expected = CategoricalIndex([0], name='foo') tm.assert_index_equal(idx.drop_duplicates(), expected) tm.assert_index_equal(idx.unique(), expected) diff --git a/pandas/tests/indexes/test_range.py b/pandas/tests/indexes/test_range.py index 38f4b341116b8..2a9efd92df8a3 100644 --- a/pandas/tests/indexes/test_range.py +++ b/pandas/tests/indexes/test_range.py @@ -806,7 +806,7 @@ def test_explicit_conversions(self): result = a - fidx tm.assert_index_equal(result, expected) - def test_duplicates(self): + def test_has_duplicates(self): for ind in self.indices: if not len(ind): continue
Splitting up #21645 * Added tests for `duplicated` * Following https://github.com/pandas-dev/pandas/pull/21645#discussion_r202192191, turned several blank `return` statements (which falsely pass the test) into `pytest.skip`.
https://api.github.com/repos/pandas-dev/pandas/pulls/21902
2018-07-13T21:20:41Z
2018-08-10T10:37:22Z
2018-08-10T10:37:21Z
2018-08-10T17:17:00Z
TST/CLN: clean up indexes/multi/test_unique_and_duplicates
diff --git a/pandas/tests/indexes/multi/conftest.py b/pandas/tests/indexes/multi/conftest.py index 6cf9003500b61..afe651d22c6a7 100644 --- a/pandas/tests/indexes/multi/conftest.py +++ b/pandas/tests/indexes/multi/conftest.py @@ -15,13 +15,25 @@ def idx(): major_labels = np.array([0, 0, 1, 2, 3, 3]) minor_labels = np.array([0, 1, 0, 1, 0, 1]) index_names = ['first', 'second'] - index = MultiIndex( - levels=[major_axis, minor_axis], - labels=[major_labels, minor_labels], - names=index_names, - verify_integrity=False - ) - return index + mi = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=index_names, verify_integrity=False) + return mi + + +@pytest.fixture +def idx_dup(): + # compare tests/indexes/multi/conftest.py + major_axis = Index(['foo', 'bar', 'baz', 'qux']) + minor_axis = Index(['one', 'two']) + + major_labels = np.array([0, 0, 1, 0, 1, 1]) + minor_labels = np.array([0, 1, 0, 1, 0, 1]) + index_names = ['first', 'second'] + mi = MultiIndex(levels=[major_axis, minor_axis], + labels=[major_labels, minor_labels], + names=index_names, verify_integrity=False) + return mi @pytest.fixture diff --git a/pandas/tests/indexes/multi/test_unique_and_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py similarity index 58% rename from pandas/tests/indexes/multi/test_unique_and_duplicates.py rename to pandas/tests/indexes/multi/test_duplicates.py index c1000e5b6e0f6..1cdf0ca6e013e 100644 --- a/pandas/tests/indexes/multi/test_unique_and_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -2,56 +2,54 @@ import warnings from itertools import product +import pytest import numpy as np -import pandas as pd -import pandas.util.testing as tm -import pytest -from pandas import MultiIndex + from pandas.compat import range, u +from pandas import MultiIndex, DatetimeIndex +from pandas._libs import hashtable +import pandas.util.testing as tm @pytest.mark.parametrize('names', [None, ['first', 'second']]) def 
test_unique(names): - mi = pd.MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], - names=names) + mi = MultiIndex.from_arrays([[1, 2, 1, 2], [1, 1, 1, 2]], names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) + exp = MultiIndex.from_arrays([[1, 2, 2], [1, 1, 2]], names=mi.names) tm.assert_index_equal(res, exp) - mi = pd.MultiIndex.from_arrays([list('aaaa'), list('abab')], - names=names) + mi = MultiIndex.from_arrays([list('aaaa'), list('abab')], + names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([list('aa'), list('ab')], - names=mi.names) + exp = MultiIndex.from_arrays([list('aa'), list('ab')], names=mi.names) tm.assert_index_equal(res, exp) - mi = pd.MultiIndex.from_arrays([list('aaaa'), list('aaaa')], - names=names) + mi = MultiIndex.from_arrays([list('aaaa'), list('aaaa')], names=names) res = mi.unique() - exp = pd.MultiIndex.from_arrays([['a'], ['a']], names=mi.names) + exp = MultiIndex.from_arrays([['a'], ['a']], names=mi.names) tm.assert_index_equal(res, exp) # GH #20568 - empty MI - mi = pd.MultiIndex.from_arrays([[], []], names=names) + mi = MultiIndex.from_arrays([[], []], names=names) res = mi.unique() tm.assert_index_equal(mi, res) def test_unique_datetimelike(): - idx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', - '2015-01-01', 'NaT', 'NaT']) - idx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', - '2015-01-02', 'NaT', '2015-01-01'], - tz='Asia/Tokyo') - result = pd.MultiIndex.from_arrays([idx1, idx2]).unique() - - eidx1 = pd.DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) - eidx2 = pd.DatetimeIndex(['2015-01-01', '2015-01-02', - 'NaT', '2015-01-01'], - tz='Asia/Tokyo') - exp = pd.MultiIndex.from_arrays([eidx1, eidx2]) + idx1 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-01', + '2015-01-01', 'NaT', 'NaT']) + idx2 = DatetimeIndex(['2015-01-01', '2015-01-01', '2015-01-02', + '2015-01-02', 'NaT', '2015-01-01'], + tz='Asia/Tokyo') + result 
= MultiIndex.from_arrays([idx1, idx2]).unique() + + eidx1 = DatetimeIndex(['2015-01-01', '2015-01-01', 'NaT', 'NaT']) + eidx2 = DatetimeIndex(['2015-01-01', '2015-01-02', + 'NaT', '2015-01-01'], + tz='Asia/Tokyo') + exp = MultiIndex.from_arrays([eidx1, eidx2]) tm.assert_index_equal(result, exp) @@ -63,41 +61,51 @@ def test_unique_level(idx, level): tm.assert_index_equal(result, expected) # With already unique level - mi = pd.MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], - names=['first', 'second']) + mi = MultiIndex.from_arrays([[1, 3, 2, 4], [1, 3, 2, 5]], + names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) tm.assert_index_equal(result, expected) # With empty MI - mi = pd.MultiIndex.from_arrays([[], []], names=['first', 'second']) + mi = MultiIndex.from_arrays([[], []], names=['first', 'second']) result = mi.unique(level=level) expected = mi.get_level_values(level) +@pytest.mark.parametrize('dropna', [True, False]) +def test_get_unique_index(idx, dropna): + mi = idx[[0, 1, 0, 1, 1, 0, 0]] + expected = mi._shallow_copy(mi[[0, 1]]) + + result = mi._get_unique_index(dropna=dropna) + assert result.unique + tm.assert_index_equal(result, expected) + + def test_duplicate_multiindex_labels(): # GH 17464 # Make sure that a MultiIndex with duplicate levels throws a ValueError with pytest.raises(ValueError): - ind = pd.MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)]) + mi = MultiIndex([['A'] * 10, range(10)], [[0] * 10, range(10)]) # And that using set_levels with duplicate levels fails - ind = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], - [1, 2, 1, 2, 3]]) + mi = MultiIndex.from_arrays([['A', 'A', 'B', 'B', 'B'], + [1, 2, 1, 2, 3]]) with pytest.raises(ValueError): - ind.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], - inplace=True) + mi.set_levels([['A', 'B', 'A', 'A', 'B'], [2, 1, 3, -2, 5]], + inplace=True) @pytest.mark.parametrize('names', [['a', 'b', 'a'], [1, 1, 2], [1, 'a', 1]]) def 
test_duplicate_level_names(names): # GH18872, GH19029 - mi = pd.MultiIndex.from_product([[0, 1]] * 3, names=names) + mi = MultiIndex.from_product([[0, 1]] * 3, names=names) assert mi.names == names # With .rename() - mi = pd.MultiIndex.from_product([[0, 1]] * 3) + mi = MultiIndex.from_product([[0, 1]] * 3) mi = mi.rename(names) assert mi.names == names @@ -109,27 +117,34 @@ def test_duplicate_level_names(names): def test_duplicate_meta_data(): # GH 10115 - index = MultiIndex( + mi = MultiIndex( levels=[[0, 1], [0, 1, 2]], labels=[[0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) - for idx in [index, - index.set_names([None, None]), - index.set_names([None, 'Num']), - index.set_names(['Upper', 'Num']), ]: + for idx in [mi, + mi.set_names([None, None]), + mi.set_names([None, 'Num']), + mi.set_names(['Upper', 'Num']), ]: assert idx.has_duplicates assert idx.drop_duplicates().names == idx.names -def test_duplicates(idx): +def test_has_duplicates(idx, idx_dup): + # see fixtures + assert idx.is_unique assert not idx.has_duplicates - assert idx.append(idx).has_duplicates + assert not idx_dup.is_unique + assert idx_dup.has_duplicates - index = MultiIndex(levels=[[0, 1], [0, 1, 2]], labels=[ - [0, 0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 0, 1, 2]]) - assert index.has_duplicates + mi = MultiIndex(levels=[[0, 1], [0, 1, 2]], + labels=[[0, 0, 0, 0, 1, 1, 1], + [0, 1, 2, 0, 0, 1, 2]]) + assert not mi.is_unique + assert mi.has_duplicates + +def test_has_duplicates_from_tuples(): # GH 9075 t = [(u('x'), u('out'), u('z'), 5, u('y'), u('in'), u('z'), 169), (u('x'), u('out'), u('z'), 7, u('y'), u('in'), u('z'), 119), @@ -150,9 +165,11 @@ def test_duplicates(idx): (u('x'), u('out'), u('z'), 33, u('y'), u('in'), u('z'), 123), (u('x'), u('out'), u('z'), 12, u('y'), u('in'), u('z'), 144)] - index = pd.MultiIndex.from_tuples(t) - assert not index.has_duplicates + mi = MultiIndex.from_tuples(t) + assert not mi.has_duplicates + +def test_has_duplicates_overflow(): # handle int64 overflow if possible 
def check(nlevels, with_nulls): labels = np.tile(np.arange(500), 2) @@ -171,20 +188,20 @@ def check(nlevels, with_nulls): levels = [level] * nlevels + [[0, 1]] # no dups - index = MultiIndex(levels=levels, labels=labels) - assert not index.has_duplicates + mi = MultiIndex(levels=levels, labels=labels) + assert not mi.has_duplicates # with a dup if with_nulls: def f(a): return np.insert(a, 1000, a[0]) labels = list(map(f, labels)) - index = MultiIndex(levels=levels, labels=labels) + mi = MultiIndex(levels=levels, labels=labels) else: - values = index.values.tolist() - index = MultiIndex.from_tuples(values + [values[0]]) + values = mi.values.tolist() + mi = MultiIndex.from_tuples(values + [values[0]]) - assert index.has_duplicates + assert mi.has_duplicates # no overflow check(4, False) @@ -194,17 +211,31 @@ def f(a): check(8, False) check(8, True) + +@pytest.mark.parametrize('keep, expected', [ + ('first', np.array([False, False, False, True, True, False])), + ('last', np.array([False, True, True, False, False, False])), + (False, np.array([False, True, True, True, True, False])) +]) +def test_duplicated(idx_dup, keep, expected): + result = idx_dup.duplicated(keep=keep) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize('keep', ['first', 'last', False]) +def test_duplicated_large(keep): # GH 9125 n, k = 200, 5000 levels = [np.arange(n), tm.makeStringIndex(n), 1000 + np.arange(n)] labels = [np.random.choice(n, k * n) for lev in levels] mi = MultiIndex(levels=levels, labels=labels) - for keep in ['first', 'last', False]: - left = mi.duplicated(keep=keep) - right = pd._libs.hashtable.duplicated_object(mi.values, keep=keep) - tm.assert_numpy_array_equal(left, right) + result = mi.duplicated(keep=keep) + expected = hashtable.duplicated_object(mi.values, keep=keep) + tm.assert_numpy_array_equal(result, expected) + +def test_get_duplicates(): # GH5873 for a in [101, 102]: mi = MultiIndex.from_arrays([[101, a], [3.5, np.nan]]) @@ -212,11 +243,10 
@@ def f(a): with warnings.catch_warnings(record=True): # Deprecated - see GH20239 - assert mi.get_duplicates().equals(MultiIndex.from_arrays( - [[], []])) + assert mi.get_duplicates().equals(MultiIndex.from_arrays([[], []])) - tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( - 2, dtype='bool')) + tm.assert_numpy_array_equal(mi.duplicated(), + np.zeros(2, dtype='bool')) for n in range(1, 6): # 1st level shape for m in range(1, 5): # 2nd level shape @@ -232,28 +262,5 @@ def f(a): assert mi.get_duplicates().equals(MultiIndex.from_arrays( [[], []])) - tm.assert_numpy_array_equal(mi.duplicated(), np.zeros( - len(mi), dtype='bool')) - - -def test_get_unique_index(idx): - idx = idx[[0, 1, 0, 1, 1, 0, 0]] - expected = idx._shallow_copy(idx[[0, 1]]) - - for dropna in [False, True]: - result = idx._get_unique_index(dropna=dropna) - assert result.unique - tm.assert_index_equal(result, expected) - - -def test_unique_na(): - idx = pd.Index([2, np.nan, 2, 1], name='my_index') - expected = pd.Index([2, np.nan, 1], name='my_index') - result = idx.unique() - tm.assert_index_equal(result, expected) - - -def test_duplicate_level_names_access_raises(idx): - idx.names = ['foo', 'foo'] - tm.assert_raises_regex(ValueError, 'name foo occurs multiple times', - idx._get_level_number, 'foo') + tm.assert_numpy_array_equal(mi.duplicated(), + np.zeros(len(mi), dtype='bool')) diff --git a/pandas/tests/indexes/multi/test_names.py b/pandas/tests/indexes/multi/test_names.py index a9fbb55679173..68e8bb0cf58f2 100644 --- a/pandas/tests/indexes/multi/test_names.py +++ b/pandas/tests/indexes/multi/test_names.py @@ -115,3 +115,10 @@ def test_names(idx, index_names): ind_names = list(index.names) level_names = [level.name for level in index.levels] assert ind_names == level_names + + +def test_duplicate_level_names_access_raises(idx): + # GH19029 + idx.names = ['foo', 'foo'] + tm.assert_raises_regex(ValueError, 'name foo occurs multiple times', + idx._get_level_number, 'foo')
Splitting up #21645 * Added tests for `duplicated`, including a fixture for a `MultiIndex` with duplicates * Broke up a huge test (`test_duplicates`) into smaller chunks * removed a test (`test_unique_na`) that was for an `Index` (not `MultiIndex`), and exists verbatim in `tests/indexes/common.py` * moved `test_duplicate_level_names_access_raises` to its appropriate module
https://api.github.com/repos/pandas-dev/pandas/pulls/21900
2018-07-13T21:14:16Z
2018-07-16T10:57:07Z
2018-07-16T10:57:07Z
2018-07-17T07:18:59Z
TST/CLN: series.duplicated; parametrisation; fix warning
diff --git a/pandas/conftest.py b/pandas/conftest.py index c1376670ffbf0..a979c3fc3bfac 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -248,7 +248,19 @@ def tz_aware_fixture(request): return request.param -@pytest.fixture(params=[str, 'str', 'U']) +UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"] +SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"] +ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES + +FLOAT_DTYPES = [float, "float32", "float64"] +COMPLEX_DTYPES = [complex, "complex64", "complex128"] +STRING_DTYPES = [str, 'str', 'U'] + +ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES +ALL_NUMPY_DTYPES = ALL_REAL_DTYPES + COMPLEX_DTYPES + STRING_DTYPES + + +@pytest.fixture(params=STRING_DTYPES) def string_dtype(request): """Parametrized fixture for string dtypes. @@ -259,9 +271,6 @@ def string_dtype(request): return request.param -FLOAT_DTYPES = [float, "float32", "float64"] - - @pytest.fixture(params=FLOAT_DTYPES) def float_dtype(request): """ @@ -274,7 +283,7 @@ def float_dtype(request): return request.param -@pytest.fixture(params=[complex, "complex64", "complex128"]) +@pytest.fixture(params=COMPLEX_DTYPES) def complex_dtype(request): """ Parameterized fixture for complex dtypes. @@ -286,12 +295,6 @@ def complex_dtype(request): return request.param -UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"] -SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"] -ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES -ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES - - @pytest.fixture(params=SIGNED_INT_DTYPES) def sint_dtype(request): """ @@ -358,6 +361,31 @@ def any_real_dtype(request): return request.param +@pytest.fixture(params=ALL_NUMPY_DTYPES) +def any_numpy_dtype(request): + """ + Parameterized fixture for all numpy dtypes. 
+ + * int8 + * uint8 + * int16 + * uint16 + * int32 + * uint32 + * int64 + * uint64 + * float32 + * float64 + * complex64 + * complex128 + * str + * 'str' + * 'U' + """ + + return request.param + + @pytest.fixture def mock(): """ diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index fd14118bd833f..28a77bbb1d3fa 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -907,144 +907,6 @@ def test_matmul(self): pytest.raises(Exception, a.dot, a.values[:3]) pytest.raises(ValueError, a.dot, b.T) - def test_value_counts_nunique(self): - - # basics.rst doc example - series = Series(np.random.randn(500)) - series[20:500] = np.nan - series[10:20] = 5000 - result = series.nunique() - assert result == 11 - - # GH 18051 - s = pd.Series(pd.Categorical([])) - assert s.nunique() == 0 - s = pd.Series(pd.Categorical([np.nan])) - assert s.nunique() == 0 - - def test_unique(self): - - # 714 also, dtype=float - s = Series([1.2345] * 100) - s[::2] = np.nan - result = s.unique() - assert len(result) == 2 - - s = Series([1.2345] * 100, dtype='f4') - s[::2] = np.nan - result = s.unique() - assert len(result) == 2 - - # NAs in object arrays #714 - s = Series(['foo'] * 100, dtype='O') - s[::2] = np.nan - result = s.unique() - assert len(result) == 2 - - # decision about None - s = Series([1, 2, 3, None, None, None], dtype=object) - result = s.unique() - expected = np.array([1, 2, 3, None], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - # GH 18051 - s = pd.Series(pd.Categorical([])) - tm.assert_categorical_equal(s.unique(), pd.Categorical([]), - check_dtype=False) - s = pd.Series(pd.Categorical([np.nan])) - tm.assert_categorical_equal(s.unique(), pd.Categorical([np.nan]), - check_dtype=False) - - @pytest.mark.parametrize( - "tc1, tc2", - [ - ( - Series([1, 2, 3, 3], dtype=np.dtype('int_')), - Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('int_')) - ), - ( - Series([1, 2, 3, 3], 
dtype=np.dtype('uint')), - Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('uint')) - ), - ( - Series([1, 2, 3, 3], dtype=np.dtype('float_')), - Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('float_')) - ), - ( - Series([1, 2, 3, 3], dtype=np.dtype('unicode_')), - Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype('unicode_')) - ) - ] - ) - def test_drop_duplicates_non_bool(self, tc1, tc2): - # Test case 1 - expected = Series([False, False, False, True]) - assert_series_equal(tc1.duplicated(), expected) - assert_series_equal(tc1.drop_duplicates(), tc1[~expected]) - sc = tc1.copy() - sc.drop_duplicates(inplace=True) - assert_series_equal(sc, tc1[~expected]) - - expected = Series([False, False, True, False]) - assert_series_equal(tc1.duplicated(keep='last'), expected) - assert_series_equal(tc1.drop_duplicates(keep='last'), tc1[~expected]) - sc = tc1.copy() - sc.drop_duplicates(keep='last', inplace=True) - assert_series_equal(sc, tc1[~expected]) - - expected = Series([False, False, True, True]) - assert_series_equal(tc1.duplicated(keep=False), expected) - assert_series_equal(tc1.drop_duplicates(keep=False), tc1[~expected]) - sc = tc1.copy() - sc.drop_duplicates(keep=False, inplace=True) - assert_series_equal(sc, tc1[~expected]) - - # Test case 2 - expected = Series([False, False, False, False, True, True, False]) - assert_series_equal(tc2.duplicated(), expected) - assert_series_equal(tc2.drop_duplicates(), tc2[~expected]) - sc = tc2.copy() - sc.drop_duplicates(inplace=True) - assert_series_equal(sc, tc2[~expected]) - - expected = Series([False, True, True, False, False, False, False]) - assert_series_equal(tc2.duplicated(keep='last'), expected) - assert_series_equal(tc2.drop_duplicates(keep='last'), tc2[~expected]) - sc = tc2.copy() - sc.drop_duplicates(keep='last', inplace=True) - assert_series_equal(sc, tc2[~expected]) - - expected = Series([False, True, True, False, True, True, False]) - assert_series_equal(tc2.duplicated(keep=False), expected) - 
assert_series_equal(tc2.drop_duplicates(keep=False), tc2[~expected]) - sc = tc2.copy() - sc.drop_duplicates(keep=False, inplace=True) - assert_series_equal(sc, tc2[~expected]) - - def test_drop_duplicates_bool(self): - tc = Series([True, False, True, False]) - - expected = Series([False, False, True, True]) - assert_series_equal(tc.duplicated(), expected) - assert_series_equal(tc.drop_duplicates(), tc[~expected]) - sc = tc.copy() - sc.drop_duplicates(inplace=True) - assert_series_equal(sc, tc[~expected]) - - expected = Series([True, True, False, False]) - assert_series_equal(tc.duplicated(keep='last'), expected) - assert_series_equal(tc.drop_duplicates(keep='last'), tc[~expected]) - sc = tc.copy() - sc.drop_duplicates(keep='last', inplace=True) - assert_series_equal(sc, tc[~expected]) - - expected = Series([True, True, True, True]) - assert_series_equal(tc.duplicated(keep=False), expected) - assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected]) - sc = tc.copy() - sc.drop_duplicates(keep=False, inplace=True) - assert_series_equal(sc, tc[~expected]) - def test_clip(self): val = self.ts.median() @@ -1416,7 +1278,8 @@ def test_ptp(self): N = 1000 arr = np.random.randn(N) ser = Series(arr) - assert np.ptp(ser) == np.ptp(arr) + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + assert np.ptp(ser) == np.ptp(arr) # GH11163 s = Series([3, 5, np.nan, -3, 10]) @@ -1457,10 +1320,6 @@ def test_empty_timeseries_redections_return_nat(self): assert Series([], dtype=dtype).min() is pd.NaT assert Series([], dtype=dtype).max() is pd.NaT - def test_unique_data_ownership(self): - # it works! 
#1807 - Series(Series(["a", "c", "b"]).unique()).sort_values() - def test_repeat(self): s = Series(np.random.randn(3), index=['a', 'b', 'c']) @@ -1537,29 +1396,6 @@ def test_searchsorted_sorter(self): e = np.array([0, 2], dtype=np.intp) tm.assert_numpy_array_equal(r, e) - def test_is_unique(self): - # GH11946 - s = Series(np.random.randint(0, 10, size=1000)) - assert not s.is_unique - s = Series(np.arange(1000)) - assert s.is_unique - - def test_is_unique_class_ne(self, capsys): - # GH 20661 - class Foo(object): - def __init__(self, val): - self._value = val - - def __ne__(self, other): - raise Exception("NEQ not supported") - - li = [Foo(i) for i in range(5)] - s = pd.Series(li, index=[i for i in range(5)]) - _, err = capsys.readouterr() - s.is_unique - _, err = capsys.readouterr() - assert len(err) == 0 - def test_is_monotonic(self): s = Series(np.random.randint(0, 10, size=1000)) diff --git a/pandas/tests/series/test_duplicates.py b/pandas/tests/series/test_duplicates.py new file mode 100644 index 0000000000000..2e4d64188307c --- /dev/null +++ b/pandas/tests/series/test_duplicates.py @@ -0,0 +1,140 @@ +# coding=utf-8 + +import pytest + +import numpy as np + +from pandas import Series, Categorical +import pandas.util.testing as tm + + +def test_value_counts_nunique(): + # basics.rst doc example + series = Series(np.random.randn(500)) + series[20:500] = np.nan + series[10:20] = 5000 + result = series.nunique() + assert result == 11 + + # GH 18051 + s = Series(Categorical([])) + assert s.nunique() == 0 + s = Series(Categorical([np.nan])) + assert s.nunique() == 0 + + +def test_unique(): + # GH714 also, dtype=float + s = Series([1.2345] * 100) + s[::2] = np.nan + result = s.unique() + assert len(result) == 2 + + s = Series([1.2345] * 100, dtype='f4') + s[::2] = np.nan + result = s.unique() + assert len(result) == 2 + + # NAs in object arrays #714 + s = Series(['foo'] * 100, dtype='O') + s[::2] = np.nan + result = s.unique() + assert len(result) == 2 + + # decision 
about None + s = Series([1, 2, 3, None, None, None], dtype=object) + result = s.unique() + expected = np.array([1, 2, 3, None], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # GH 18051 + s = Series(Categorical([])) + tm.assert_categorical_equal(s.unique(), Categorical([]), check_dtype=False) + s = Series(Categorical([np.nan])) + tm.assert_categorical_equal(s.unique(), Categorical([np.nan]), + check_dtype=False) + + +def test_unique_data_ownership(): + # it works! #1807 + Series(Series(["a", "c", "b"]).unique()).sort_values() + + +def test_is_unique(): + # GH11946 + s = Series(np.random.randint(0, 10, size=1000)) + assert not s.is_unique + s = Series(np.arange(1000)) + assert s.is_unique + + +def test_is_unique_class_ne(capsys): + # GH 20661 + class Foo(object): + def __init__(self, val): + self._value = val + + def __ne__(self, other): + raise Exception("NEQ not supported") + + li = [Foo(i) for i in range(5)] + s = Series(li, index=[i for i in range(5)]) + _, err = capsys.readouterr() + s.is_unique + _, err = capsys.readouterr() + assert len(err) == 0 + + +@pytest.mark.parametrize( + 'keep, expected', + [ + ('first', Series([False, False, False, False, True, True, False])), + ('last', Series([False, True, True, False, False, False, False])), + (False, Series([False, True, True, False, True, True, False])) + ]) +def test_drop_duplicates_non_bool(any_numpy_dtype, keep, expected): + tc = Series([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(any_numpy_dtype)) + + tm.assert_series_equal(tc.duplicated(keep=keep), expected) + tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected]) + sc = tc.copy() + sc.drop_duplicates(keep=keep, inplace=True) + tm.assert_series_equal(sc, tc[~expected]) + + +@pytest.mark.parametrize('keep, expected', + [('first', Series([False, False, True, True])), + ('last', Series([True, True, False, False])), + (False, Series([True, True, True, True]))]) +def test_drop_duplicates_bool(keep, expected): + tc = Series([True, False, 
True, False]) + + tm.assert_series_equal(tc.duplicated(keep=keep), expected) + tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected]) + sc = tc.copy() + sc.drop_duplicates(keep=keep, inplace=True) + tm.assert_series_equal(sc, tc[~expected]) + + +@pytest.mark.parametrize('keep, expected', [ + ('first', Series([False, False, True, False, True], name='name')), + ('last', Series([True, True, False, False, False], name='name')), + (False, Series([True, True, True, False, True], name='name')) +]) +def test_duplicated_keep(keep, expected): + s = Series(['a', 'b', 'b', 'c', 'a'], name='name') + + result = s.duplicated(keep=keep) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize('keep, expected', [ + ('first', Series([False, False, True, False, True])), + ('last', Series([True, True, False, False, False])), + (False, Series([True, True, True, False, True])) +]) +def test_duplicated_nan_none(keep, expected): + s = Series([np.nan, 3, 3, None, np.nan], dtype=object) + + result = s.duplicated(keep=keep) + tm.assert_series_equal(result, expected)
Splitting up #21645 Added tests for `duplicated`, parametrized two tests for `drop_duplicates`, fixed a warning from #21614.
https://api.github.com/repos/pandas-dev/pandas/pulls/21899
2018-07-13T21:10:31Z
2018-07-16T10:55:32Z
2018-07-16T10:55:31Z
2018-07-17T07:17:43Z
TST: add test for duplicated frame/test_analytics
diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index c0e9b89c1877f..a399fa2b68680 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -1542,384 +1542,6 @@ def test_isin_empty_datetimelike(self): result = df1_td.isin(df3) tm.assert_frame_equal(result, expected) - # ---------------------------------------------------------------------- - # Row deduplication - - def test_drop_duplicates(self): - df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1, 1, 2, 2, 2, 2, 1, 2], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates('AAA') - expected = df[:2] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('AAA', keep='last') - expected = df.loc[[6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('AAA', keep=False) - expected = df.loc[[]] - tm.assert_frame_equal(result, expected) - assert len(result) == 0 - - # multi column - expected = df.loc[[0, 1, 2, 3]] - result = df.drop_duplicates(np.array(['AAA', 'B'])) - tm.assert_frame_equal(result, expected) - result = df.drop_duplicates(['AAA', 'B']) - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(('AAA', 'B'), keep='last') - expected = df.loc[[0, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(('AAA', 'B'), keep=False) - expected = df.loc[[0]] - tm.assert_frame_equal(result, expected) - - # consider everything - df2 = df.loc[:, ['AAA', 'B', 'C']] - - result = df2.drop_duplicates() - # in this case only - expected = df2.drop_duplicates(['AAA', 'B']) - tm.assert_frame_equal(result, expected) - - result = df2.drop_duplicates(keep='last') - expected = df2.drop_duplicates(['AAA', 'B'], keep='last') - tm.assert_frame_equal(result, expected) - - result = df2.drop_duplicates(keep=False) - expected = 
df2.drop_duplicates(['AAA', 'B'], keep=False) - tm.assert_frame_equal(result, expected) - - # integers - result = df.drop_duplicates('C') - expected = df.iloc[[0, 2]] - tm.assert_frame_equal(result, expected) - result = df.drop_duplicates('C', keep='last') - expected = df.iloc[[-2, -1]] - tm.assert_frame_equal(result, expected) - - df['E'] = df['C'].astype('int8') - result = df.drop_duplicates('E') - expected = df.iloc[[0, 2]] - tm.assert_frame_equal(result, expected) - result = df.drop_duplicates('E', keep='last') - expected = df.iloc[[-2, -1]] - tm.assert_frame_equal(result, expected) - - # GH 11376 - df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0], - 'y': [0, 6, 5, 5, 9, 1, 2]}) - expected = df.loc[df.index != 3] - tm.assert_frame_equal(df.drop_duplicates(), expected) - - df = pd.DataFrame([[1, 0], [0, 2]]) - tm.assert_frame_equal(df.drop_duplicates(), df) - - df = pd.DataFrame([[-2, 0], [0, -4]]) - tm.assert_frame_equal(df.drop_duplicates(), df) - - x = np.iinfo(np.int64).max / 3 * 2 - df = pd.DataFrame([[-x, x], [0, x + 4]]) - tm.assert_frame_equal(df.drop_duplicates(), df) - - df = pd.DataFrame([[-x, x], [x, x + 4]]) - tm.assert_frame_equal(df.drop_duplicates(), df) - - # GH 11864 - df = pd.DataFrame([i] * 9 for i in range(16)) - df = df.append([[1] + [0] * 8], ignore_index=True) - - for keep in ['first', 'last', False]: - assert df.duplicated(keep=keep).sum() == 0 - - @pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']]) - def test_duplicated_with_misspelled_column_name(self, subset): - # GH 19730 - df = pd.DataFrame({'A': [0, 0, 1], - 'B': [0, 0, 1], - 'C': [0, 0, 1]}) - - with pytest.raises(KeyError): - df.duplicated(subset) - - with pytest.raises(KeyError): - df.drop_duplicates(subset) - - @pytest.mark.slow - def test_duplicated_do_not_fail_on_wide_dataframes(self): - # gh-21524 - # Given the wide dataframe with a lot of columns - # with different (important!) 
values - data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000) - for i in range(100)} - df = pd.DataFrame(data).T - result = df.duplicated() - - # Then duplicates produce the bool pd.Series as a result - # and don't fail during calculation. - # Actual values doesn't matter here, though usually - # it's all False in this case - assert isinstance(result, pd.Series) - assert result.dtype == np.bool - - def test_drop_duplicates_with_duplicate_column_names(self): - # GH17836 - df = DataFrame([ - [1, 2, 5], - [3, 4, 6], - [3, 4, 7] - ], columns=['a', 'a', 'b']) - - result0 = df.drop_duplicates() - tm.assert_frame_equal(result0, df) - - result1 = df.drop_duplicates('a') - expected1 = df[:2] - tm.assert_frame_equal(result1, expected1) - - def test_drop_duplicates_for_take_all(self): - df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar', - 'foo', 'bar', 'qux', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1, 1, 2, 2, 2, 2, 1, 2], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates('AAA') - expected = df.iloc[[0, 1, 2, 6]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('AAA', keep='last') - expected = df.iloc[[2, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('AAA', keep=False) - expected = df.iloc[[2, 6]] - tm.assert_frame_equal(result, expected) - - # multiple columns - result = df.drop_duplicates(['AAA', 'B']) - expected = df.iloc[[0, 1, 2, 3, 4, 6]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['AAA', 'B'], keep='last') - expected = df.iloc[[0, 1, 2, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['AAA', 'B'], keep=False) - expected = df.iloc[[0, 1, 2, 6]] - tm.assert_frame_equal(result, expected) - - def test_drop_duplicates_tuple(self): - df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 
'two', 'one', 'two'], - 'C': [1, 1, 2, 2, 2, 2, 1, 2], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates(('AA', 'AB')) - expected = df[:2] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(('AA', 'AB'), keep='last') - expected = df.loc[[6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(('AA', 'AB'), keep=False) - expected = df.loc[[]] # empty df - assert len(result) == 0 - tm.assert_frame_equal(result, expected) - - # multi column - expected = df.loc[[0, 1, 2, 3]] - result = df.drop_duplicates((('AA', 'AB'), 'B')) - tm.assert_frame_equal(result, expected) - - def test_drop_duplicates_NA(self): - # none - df = DataFrame({'A': [None, None, 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates('A') - expected = df.loc[[0, 2, 3]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('A', keep='last') - expected = df.loc[[1, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('A', keep=False) - expected = df.loc[[]] # empty df - tm.assert_frame_equal(result, expected) - assert len(result) == 0 - - # multi column - result = df.drop_duplicates(['A', 'B']) - expected = df.loc[[0, 2, 3, 6]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['A', 'B'], keep='last') - expected = df.loc[[1, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['A', 'B'], keep=False) - expected = df.loc[[6]] - tm.assert_frame_equal(result, expected) - - # nan - df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.], - 'D': lrange(8)}) - - # single column - result = df.drop_duplicates('C') - expected = 
df[:2] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('C', keep='last') - expected = df.loc[[3, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('C', keep=False) - expected = df.loc[[]] # empty df - tm.assert_frame_equal(result, expected) - assert len(result) == 0 - - # multi column - result = df.drop_duplicates(['C', 'B']) - expected = df.loc[[0, 1, 2, 4]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['C', 'B'], keep='last') - expected = df.loc[[1, 3, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates(['C', 'B'], keep=False) - expected = df.loc[[1]] - tm.assert_frame_equal(result, expected) - - def test_drop_duplicates_NA_for_take_all(self): - # none - df = DataFrame({'A': [None, None, 'foo', 'bar', - 'foo', 'baz', 'bar', 'qux'], - 'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]}) - - # single column - result = df.drop_duplicates('A') - expected = df.iloc[[0, 2, 3, 5, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('A', keep='last') - expected = df.iloc[[1, 4, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('A', keep=False) - expected = df.iloc[[5, 7]] - tm.assert_frame_equal(result, expected) - - # nan - - # single column - result = df.drop_duplicates('C') - expected = df.iloc[[0, 1, 5, 6]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('C', keep='last') - expected = df.iloc[[3, 5, 6, 7]] - tm.assert_frame_equal(result, expected) - - result = df.drop_duplicates('C', keep=False) - expected = df.iloc[[5, 6]] - tm.assert_frame_equal(result, expected) - - def test_drop_duplicates_inplace(self): - orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'bar', 'foo'], - 'B': ['one', 'one', 'two', 'two', - 'two', 'two', 'one', 'two'], - 'C': [1, 1, 2, 2, 2, 2, 1, 2], - 'D': lrange(8)}) - - # single column - df = orig.copy() - df.drop_duplicates('A', 
inplace=True) - expected = orig[:2] - result = df - tm.assert_frame_equal(result, expected) - - df = orig.copy() - df.drop_duplicates('A', keep='last', inplace=True) - expected = orig.loc[[6, 7]] - result = df - tm.assert_frame_equal(result, expected) - - df = orig.copy() - df.drop_duplicates('A', keep=False, inplace=True) - expected = orig.loc[[]] - result = df - tm.assert_frame_equal(result, expected) - assert len(df) == 0 - - # multi column - df = orig.copy() - df.drop_duplicates(['A', 'B'], inplace=True) - expected = orig.loc[[0, 1, 2, 3]] - result = df - tm.assert_frame_equal(result, expected) - - df = orig.copy() - df.drop_duplicates(['A', 'B'], keep='last', inplace=True) - expected = orig.loc[[0, 5, 6, 7]] - result = df - tm.assert_frame_equal(result, expected) - - df = orig.copy() - df.drop_duplicates(['A', 'B'], keep=False, inplace=True) - expected = orig.loc[[0]] - result = df - tm.assert_frame_equal(result, expected) - - # consider everything - orig2 = orig.loc[:, ['A', 'B', 'C']].copy() - - df2 = orig2.copy() - df2.drop_duplicates(inplace=True) - # in this case only - expected = orig2.drop_duplicates(['A', 'B']) - result = df2 - tm.assert_frame_equal(result, expected) - - df2 = orig2.copy() - df2.drop_duplicates(keep='last', inplace=True) - expected = orig2.drop_duplicates(['A', 'B'], keep='last') - result = df2 - tm.assert_frame_equal(result, expected) - - df2 = orig2.copy() - df2.drop_duplicates(keep=False, inplace=True) - expected = orig2.drop_duplicates(['A', 'B'], keep=False) - result = df2 - tm.assert_frame_equal(result, expected) - # Rounding def test_round(self): # GH 2665 diff --git a/pandas/tests/frame/test_duplicates.py b/pandas/tests/frame/test_duplicates.py new file mode 100644 index 0000000000000..289170527dea7 --- /dev/null +++ b/pandas/tests/frame/test_duplicates.py @@ -0,0 +1,439 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function + +import pytest + +import numpy as np + +from pandas.compat import lrange, string_types 
+from pandas import DataFrame, Series + +import pandas.util.testing as tm + + +@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']]) +def test_duplicated_with_misspelled_column_name(subset): + # GH 19730 + df = DataFrame({'A': [0, 0, 1], + 'B': [0, 0, 1], + 'C': [0, 0, 1]}) + + with pytest.raises(KeyError): + df.duplicated(subset) + + with pytest.raises(KeyError): + df.drop_duplicates(subset) + + +@pytest.mark.slow +def test_duplicated_do_not_fail_on_wide_dataframes(): + # gh-21524 + # Given the wide dataframe with a lot of columns + # with different (important!) values + data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000) + for i in range(100)} + df = DataFrame(data).T + result = df.duplicated() + + # Then duplicates produce the bool Series as a result and don't fail during + # calculation. Actual values doesn't matter here, though usually it's all + # False in this case + assert isinstance(result, Series) + assert result.dtype == np.bool + + +@pytest.mark.parametrize('keep, expected', [ + ('first', Series([False, False, True, False, True])), + ('last', Series([True, True, False, False, False])), + (False, Series([True, True, True, False, True])) +]) +def test_duplicated_keep(keep, expected): + df = DataFrame({'A': [0, 1, 1, 2, 0], 'B': ['a', 'b', 'b', 'c', 'a']}) + + result = df.duplicated(keep=keep) + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail(reason="GH21720; nan/None falsely considered equal") +@pytest.mark.parametrize('keep, expected', [ + ('first', Series([False, False, True, False, True])), + ('last', Series([True, True, False, False, False])), + (False, Series([True, True, True, False, True])) +]) +def test_duplicated_nan_none(keep, expected): + df = DataFrame({'C': [np.nan, 3, 3, None, np.nan]}, dtype=object) + + result = df.duplicated(keep=keep) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize('keep', ['first', 'last', False]) +@pytest.mark.parametrize('subset', [None, ['A', 'B'], 'A']) 
+def test_duplicated_subset(subset, keep): + df = DataFrame({'A': [0, 1, 1, 2, 0], + 'B': ['a', 'b', 'b', 'c', 'a'], + 'C': [np.nan, 3, 3, None, np.nan]}) + + if subset is None: + subset = list(df.columns) + elif isinstance(subset, string_types): + # need to have a DataFrame, not a Series + # -> select columns with singleton list, not string + subset = [subset] + + expected = df[subset].duplicated(keep=keep) + result = df.duplicated(keep=keep, subset=subset) + tm.assert_series_equal(result, expected) + + +def test_drop_duplicates(): + df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1, 1, 2, 2, 2, 2, 1, 2], + 'D': lrange(8)}) + + # single column + result = df.drop_duplicates('AAA') + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('AAA', keep='last') + expected = df.loc[[6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('AAA', keep=False) + expected = df.loc[[]] + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + expected = df.loc[[0, 1, 2, 3]] + result = df.drop_duplicates(np.array(['AAA', 'B'])) + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates(['AAA', 'B']) + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(('AAA', 'B'), keep='last') + expected = df.loc[[0, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(('AAA', 'B'), keep=False) + expected = df.loc[[0]] + tm.assert_frame_equal(result, expected) + + # consider everything + df2 = df.loc[:, ['AAA', 'B', 'C']] + + result = df2.drop_duplicates() + # in this case only + expected = df2.drop_duplicates(['AAA', 'B']) + tm.assert_frame_equal(result, expected) + + result = df2.drop_duplicates(keep='last') + expected = df2.drop_duplicates(['AAA', 'B'], keep='last') + tm.assert_frame_equal(result, expected) + + result = 
df2.drop_duplicates(keep=False) + expected = df2.drop_duplicates(['AAA', 'B'], keep=False) + tm.assert_frame_equal(result, expected) + + # integers + result = df.drop_duplicates('C') + expected = df.iloc[[0, 2]] + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates('C', keep='last') + expected = df.iloc[[-2, -1]] + tm.assert_frame_equal(result, expected) + + df['E'] = df['C'].astype('int8') + result = df.drop_duplicates('E') + expected = df.iloc[[0, 2]] + tm.assert_frame_equal(result, expected) + result = df.drop_duplicates('E', keep='last') + expected = df.iloc[[-2, -1]] + tm.assert_frame_equal(result, expected) + + # GH 11376 + df = DataFrame({'x': [7, 6, 3, 3, 4, 8, 0], + 'y': [0, 6, 5, 5, 9, 1, 2]}) + expected = df.loc[df.index != 3] + tm.assert_frame_equal(df.drop_duplicates(), expected) + + df = DataFrame([[1, 0], [0, 2]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + df = DataFrame([[-2, 0], [0, -4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + x = np.iinfo(np.int64).max / 3 * 2 + df = DataFrame([[-x, x], [0, x + 4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + df = DataFrame([[-x, x], [x, x + 4]]) + tm.assert_frame_equal(df.drop_duplicates(), df) + + # GH 11864 + df = DataFrame([i] * 9 for i in range(16)) + df = df.append([[1] + [0] * 8], ignore_index=True) + + for keep in ['first', 'last', False]: + assert df.duplicated(keep=keep).sum() == 0 + + +def test_drop_duplicates_with_duplicate_column_names(): + # GH17836 + df = DataFrame([ + [1, 2, 5], + [3, 4, 6], + [3, 4, 7] + ], columns=['a', 'a', 'b']) + + result0 = df.drop_duplicates() + tm.assert_frame_equal(result0, df) + + result1 = df.drop_duplicates('a') + expected1 = df[:2] + tm.assert_frame_equal(result1, expected1) + + +def test_drop_duplicates_for_take_all(): + df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar', + 'foo', 'bar', 'qux', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1, 1, 2, 2, 2, 2, 1, 2], + 'D': 
lrange(8)}) + + # single column + result = df.drop_duplicates('AAA') + expected = df.iloc[[0, 1, 2, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('AAA', keep='last') + expected = df.iloc[[2, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('AAA', keep=False) + expected = df.iloc[[2, 6]] + tm.assert_frame_equal(result, expected) + + # multiple columns + result = df.drop_duplicates(['AAA', 'B']) + expected = df.iloc[[0, 1, 2, 3, 4, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['AAA', 'B'], keep='last') + expected = df.iloc[[0, 1, 2, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['AAA', 'B'], keep=False) + expected = df.iloc[[0, 1, 2, 6]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_tuple(): + df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1, 1, 2, 2, 2, 2, 1, 2], + 'D': lrange(8)}) + + # single column + result = df.drop_duplicates(('AA', 'AB')) + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(('AA', 'AB'), keep='last') + expected = df.loc[[6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(('AA', 'AB'), keep=False) + expected = df.loc[[]] # empty df + assert len(result) == 0 + tm.assert_frame_equal(result, expected) + + # multi column + expected = df.loc[[0, 1, 2, 3]] + result = df.drop_duplicates((('AA', 'AB'), 'B')) + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_NA(): + # none + df = DataFrame({'A': [None, None, 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.], + 'D': lrange(8)}) + + # single column + result = df.drop_duplicates('A') + expected = df.loc[[0, 2, 3]] + 
tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('A', keep='last') + expected = df.loc[[1, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('A', keep=False) + expected = df.loc[[]] # empty df + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + result = df.drop_duplicates(['A', 'B']) + expected = df.loc[[0, 2, 3, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['A', 'B'], keep='last') + expected = df.loc[[1, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['A', 'B'], keep=False) + expected = df.loc[[6]] + tm.assert_frame_equal(result, expected) + + # nan + df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.], + 'D': lrange(8)}) + + # single column + result = df.drop_duplicates('C') + expected = df[:2] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('C', keep='last') + expected = df.loc[[3, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('C', keep=False) + expected = df.loc[[]] # empty df + tm.assert_frame_equal(result, expected) + assert len(result) == 0 + + # multi column + result = df.drop_duplicates(['C', 'B']) + expected = df.loc[[0, 1, 2, 4]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['C', 'B'], keep='last') + expected = df.loc[[1, 3, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates(['C', 'B'], keep=False) + expected = df.loc[[1]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_NA_for_take_all(): + # none + df = DataFrame({'A': [None, None, 'foo', 'bar', + 'foo', 'baz', 'bar', 'qux'], + 'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]}) + + # single column + result = df.drop_duplicates('A') + expected = df.iloc[[0, 2, 
3, 5, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('A', keep='last') + expected = df.iloc[[1, 4, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('A', keep=False) + expected = df.iloc[[5, 7]] + tm.assert_frame_equal(result, expected) + + # nan + + # single column + result = df.drop_duplicates('C') + expected = df.iloc[[0, 1, 5, 6]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('C', keep='last') + expected = df.iloc[[3, 5, 6, 7]] + tm.assert_frame_equal(result, expected) + + result = df.drop_duplicates('C', keep=False) + expected = df.iloc[[5, 6]] + tm.assert_frame_equal(result, expected) + + +def test_drop_duplicates_inplace(): + orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'bar', 'foo'], + 'B': ['one', 'one', 'two', 'two', + 'two', 'two', 'one', 'two'], + 'C': [1, 1, 2, 2, 2, 2, 1, 2], + 'D': lrange(8)}) + + # single column + df = orig.copy() + df.drop_duplicates('A', inplace=True) + expected = orig[:2] + result = df + tm.assert_frame_equal(result, expected) + + df = orig.copy() + df.drop_duplicates('A', keep='last', inplace=True) + expected = orig.loc[[6, 7]] + result = df + tm.assert_frame_equal(result, expected) + + df = orig.copy() + df.drop_duplicates('A', keep=False, inplace=True) + expected = orig.loc[[]] + result = df + tm.assert_frame_equal(result, expected) + assert len(df) == 0 + + # multi column + df = orig.copy() + df.drop_duplicates(['A', 'B'], inplace=True) + expected = orig.loc[[0, 1, 2, 3]] + result = df + tm.assert_frame_equal(result, expected) + + df = orig.copy() + df.drop_duplicates(['A', 'B'], keep='last', inplace=True) + expected = orig.loc[[0, 5, 6, 7]] + result = df + tm.assert_frame_equal(result, expected) + + df = orig.copy() + df.drop_duplicates(['A', 'B'], keep=False, inplace=True) + expected = orig.loc[[0]] + result = df + tm.assert_frame_equal(result, expected) + + # consider everything + orig2 = orig.loc[:, ['A', 
'B', 'C']].copy() + + df2 = orig2.copy() + df2.drop_duplicates(inplace=True) + # in this case only + expected = orig2.drop_duplicates(['A', 'B']) + result = df2 + tm.assert_frame_equal(result, expected) + + df2 = orig2.copy() + df2.drop_duplicates(keep='last', inplace=True) + expected = orig2.drop_duplicates(['A', 'B'], keep='last') + result = df2 + tm.assert_frame_equal(result, expected) + + df2 = orig2.copy() + df2.drop_duplicates(keep=False, inplace=True) + expected = orig2.drop_duplicates(['A', 'B'], keep=False) + result = df2 + tm.assert_frame_equal(result, expected)
Preparation for #21645 Added some parametrised tests for `duplicated`. In addition, I reordered the tests slightly. Currently, the tests in `tests/frame/test_analytics.py` test the following functions in order: ``` drop_duplicates duplicated duplicated drop_duplicates drop_duplicates [...] ``` Since I am (and will be) adding several tests for `duplicated`, I'd like to group them within the code, and moved the two existing tests for `duplicated` to the top of that section.
https://api.github.com/repos/pandas-dev/pandas/pulls/21898
2018-07-13T20:50:59Z
2018-07-16T10:54:15Z
2018-07-16T10:54:15Z
2018-07-17T07:18:07Z
DOC: move feature to correct whatsnew section; typos
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..0d2a22f3880b4 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -16,7 +16,7 @@ New features ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A ``Series`` based on an ``ExtensionArray`` now supports arithmetic and comparison -operators. (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``: +operators (:issue:`19577`). There are two approaches for providing operator support for an ``ExtensionArray``: 1. Define each of the operators on your ``ExtensionArray`` subclass. 2. Use an operator implementation from pandas that depends on operators that are already defined @@ -80,7 +80,7 @@ Other Enhancements <https://pandas-gbq.readthedocs.io/en/latest/changelog.html#changelog-0-5-0>`__. (:issue:`21627`) - New method :meth:`HDFStore.walk` will recursively walk the group hierarchy of an HDF5 file (:issue:`10932`) -- :func:`read_html` copies cell data across ``colspan``s and ``rowspan``s, and it treats all-``th`` table rows as headers if ``header`` kwarg is not given and there is no ``thead`` (:issue:`17054`) +- :func:`read_html` copies cell data across ``colspan`` and ``rowspan``, and it treats all-``th`` table rows as headers if ``header`` kwarg is not given and there is no ``thead`` (:issue:`17054`) - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. 
This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) @@ -319,7 +319,7 @@ Timezones - Bug in :class:`Timestamp` when passing different string date formats with a timezone offset would produce different timezone offsets (:issue:`12064`) - Bug when comparing a tz-naive :class:`Timestamp` to a tz-aware :class:`DatetimeIndex` which would coerce the :class:`DatetimeIndex` to tz-naive (:issue:`12601`) - Bug in :meth:`Series.truncate` with a tz-aware :class:`DatetimeIndex` which would cause a core dump (:issue:`9243`) -- Bug in :class:`Series` constructor which would coerce tz-aware and tz-naive :class:`Timestamp`s to tz-aware (:issue:`13051`) +- Bug in :class:`Series` constructor which would coerce tz-aware and tz-naive :class:`Timestamp` to tz-aware (:issue:`13051`) - Bug in :class:`Index` with ``datetime64[ns, tz]`` dtype that did not localize integer data correctly (:issue:`20964`) - Bug in :class:`DatetimeIndex` where constructing with an integer and tz would not localize correctly (:issue:`12619`) - Fixed bug where :meth:`DataFrame.describe` and :meth:`Series.describe` on tz-aware datetimes did not show `first` and `last` result (:issue:`21328`)
Splitting up #21645. Motivation is easy: when I started working on #21645, there was nothing under "New Features" (usually having a descriptive section each) except this one-liner. To me, this belongs to "Other Enhancements". Plus found some typos and stuff where the `rst` breaks (cannot have something directly after closing backticks).
https://api.github.com/repos/pandas-dev/pandas/pulls/21897
2018-07-13T20:48:19Z
2018-07-14T15:29:02Z
2018-07-14T15:29:02Z
2018-07-17T07:19:32Z
DEPR: Deprecate Series.to_csv signature
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 5c15c7b6a742f..730a4895055c6 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -477,6 +477,7 @@ Deprecations - :meth:`MultiIndex.to_hierarchical` is deprecated and will be removed in a future version (:issue:`21613`) - :meth:`Series.ptp` is deprecated. Use ``numpy.ptp`` instead (:issue:`21614`) - :meth:`Series.compress` is deprecated. Use ``Series[condition]`` instead (:issue:`18262`) +- The signature of :meth:`Series.to_csv` has been uniformed to that of doc:meth:`DataFrame.to_csv`: the name of the first argument is now 'path_or_buf', the order of subsequent arguments has changed, the 'header' argument now defaults to True. (:issue:`19715`) - :meth:`Categorical.from_codes` has deprecated providing float values for the ``codes`` argument. (:issue:`21767`) - :func:`pandas.read_table` is deprecated. Instead, use :func:`pandas.read_csv` passing ``sep='\t'`` if necessary (:issue:`21948`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cb251d4648925..f2766f45bee2b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1714,107 +1714,6 @@ def to_panel(self): return self._constructor_expanddim(new_mgr) - def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, - columns=None, header=True, index=True, index_label=None, - mode='w', encoding=None, compression='infer', quoting=None, - quotechar='"', line_terminator='\n', chunksize=None, - tupleize_cols=None, date_format=None, doublequote=True, - escapechar=None, decimal='.'): - r"""Write DataFrame to a comma-separated values (csv) file - - Parameters - ---------- - path_or_buf : string or file handle, default None - File path or object, if None is provided the result is returned as - a string. - sep : character, default ',' - Field delimiter for the output file. 
- na_rep : string, default '' - Missing data representation - float_format : string, default None - Format string for floating point numbers - columns : sequence, optional - Columns to write - header : boolean or list of string, default True - Write out the column names. If a list of strings is given it is - assumed to be aliases for the column names - index : boolean, default True - Write row names (index) - index_label : string or sequence, or False, default None - Column label for index column(s) if desired. If None is given, and - `header` and `index` are True, then the index names are used. A - sequence should be given if the DataFrame uses MultiIndex. If - False do not print fields for index names. Use index_label=False - for easier importing in R - mode : str - Python write mode, default 'w' - encoding : string, optional - A string representing the encoding to use in the output file, - defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. - compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, - default 'infer' - If 'infer' and `path_or_buf` is path-like, then detect compression - from the following extensions: '.gz', '.bz2', '.zip' or '.xz' - (otherwise no compression). - - .. versionchanged:: 0.24.0 - 'infer' option added and set to default - line_terminator : string, default ``'\n'`` - The newline character or character sequence to use in the output - file - quoting : optional constant from csv module - defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` - then floats are converted to strings and thus csv.QUOTE_NONNUMERIC - will treat them as non-numeric - quotechar : string (length 1), default '\"' - character used to quote fields - doublequote : boolean, default True - Control quoting of `quotechar` inside a field - escapechar : string (length 1), default None - character used to escape `sep` and `quotechar` when appropriate - chunksize : int or None - rows to write at a time - tupleize_cols : boolean, default False - .. 
deprecated:: 0.21.0 - This argument will be removed and will always write each row - of the multi-index as a separate row in the CSV file. - - Write MultiIndex columns as a list of tuples (if True) or in - the new, expanded format, where each MultiIndex column is a row - in the CSV (if False). - date_format : string, default None - Format string for datetime objects - decimal: string, default '.' - Character recognized as decimal separator. E.g. use ',' for - European data - - """ - - if tupleize_cols is not None: - warnings.warn("The 'tupleize_cols' parameter is deprecated and " - "will be removed in a future version", - FutureWarning, stacklevel=2) - else: - tupleize_cols = False - - from pandas.io.formats.csvs import CSVFormatter - formatter = CSVFormatter(self, path_or_buf, - line_terminator=line_terminator, sep=sep, - encoding=encoding, - compression=compression, quoting=quoting, - na_rep=na_rep, float_format=float_format, - cols=columns, header=header, index=index, - index_label=index_label, mode=mode, - chunksize=chunksize, quotechar=quotechar, - tupleize_cols=tupleize_cols, - date_format=date_format, - doublequote=doublequote, - escapechar=escapechar, decimal=decimal) - formatter.save() - - if path_or_buf is None: - return formatter.path_or_buf.getvalue() - @Appender(_shared_docs['to_excel'] % _shared_doc_kwargs) def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f62605c342702..52b3f79abf5e8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9270,6 +9270,115 @@ def first_valid_index(self): def last_valid_index(self): return self._find_valid_index('last') + def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, + columns=None, header=True, index=True, index_label=None, + mode='w', encoding=None, compression='infer', quoting=None, + quotechar='"', line_terminator='\n', chunksize=None, + 
tupleize_cols=None, date_format=None, doublequote=True, + escapechar=None, decimal='.'): + r"""Write object to a comma-separated values (csv) file + + Parameters + ---------- + path_or_buf : string or file handle, default None + File path or object, if None is provided the result is returned as + a string. + .. versionchanged:: 0.24.0 + Was previously named "path" for Series. + sep : character, default ',' + Field delimiter for the output file. + na_rep : string, default '' + Missing data representation + float_format : string, default None + Format string for floating point numbers + columns : sequence, optional + Columns to write + header : boolean or list of string, default True + Write out the column names. If a list of strings is given it is + assumed to be aliases for the column names + .. versionchanged:: 0.24.0 + Previously defaulted to False for Series. + index : boolean, default True + Write row names (index) + index_label : string or sequence, or False, default None + Column label for index column(s) if desired. If None is given, and + `header` and `index` are True, then the index names are used. A + sequence should be given if the object uses MultiIndex. If + False do not print fields for index names. Use index_label=False + for easier importing in R + mode : str + Python write mode, default 'w' + encoding : string, optional + A string representing the encoding to use in the output file, + defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, + default 'infer' + If 'infer' and `path_or_buf` is path-like, then detect compression + from the following extensions: '.gz', '.bz2', '.zip' or '.xz' + (otherwise no compression). + + .. versionchanged:: 0.24.0 + 'infer' option added and set to default + line_terminator : string, default ``'\n'`` + The newline character or character sequence to use in the output + file + quoting : optional constant from csv module + defaults to csv.QUOTE_MINIMAL. 
If you have set a `float_format` + then floats are converted to strings and thus csv.QUOTE_NONNUMERIC + will treat them as non-numeric + quotechar : string (length 1), default '\"' + character used to quote fields + doublequote : boolean, default True + Control quoting of `quotechar` inside a field + escapechar : string (length 1), default None + character used to escape `sep` and `quotechar` when appropriate + chunksize : int or None + rows to write at a time + tupleize_cols : boolean, default False + .. deprecated:: 0.21.0 + This argument will be removed and will always write each row + of the multi-index as a separate row in the CSV file. + + Write MultiIndex columns as a list of tuples (if True) or in + the new, expanded format, where each MultiIndex column is a row + in the CSV (if False). + date_format : string, default None + Format string for datetime objects + decimal: string, default '.' + Character recognized as decimal separator. E.g. use ',' for + European data + + .. versionchanged:: 0.24.0 + The order of arguments for Series was changed. 
+ """ + + df = self if isinstance(self, ABCDataFrame) else self.to_frame() + + if tupleize_cols is not None: + warnings.warn("The 'tupleize_cols' parameter is deprecated and " + "will be removed in a future version", + FutureWarning, stacklevel=2) + else: + tupleize_cols = False + + from pandas.io.formats.csvs import CSVFormatter + formatter = CSVFormatter(df, path_or_buf, + line_terminator=line_terminator, sep=sep, + encoding=encoding, + compression=compression, quoting=quoting, + na_rep=na_rep, float_format=float_format, + cols=columns, header=header, index=index, + index_label=index_label, mode=mode, + chunksize=chunksize, quotechar=quotechar, + tupleize_cols=tupleize_cols, + date_format=date_format, + doublequote=doublequote, + escapechar=escapechar, decimal=decimal) + formatter.save() + + if path_or_buf is None: + return formatter.path_or_buf.getvalue() + def _doc_parms(cls): """Return a tuple of the doc parms.""" diff --git a/pandas/core/series.py b/pandas/core/series.py index 21dea15772cc0..bfba6367616e2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -17,6 +17,7 @@ from pandas.core.arrays import ExtensionArray from pandas.core.dtypes.common import ( is_categorical_dtype, + is_string_like, is_bool, is_integer, is_integer_dtype, is_float_dtype, @@ -3765,59 +3766,62 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, return result - def to_csv(self, path=None, index=True, sep=",", na_rep='', - float_format=None, header=False, index_label=None, - mode='w', encoding=None, compression='infer', date_format=None, - decimal='.'): - """ - Write Series to a comma-separated values (csv) file - - Parameters - ---------- - path : string or file handle, default None - File path or object, if None is provided the result is returned as - a string. 
- na_rep : string, default '' - Missing data representation - float_format : string, default None - Format string for floating point numbers - header : boolean, default False - Write out series name - index : boolean, default True - Write row names (index) - index_label : string or sequence, default None - Column label for index column(s) if desired. If None is given, and - `header` and `index` are True, then the index names are used. A - sequence should be given if the DataFrame uses MultiIndex. - mode : Python write mode, default 'w' - sep : character, default "," - Field delimiter for the output file. - encoding : string, optional - a string representing the encoding to use if the contents are - non-ascii, for python versions prior to 3 - compression : None or string, default 'infer' - A string representing the compression to use in the output file. - Allowed values are None, 'gzip', 'bz2', 'zip', 'xz', and 'infer'. - This input is only used when the first argument is a filename. - - .. versionchanged:: 0.24.0 - 'infer' option added and set to default - date_format: string, default None - Format string for datetime objects. - decimal: string, default '.' - Character recognized as decimal separator. E.g. 
use ',' for - European data - """ - from pandas.core.frame import DataFrame - df = DataFrame(self) - # result is only a string if no path provided, otherwise None - result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep, - float_format=float_format, header=header, - index_label=index_label, mode=mode, - encoding=encoding, compression=compression, - date_format=date_format, decimal=decimal) - if path is None: - return result + @Appender(generic.NDFrame.to_csv.__doc__) + def to_csv(self, *args, **kwargs): + + names = ["path_or_buf", "sep", "na_rep", "float_format", "columns", + "header", "index", "index_label", "mode", "encoding", + "compression", "quoting", "quotechar", "line_terminator", + "chunksize", "tupleize_cols", "date_format", "doublequote", + "escapechar", "decimal"] + + old_names = ["path_or_buf", "index", "sep", "na_rep", "float_format", + "header", "index_label", "mode", "encoding", + "compression", "date_format", "decimal"] + + if "path" in kwargs: + warnings.warn("The signature of `Series.to_csv` was aligned " + "to that of `DataFrame.to_csv`, and argument " + "'path' will be renamed to 'path_or_buf'.", + FutureWarning, stacklevel=2) + kwargs["path_or_buf"] = kwargs.pop("path") + + if len(args) > 1: + # Either "index" (old signature) or "sep" (new signature) is being + # passed as second argument (while the first is the same) + maybe_sep = args[1] + + if not (is_string_like(maybe_sep) and len(maybe_sep) == 1): + # old signature + warnings.warn("The signature of `Series.to_csv` was aligned " + "to that of `DataFrame.to_csv`. Note that the " + "order of arguments changed, and the new one " + "has 'sep' in first place, for which \"{}\" is " + "not a valid value. The old order will cease to " + "be supported in a future version. 
Please refer " + "to the documentation for `DataFrame.to_csv` " + "when updating your function " + "calls.".format(maybe_sep), + FutureWarning, stacklevel=2) + names = old_names + + pos_args = dict(zip(names[:len(args)], args)) + + for key in pos_args: + if key in kwargs: + raise ValueError("Argument given by name ('{}') and position " + "({})".format(key, names.index(key))) + kwargs[key] = pos_args[key] + + if kwargs.get("header", None) is None: + warnings.warn("The signature of `Series.to_csv` was aligned " + "to that of `DataFrame.to_csv`, and argument " + "'header' will change its default value from False " + "to True: please pass an explicit value to suppress " + "this warning.", FutureWarning, + stacklevel=2) + kwargs["header"] = False # Backwards compatibility. + return self.to_frame().to_csv(**kwargs) @Appender(generic._shared_docs['to_excel'] % _shared_doc_kwargs) def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 9e3b606f31973..e1c3c29ef2846 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -893,22 +893,27 @@ def test_to_csv_line_terminators(self): def test_to_csv_from_csv_categorical(self): - # CSV with categoricals should result in the same output as when one - # would add a "normal" Series/DataFrame. - s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])) - s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) + # CSV with categoricals should result in the same output + # as when one would add a "normal" Series/DataFrame. 
+ s = Series(pd.Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])) + s2 = Series(["a", "b", "b", "a", "a", "c", "c", "c"]) res = StringIO() - s.to_csv(res) + + s.to_csv(res, header=False) exp = StringIO() - s2.to_csv(exp) + + s2.to_csv(exp, header=False) assert res.getvalue() == exp.getvalue() df = DataFrame({"s": s}) df2 = DataFrame({"s": s2}) + res = StringIO() df.to_csv(res) + exp = StringIO() df2.to_csv(exp) + assert res.getvalue() == exp.getvalue() def test_to_csv_path_is_none(self): diff --git a/pandas/tests/io/test_compression.py b/pandas/tests/io/test_compression.py index 76788ced44e84..1806ddd2bbcc6 100644 --- a/pandas/tests/io/test_compression.py +++ b/pandas/tests/io/test_compression.py @@ -1,4 +1,5 @@ import os +import warnings import pytest @@ -7,6 +8,14 @@ import pandas.util.testing as tm +def catch_to_csv_depr(): + # Catching warnings because Series.to_csv has + # been deprecated. Remove this context when + # Series.to_csv has been aligned. + + return warnings.catch_warnings(record=True) + + @pytest.mark.parametrize('obj', [ pd.DataFrame(100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], @@ -15,11 +24,12 @@ @pytest.mark.parametrize('method', ['to_pickle', 'to_json', 'to_csv']) def test_compression_size(obj, method, compression_only): with tm.ensure_clean() as path: - getattr(obj, method)(path, compression=compression_only) - compressed_size = os.path.getsize(path) - getattr(obj, method)(path, compression=None) - uncompressed_size = os.path.getsize(path) - assert uncompressed_size > compressed_size + with catch_to_csv_depr(): + getattr(obj, method)(path, compression=compression_only) + compressed_size = os.path.getsize(path) + getattr(obj, method)(path, compression=None) + uncompressed_size = os.path.getsize(path) + assert uncompressed_size > compressed_size @pytest.mark.parametrize('obj', [ @@ -31,16 +41,18 @@ def test_compression_size(obj, method, compression_only): def test_compression_size_fh(obj, method, 
compression_only): with tm.ensure_clean() as path: f, handles = icom._get_handle(path, 'w', compression=compression_only) - with f: - getattr(obj, method)(f) - assert not f.closed - assert f.closed - compressed_size = os.path.getsize(path) + with catch_to_csv_depr(): + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed + compressed_size = os.path.getsize(path) with tm.ensure_clean() as path: f, handles = icom._get_handle(path, 'w', compression=None) - with f: - getattr(obj, method)(f) - assert not f.closed + with catch_to_csv_depr(): + with f: + getattr(obj, method)(f) + assert not f.closed assert f.closed uncompressed_size = os.path.getsize(path) assert uncompressed_size > compressed_size diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 814d794d45c18..cbf9bff06ad34 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -37,7 +37,7 @@ def read_csv(self, path, **kwargs): def test_from_csv_deprecation(self): # see gh-17812 with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): @@ -45,10 +45,28 @@ def test_from_csv_deprecation(self): depr_ts = Series.from_csv(path) assert_series_equal(depr_ts, ts) + @pytest.mark.parametrize("arg", ["path", "header", "both"]) + def test_to_csv_deprecation(self, arg): + # see gh-19715 + with ensure_clean() as path: + if arg == "path": + kwargs = dict(path=path, header=False) + elif arg == "header": + kwargs = dict(path_or_buf=path) + else: # Both discrepancies match. + kwargs = dict(path=path) + + with tm.assert_produces_warning(FutureWarning): + self.ts.to_csv(**kwargs) + + # Make sure roundtrip still works. 
+ ts = self.read_csv(path) + assert_series_equal(self.ts, ts, check_names=False) + def test_from_csv(self): with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) ts = self.read_csv(path) assert_series_equal(self.ts, ts, check_names=False) @@ -65,7 +83,7 @@ def test_from_csv(self): ts_h = self.read_csv(path, header=0) assert ts_h.name == "ts" - self.series.to_csv(path) + self.series.to_csv(path, header=False) series = self.read_csv(path) assert_series_equal(self.series, series, check_names=False) @@ -92,13 +110,13 @@ def test_to_csv(self): import io with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) with io.open(path, newline=None) as f: lines = f.readlines() assert (lines[1] != '\n') - self.ts.to_csv(path, index=False) + self.ts.to_csv(path, index=False, header=False) arr = np.loadtxt(path) assert_almost_equal(arr, self.ts.values) @@ -106,7 +124,7 @@ def test_to_csv_unicode_index(self): buf = StringIO() s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")]) - s.to_csv(buf, encoding="UTF-8") + s.to_csv(buf, encoding="UTF-8", header=False) buf.seek(0) s2 = self.read_csv(buf, index_col=0, encoding="UTF-8") @@ -116,7 +134,7 @@ def test_to_csv_float_format(self): with ensure_clean() as filename: ser = Series([0.123456, 0.234567, 0.567567]) - ser.to_csv(filename, float_format="%.2f") + ser.to_csv(filename, float_format="%.2f", header=False) rs = self.read_csv(filename) xp = Series([0.12, 0.23, 0.57]) @@ -128,14 +146,14 @@ def test_to_csv_list_entries(self): split = s.str.split(r'\s+and\s+') buf = StringIO() - split.to_csv(buf) + split.to_csv(buf, header=False) def test_to_csv_path_is_none(self): # GH 8215 # Series.to_csv() was returning None, inconsistent with # DataFrame.to_csv() which returned string s = Series([1, 2, 3]) - csv_str = s.to_csv(path=None) + csv_str = s.to_csv(path_or_buf=None, header=False) assert isinstance(csv_str, str) @pytest.mark.parametrize('s,encoding', [
- [x] closes #19715 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry <strike>Just a proof of concept for discussion (misses docs, whatsnew, new tests). Based on #21868</strike> @dahlbaek @gfyoung
https://api.github.com/repos/pandas-dev/pandas/pulls/21896
2018-07-13T20:22:08Z
2018-08-13T13:37:53Z
2018-08-13T13:37:53Z
2018-08-13T13:43:01Z
BUG: Fixes unwanted casting in .isin (GH21804)
diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index a88c22e3d01f7..9c2ef7ecb601c 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -31,7 +31,7 @@ Bug Fixes **Conversion** -- +- Bug where unwanted casting of float to int in :func:`isin` led to incorrect comparison outcome (:issue:`21804`) - **Indexing** diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6e49e8044ff25..3833917f2e419 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -23,7 +23,7 @@ is_period_dtype, is_numeric_dtype, is_float_dtype, is_bool_dtype, needs_i8_conversion, - is_datetimetz, + is_datetimetz, is_datetime_or_timedelta_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_datetimelike, is_interval_dtype, is_scalar, is_list_like, @@ -39,6 +39,8 @@ from pandas.util._decorators import (Appender, Substitution, deprecate_kwarg) +from pandas._libs.tslibs.timestamps import Timestamp + _shared_docs = {} @@ -415,33 +417,40 @@ def isin(comps, values): comps = com._values_from_object(comps) comps, dtype, _ = _ensure_data(comps) - values, _, _ = _ensure_data(values, dtype=dtype) + + is_time_like = lambda x: (is_datetime_or_timedelta_dtype(x) + or isinstance(x, Timestamp)) + + is_int = lambda x: ((x == np.int64) or (x == int)) + is_float = lambda x: ((x == np.float64) or (x == float)) + + if is_time_like(dtype): + values, _, _ = _ensure_data(values, dtype=dtype) + else: + values, _, _ = _ensure_data(values) + + comps_types = set(type(v) for v in comps) + values_types = set(type(v) for v in values) # faster for larger cases to use np.in1d - f = lambda x, y: htable.ismember_object(x, values) + f = lambda x, y: htable.ismember_object(x.astype(object), y.astype(object)) # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception if len(comps) > 1000000 and not is_object_dtype(comps): f = lambda x, y: np.in1d(x, y) - elif is_integer_dtype(comps): - try: 
+ elif len(comps_types) == len(values_types) == 1: + comps_types = comps_types.pop() + values_types = values_types.pop() + if (is_int(comps_types) and is_int(values_types)): values = values.astype('int64', copy=False) comps = comps.astype('int64', copy=False) f = lambda x, y: htable.ismember_int64(x, y) - except (TypeError, ValueError): - values = values.astype(object) - comps = comps.astype(object) - - elif is_float_dtype(comps): - try: + elif (is_float(comps_types) and is_float(values_types)): values = values.astype('float64', copy=False) comps = comps.astype('float64', copy=False) checknull = isna(values).any() f = lambda x, y: htable.ismember_float64(x, y, checknull) - except (TypeError, ValueError): - values = values.astype(object) - comps = comps.astype(object) return f(comps, values) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 25e64aa82cc36..8ab907a9723bd 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -509,42 +509,23 @@ def test_invalid(self): pytest.raises(TypeError, lambda: algos.isin(1, [1])) pytest.raises(TypeError, lambda: algos.isin([1], 1)) - def test_basic(self): - - result = algos.isin([1, 2], [1]) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(np.array([1, 2]), [1]) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series([1, 2]), [1]) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series([1, 2]), Series([1])) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series([1, 2]), set([1])) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(['a', 'b'], ['a']) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series(['a', 'b']), Series(['a'])) 
- expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series(['a', 'b']), set(['a'])) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(['a', 'b'], [1]) - expected = np.array([False, False]) + @pytest.mark.parametrize("comps,values,expected", [ + ([1, 2], [1], [True, False]), + ([1, 0], [1, 0.5], [True, False]), + ([1.0, 0], [1, 0.5], [True, False]), + ([1.0, 0.0], [1, 0], [True, True]), + (np.array([1, 2]), [1], [True, False]), + (Series([1, 2]), [1], [True, False]), + (Series([1, 2]), Series([1]), [True, False]), + (Series([1, 2]), set([1]), [True, False]), + (['a', 'b'], ['a'], [True, False]), + (Series(['a', 'b']), Series(['a']), [True, False]), + (Series(['a', 'b']), set(['a']), [True, False]), + (['a', 'b'], [1], [False, False]) + ]) + def test_basic(self, comps, values, expected): + result = algos.isin(comps, values) + expected = np.array(expected) tm.assert_numpy_array_equal(result, expected) def test_i8(self):
- [x] closes #21804 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Results from running asvs on algorithms are: ``` before after ratio [365eac4d] [ee66578f] + 1.91±0ms 2.28±0.02ms 1.20 algorithms.Hashing.time_series_timedeltas - 2.34±0.02ms 1.88±0ms 0.80 algorithms.Hashing.time_series_int SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21893
2018-07-13T13:49:03Z
2018-11-23T03:31:06Z
null
2018-11-25T05:50:19Z
Multi line comment
diff --git a/doc/make.py b/doc/make.py index 4d54a2415a194..c0fc74cb10eaa 100755 --- a/doc/make.py +++ b/doc/make.py @@ -165,8 +165,8 @@ def _copy_generated_docstring(self): if os.path.exists(fname): try: - # copying to make sure sphinx always thinks it is new - # and needs to be re-generated (to pick source code changes) + """copying to make sure sphinx always thinks it is new + and needs to be re-generated (to pick source code changes)""" shutil.copy(fname, temp_dir) except: # noqa pass
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21889
2018-07-13T06:46:06Z
2018-07-13T09:29:44Z
null
2023-05-11T01:18:05Z
Multi line comment
diff --git a/doc/make.py b/doc/make.py index 4d54a2415a194..3a737feaa7197 100755 --- a/doc/make.py +++ b/doc/make.py @@ -355,10 +355,10 @@ def main(): raise ValueError('Unknown command {}. Available options: {}'.format( args.command, ', '.join(cmds))) - # Below we update both os.environ and sys.path. The former is used by - # external libraries (namely Sphinx) to compile this module and resolve - # the import of `python_path` correctly. The latter is used to resolve - # the import within the module, injecting it into the global namespace + """Below we update both os.environ and sys.path. The former is used by + external libraries (namely Sphinx) to compile this module and resolve + the import of `python_path` correctly. The latter is used to resolve + the import within the module, injecting it into the global namespace""" os.environ['PYTHONPATH'] = args.python_path sys.path.append(args.python_path) globals()['pandas'] = importlib.import_module('pandas')
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21888
2018-07-13T06:36:54Z
2018-07-13T09:31:22Z
null
2023-05-11T01:18:05Z
BUG: Fixes unwanted casting in .isin (GH21804)
diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index e530ece2e12c5..19d745121ce17 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -924,55 +924,6 @@ bins, with ``NaN`` representing a missing value similar to other dtypes. pd.cut([0, 3, 5, 1], bins=c.categories) - -Generating Ranges of Intervals -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -If we need intervals on a regular frequency, we can use the :func:`interval_range` function -to create an ``IntervalIndex`` using various combinations of ``start``, ``end``, and ``periods``. -The default frequency for ``interval_range`` is a 1 for numeric intervals, and calendar day for -datetime-like intervals: - -.. ipython:: python - - pd.interval_range(start=0, end=5) - - pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4) - - pd.interval_range(end=pd.Timedelta('3 days'), periods=3) - -The ``freq`` parameter can used to specify non-default frequencies, and can utilize a variety -of :ref:`frequency aliases <timeseries.offset_aliases>` with datetime-like intervals: - -.. ipython:: python - - pd.interval_range(start=0, periods=5, freq=1.5) - - pd.interval_range(start=pd.Timestamp('2017-01-01'), periods=4, freq='W') - - pd.interval_range(start=pd.Timedelta('0 days'), periods=3, freq='9H') - -Additionally, the ``closed`` parameter can be used to specify which side(s) the intervals -are closed on. Intervals are closed on the right side by default. - -.. ipython:: python - - pd.interval_range(start=0, end=4, closed='both') - - pd.interval_range(start=0, end=4, closed='neither') - -.. versionadded:: 0.23.0 - -Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced -intervals from ``start`` to ``end`` inclusively, with ``periods`` number of elements -in the resulting ``IntervalIndex``: - -.. 
ipython:: python - - pd.interval_range(start=0, end=6, periods=4) - - pd.interval_range(pd.Timestamp('2018-01-01'), pd.Timestamp('2018-02-28'), periods=3) - Miscellaneous indexing FAQ -------------------------- diff --git a/doc/source/api.rst b/doc/source/api.rst index fff944651588e..f3b9529f841a8 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1459,6 +1459,7 @@ Modifying and Computations Index.is_floating Index.is_integer Index.is_interval + Index.is_lexsorted_for_tuple Index.is_mixed Index.is_numeric Index.is_object @@ -1470,19 +1471,11 @@ Modifying and Computations Index.where Index.take Index.putmask + Index.set_names Index.unique Index.nunique Index.value_counts -Compatibility with MultiIndex -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. autosummary:: - :toctree: generated/ - - Index.set_names - Index.is_lexsorted_for_tuple - Index.droplevel - Missing Values ~~~~~~~~~~~~~~ .. autosummary:: diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index e602e45784f4a..d07d5dc2066dd 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -352,8 +352,8 @@ You can convert a ``Timedelta`` to an `ISO 8601 Duration`_ string with the TimedeltaIndex -------------- -To generate an index with time delta, you can use either the :class:`TimedeltaIndex` or -the :func:`timedelta_range` constructor. +To generate an index with time delta, you can use either the ``TimedeltaIndex`` or +the ``timedelta_range`` constructor. Using ``TimedeltaIndex`` you can pass string-like, ``Timedelta``, ``timedelta``, or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent missing values. @@ -394,23 +394,9 @@ The ``freq`` parameter can passed a variety of :ref:`frequency aliases <timeseri .. ipython:: python + pd.timedelta_range(start='1 days', periods=5, freq='D') pd.timedelta_range(start='1 days', end='2 days', freq='30T') - pd.timedelta_range(start='1 days', periods=5, freq='2D5H') - - -.. 
versionadded:: 0.23.0 - -Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced -timedeltas from ``start`` to ``end`` inclusively, with ``periods`` number of elements -in the resulting ``TimedeltaIndex``: - -.. ipython:: python - - pd.timedelta_range('0 days', '4 days', periods=5) - - pd.timedelta_range('0 days', '4 days', periods=10) - Using the TimedeltaIndex ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 9e01296d9c9c7..15d01fa869f4c 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -406,18 +406,6 @@ of those specified will not be generated: pd.bdate_range(start=start, periods=20) -.. versionadded:: 0.23.0 - -Specifying ``start``, ``end``, and ``periods`` will generate a range of evenly spaced -dates from ``start`` to ``end`` inclusively, with ``periods`` number of elements in the -resulting ``DatetimeIndex``: - -.. ipython:: python - - pd.date_range('2018-01-01', '2018-01-05', periods=5) - - pd.date_range('2018-01-01', '2018-01-05', periods=10) - .. 
_timeseries.custom-freq-ranges: Custom Frequency Ranges diff --git a/doc/source/whatsnew/v0.23.1.txt b/doc/source/whatsnew/v0.23.1.txt index cf60e86553fe3..7447c0abd5595 100644 --- a/doc/source/whatsnew/v0.23.1.txt +++ b/doc/source/whatsnew/v0.23.1.txt @@ -88,7 +88,12 @@ Performance Improvements Bug Fixes ~~~~~~~~~ +<<<<<<< HEAD +Groupby/Resample/Rolling +^^^^^^^^^^^^^^^^^^^^^^^^ +======= **Groupby/Resample/Rolling** +>>>>>>> upstream/master - Bug in :func:`DataFrame.agg` where applying multiple aggregation functions to a :class:`DataFrame` with duplicated column names would cause a stack overflow (:issue:`21063`) - Bug in :func:`pandas.core.groupby.GroupBy.ffill` and :func:`pandas.core.groupby.GroupBy.bfill` where the fill within a grouping would not always be applied as intended due to the implementations' use of a non-stable sort (:issue:`21207`) diff --git a/doc/source/whatsnew/v0.23.4.txt b/doc/source/whatsnew/v0.23.4.txt index a88c22e3d01f7..2dd9e44499d68 100644 --- a/doc/source/whatsnew/v0.23.4.txt +++ b/doc/source/whatsnew/v0.23.4.txt @@ -31,7 +31,7 @@ Bug Fixes **Conversion** -- +- Unwanted casting of float to int in :func:`isin` (:issue:`21804`) - **Indexing** diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index b9405b15a0980..5831c94876ce4 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -241,22 +241,22 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1: if unit == 'D' or unit == 'd': m = 1000000000L * 86400 - p = 9 + p = 6 elif unit == 'h': m = 1000000000L * 3600 - p = 9 + p = 6 elif unit == 'm': m = 1000000000L * 60 - p = 9 + p = 6 elif unit == 's': m = 1000000000L - p = 9 + p = 6 elif unit == 'ms': m = 1000000L - p = 6 + p = 3 elif unit == 'us': m = 1000L - p = 3 + p = 0 elif unit == 'ns' or unit is None: m = 1L p = 0 @@ -270,10 +270,10 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? 
-1: # cast the unit, multiply base/frace separately # to avoid precision issues from float -> int base = <int64_t> ts - frac = ts - base + frac = ts -base if p: frac = round(frac, p) - return <int64_t> (base * m) + <int64_t> (frac * m) + return <int64_t> (base *m) + <int64_t> (frac *m) cdef inline _decode_if_necessary(object ts): @@ -799,32 +799,7 @@ cdef class _Timedelta(timedelta): @property def delta(self): - """ - Return the timedelta in nanoseconds (ns), for internal compatibility. - - Returns - ------- - int - Timedelta in nanoseconds. - - Examples - -------- - >>> td = pd.Timedelta('1 days 42 ns') - >>> td.delta - 86400000000042 - - >>> td = pd.Timedelta('3 s') - >>> td.delta - 3000000000 - - >>> td = pd.Timedelta('3 ms 5 us') - >>> td.delta - 3005000 - - >>> td = pd.Timedelta(42, unit='ns') - >>> td.delta - 42 - """ + """ return out delta in ns (for internal compat) """ return self.value @property diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index 7a853d575aa69..c638b9e4ea117 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -12,8 +12,7 @@ class DirNamesMixin(object): _accessors = frozenset([]) - _deprecations = frozenset( - ['asobject', 'base', 'data', 'flags', 'itemsize', 'strides']) + _deprecations = frozenset(['asobject']) def _dir_deletions(self): """ delete unwanted __dir__ for this object """ diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6e49e8044ff25..989fdd3098f68 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -23,7 +23,7 @@ is_period_dtype, is_numeric_dtype, is_float_dtype, is_bool_dtype, needs_i8_conversion, - is_datetimetz, + is_datetimetz, is_datetime_or_timedelta_dtype, is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_datetimelike, is_interval_dtype, is_scalar, is_list_like, @@ -39,6 +39,8 @@ from pandas.util._decorators import (Appender, Substitution, deprecate_kwarg) +from pandas._libs.tslibs.timestamps import Timestamp + 
_shared_docs = {} @@ -413,35 +415,41 @@ def isin(comps, values): return comps._values.isin(values) comps = com._values_from_object(comps) + comps, dtype_comps, _ = _ensure_data(comps) - comps, dtype, _ = _ensure_data(comps) - values, _, _ = _ensure_data(values, dtype=dtype) + is_time_like = lambda x: (is_datetime_or_timedelta_dtype(x) + or isinstance(x, Timestamp)) - # faster for larger cases to use np.in1d - f = lambda x, y: htable.ismember_object(x, values) + is_int = lambda x: ((x == np.int64) or (x == int)) + + is_float = lambda x: ((x == np.float64) or (x == float)) + + f = lambda x, y: htable.ismember_object(x.astype(object), y.astype(object)) # GH16012 # Ensure np.in1d doesn't get object types or it *may* throw an exception + # faster for larger cases to use np.in1d if len(comps) > 1000000 and not is_object_dtype(comps): f = lambda x, y: np.in1d(x, y) - elif is_integer_dtype(comps): - try: - values = values.astype('int64', copy=False) - comps = comps.astype('int64', copy=False) - f = lambda x, y: htable.ismember_int64(x, y) - except (TypeError, ValueError): - values = values.astype(object) - comps = comps.astype(object) - elif is_float_dtype(comps): - try: - values = values.astype('float64', copy=False) - comps = comps.astype('float64', copy=False) - checknull = isna(values).any() - f = lambda x, y: htable.ismember_float64(x, y, checknull) - except (TypeError, ValueError): - values = values.astype(object) - comps = comps.astype(object) + if is_time_like(dtype_comps): + values, _, _ = _ensure_data(values, dtype=dtype_comps) + else: + values, dtype_values, _ = _ensure_data(values) + comps_types = set([type(v) for v in comps]) + values_types = set([type(v) for v in values]) + if len(comps_types) == len(values_types) == 1: + comps_types = comps_types.pop() + values_types = values_types.pop() + if (is_int(comps_types) and is_int(values_types)): + values = values.astype('int64', copy=False) + comps = comps.astype('int64', copy=False) + f = lambda x, y: 
htable.ismember_int64(x, y) + elif (is_float(comps_types) and is_float(values_types)): + values = values.astype('float64', copy=False) + comps = comps.astype('float64', copy=False) + checknull = isna(values).any() + f = lambda x, y: htable.ismember_float64(x, y, checknull) return f(comps, values) diff --git a/pandas/core/base.py b/pandas/core/base.py index 1226662824eb5..6ec2c2ce1e043 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -114,7 +114,7 @@ def _reset_cache(self, key=None): def __sizeof__(self): """ - Generates the total memory usage for an object that returns + Generates the total memory usage for a object that returns either a value or Series of values """ if hasattr(self, 'memory_usage'): diff --git a/pandas/core/common.py b/pandas/core/common.py index 0a33873630d27..970c91913de4d 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -55,11 +55,8 @@ def flatten(l): def _consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: - try: - if obj.name != name: - name = None - except ValueError: - name = None + if obj.name != name: + return None return name diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6380944338010..b2ce8fd393dc1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4193,8 +4193,9 @@ def _maybe_casted_values(index, labels=None): if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if len(level) < self.index.nlevels: - new_index = self.index.droplevel(level) + if isinstance(self.index, MultiIndex): + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if not drop: if isinstance(self.index, MultiIndex): @@ -4554,10 +4555,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, axis = self._get_axis_number(axis) labels = self._get_axis(axis) - # make sure that the axis is lexsorted to start - # if not we need to reconstruct to get the correct indexer - labels = 
labels._sort_levels_monotonic() - if level is not None: + if level: new_axis, indexer = labels.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) @@ -4565,6 +4563,9 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, elif isinstance(labels, MultiIndex): from pandas.core.sorting import lexsort_indexer + # make sure that the axis is lexsorted to start + # if not we need to reconstruct to get the correct indexer + labels = labels._sort_levels_monotonic() indexer = lexsort_indexer(labels._get_labels_for_sorting(), orders=ascending, na_position=na_position) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 78fa6f8217157..28b29d87109da 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3134,60 +3134,6 @@ def _get_level_values(self, level): get_level_values = _get_level_values - def droplevel(self, level=0): - """ - Return index with requested level(s) removed. If resulting index has - only 1 level left, the result will be of Index type, not MultiIndex. - - .. versionadded:: 0.23.1 (support for non-MultiIndex) - - Parameters - ---------- - level : int, str, or list-like, default 0 - If a string is given, must be the name of a level - If list-like, elements must be names or indexes of levels. 
- - Returns - ------- - index : Index or MultiIndex - """ - if not isinstance(level, (tuple, list)): - level = [level] - - levnums = sorted(self._get_level_number(lev) for lev in level)[::-1] - - if len(level) == 0: - return self - if len(level) >= self.nlevels: - raise ValueError("Cannot remove {} levels from an index with {} " - "levels: at least one level must be " - "left.".format(len(level), self.nlevels)) - # The two checks above guarantee that here self is a MultiIndex - - new_levels = list(self.levels) - new_labels = list(self.labels) - new_names = list(self.names) - - for i in levnums: - new_levels.pop(i) - new_labels.pop(i) - new_names.pop(i) - - if len(new_levels) == 1: - - # set nan if needed - mask = new_labels[0] == -1 - result = new_levels[0].take(new_labels[0]) - if mask.any(): - result = result.putmask(mask, np.nan) - - result.name = new_names[0] - return result - else: - from .multi import MultiIndex - return MultiIndex(levels=new_levels, labels=new_labels, - names=new_names, verify_integrity=False) - _index_shared_docs['get_indexer'] = """ Compute indexer and mask for new index given the current index. The indexer should be then used as an input to ndarray.take to align the diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a791ce1d87264..8c49c0e58bf4a 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1739,6 +1739,52 @@ def _drop_from_level(self, labels, level): return self[mask] + def droplevel(self, level=0): + """ + Return Index with requested level removed. If MultiIndex has only 2 + levels, the result will be of Index type not MultiIndex. 
+ + Parameters + ---------- + level : int/level name or list thereof + + Notes + ----- + Does not check if result index is unique or not + + Returns + ------- + index : Index or MultiIndex + """ + levels = level + if not isinstance(levels, (tuple, list)): + levels = [level] + + new_levels = list(self.levels) + new_labels = list(self.labels) + new_names = list(self.names) + + levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1] + + for i in levnums: + new_levels.pop(i) + new_labels.pop(i) + new_names.pop(i) + + if len(new_levels) == 1: + + # set nan if needed + mask = new_labels[0] == -1 + result = new_levels[0].take(new_labels[0]) + if mask.any(): + result = result.putmask(mask, np.nan) + + result.name = new_names[0] + return result + else: + return MultiIndex(levels=new_levels, labels=new_labels, + names=new_names, verify_integrity=False) + def swaplevel(self, i=-2, j=-1): """ Swap level i with level j. diff --git a/pandas/core/series.py b/pandas/core/series.py index 0bdb9d9cc23a6..557159bcf3739 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1207,8 +1207,9 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] - if len(level) < self.index.nlevels: - new_index = self.index.droplevel(level) + if isinstance(self.index, MultiIndex): + if len(level) < self.index.nlevels: + new_index = self.index.droplevel(level) if inplace: self.index = new_index @@ -2652,7 +2653,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, axis = self._get_axis_number(axis) index = self.index - if level is not None: + if level: new_index, indexer = index.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining) elif isinstance(index, MultiIndex): @@ -3206,8 +3207,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): # handle ufuncs and lambdas if kwds or args and not 
isinstance(func, np.ufunc): - def f(x): - return func(x, *args, **kwds) + f = lambda x: func(x, *args, **kwds) else: f = func diff --git a/pandas/core/strings.py b/pandas/core/strings.py index e4765c00f80fd..ac5fa750fb2e6 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -241,7 +241,7 @@ def str_count(arr, pat, flags=0): Escape ``'$'`` to find the literal dollar sign. >>> s = pd.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat']) - >>> s.str.count('\\$') + >>> s.str.count('\$') 0 1 1 0 2 1 @@ -358,7 +358,7 @@ def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True): Returning any digit using regular expression. - >>> s1.str.contains('\\d', regex=True) + >>> s1.str.contains('\d', regex=True) 0 False 1 False 2 False diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 9ca2b7e3c8a6a..81805222b8f01 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -399,13 +399,11 @@ def test_getitem_setitem_ix_negative_integers(self): df = DataFrame(np.random.randn(8, 4)) # ix does label-based indexing when having an integer index - with catch_warnings(record=True): - with pytest.raises(KeyError): - df.ix[[-1]] + with pytest.raises(KeyError): + df.ix[[-1]] - with catch_warnings(record=True): - with pytest.raises(KeyError): - df.ix[:, [-1]] + with pytest.raises(KeyError): + df.ix[:, [-1]] # #1942 a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)]) diff --git a/pandas/tests/frame/test_reshape.py b/pandas/tests/frame/test_reshape.py index ebf6c5e37b916..538a3fb27ffe2 100644 --- a/pandas/tests/frame/test_reshape.py +++ b/pandas/tests/frame/test_reshape.py @@ -871,23 +871,6 @@ def test_stack_preserve_categorical_dtype(self): tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("level", [0, 'baz']) - def test_unstack_swaplevel_sortlevel(self, level): - # GH 20994 - mi = pd.MultiIndex.from_product([[0], ['d', 'c']], - names=['bar', 'baz']) - df = 
pd.DataFrame([[0, 2], [1, 3]], index=mi, columns=['B', 'A']) - df.columns.name = 'foo' - - expected = pd.DataFrame([ - [3, 1, 2, 0]], columns=pd.MultiIndex.from_tuples([ - ('c', 'A'), ('c', 'B'), ('d', 'A'), ('d', 'B')], names=[ - 'baz', 'foo'])) - expected.index.name = 'bar' - - result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level) - tm.assert_frame_equal(result, expected) - def test_unstack_fill_frame_object(): # GH12815 Test unstacking with object. diff --git a/pandas/tests/frame/test_sorting.py b/pandas/tests/frame/test_sorting.py index 599ae683f914b..b60eb89e87da5 100644 --- a/pandas/tests/frame/test_sorting.py +++ b/pandas/tests/frame/test_sorting.py @@ -550,36 +550,18 @@ def test_sort_index(self): expected = frame.iloc[:, ::-1] assert_frame_equal(result, expected) - @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 - def test_sort_index_multiindex(self, level): + def test_sort_index_multiindex(self): # GH13496 # sort rows by specified level of multi-index - mi = MultiIndex.from_tuples([ - [2, 1, 3], [2, 1, 2], [1, 1, 1]], names=list('ABC')) - df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mi) - - expected_mi = MultiIndex.from_tuples([ - [1, 1, 1], - [2, 1, 2], - [2, 1, 3]], names=list('ABC')) - expected = pd.DataFrame([ - [5, 6], - [3, 4], - [1, 2]], index=expected_mi) - result = df.sort_index(level=level) - assert_frame_equal(result, expected) + mi = MultiIndex.from_tuples([[2, 1, 3], [1, 1, 1]], names=list('ABC')) + df = DataFrame([[1, 2], [3, 4]], mi) - # sort_remaining=False - expected_mi = MultiIndex.from_tuples([ - [1, 1, 1], - [2, 1, 3], - [2, 1, 2]], names=list('ABC')) - expected = pd.DataFrame([ - [5, 6], - [1, 2], - [3, 4]], index=expected_mi) - result = df.sort_index(level=level, sort_remaining=False) + # MI sort, but no level: sort_level has no effect + mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) + df = DataFrame([[1, 2], [3, 4]], mi) + result = df.sort_index(sort_remaining=False) + expected = 
df.sort_index() assert_frame_equal(result, expected) def test_sort_index_intervalindex(self): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index fa9f9fc90387a..9271a79fe0131 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -703,14 +703,6 @@ def test_unit_mixed(self, cache): with pytest.raises(ValueError): pd.to_datetime(arr, errors='raise', cache=cache) - @pytest.mark.parametrize('cache', [True, False]) - def test_unit_rounding(self, cache): - # GH 14156: argument will incur floating point errors but no - # premature rounding - result = pd.to_datetime(1434743731.8770001, unit='s', cache=cache) - expected = pd.Timestamp('2015-06-19 19:55:31.877000093') - assert result == expected - @pytest.mark.parametrize('cache', [True, False]) def test_dataframe(self, cache): diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 639e51e9361ab..f1fb30990d8da 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -245,25 +245,6 @@ def test_constructor_int_dtype_nan(self): result = Index(data, dtype='float') tm.assert_index_equal(result, expected) - def test_droplevel(self, indices): - # GH 21115 - if isinstance(indices, MultiIndex): - # Tested separately in test_multi.py - return - - assert indices.droplevel([]).equals(indices) - - for level in indices.name, [indices.name]: - if isinstance(indices.name, tuple) and level is indices.name: - # GH 21121 : droplevel with tuple name - continue - with pytest.raises(ValueError): - indices.droplevel(level) - - for level in 'wrong', ['wrong']: - with pytest.raises(KeyError): - indices.droplevel(level) - @pytest.mark.parametrize("dtype", ['int64', 'uint64']) def test_constructor_int_dtype_nan_raises(self, dtype): # see gh-15187 @@ -2131,17 +2112,6 @@ def test_get_duplicates_deprecated(self): with tm.assert_produces_warning(FutureWarning): 
index.get_duplicates() - def test_tab_complete_warning(self, ip): - # https://github.com/pandas-dev/pandas/issues/16409 - pytest.importorskip('IPython', minversion="6.0.0") - from IPython.core.completer import provisionalcompleter - - code = "import pandas as pd; idx = pd.Index([1, 2])" - ip.run_code(code) - with tm.assert_produces_warning(None): - with provisionalcompleter('ignore'): - list(ip.Completer.completions('idx.', 4)) - class TestMixedIntIndex(Base): # Mostly the tests from common.py for which the results differ diff --git a/pandas/tests/indexing/test_indexing.py b/pandas/tests/indexing/test_indexing.py index 9c992770fc64c..7a17408d4468f 100644 --- a/pandas/tests/indexing/test_indexing.py +++ b/pandas/tests/indexing/test_indexing.py @@ -197,7 +197,7 @@ def test_dups_fancy_indexing(self): # List containing only missing label dfnu = DataFrame(np.random.randn(5, 3), index=list('AABCD')) with pytest.raises(KeyError): - dfnu.loc[['E']] + dfnu.ix[['E']] # ToDo: check_index_type can be True after GH 11497 diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 9e871d27f0ce8..17ef03f6a88e6 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -54,21 +54,20 @@ def test_bad_stream_exception(self): # and C engine will raise UnicodeDecodeError instead of # c engine raising ParserError and swallowing exception # that caused read to fail. 
+ handle = open(self.csv_shiftjs, "rb") codec = codecs.lookup("utf-8") utf8 = codecs.lookup('utf-8') - + # stream must be binary UTF8 + stream = codecs.StreamRecoder( + handle, utf8.encode, utf8.decode, codec.streamreader, + codec.streamwriter) if compat.PY3: msg = "'utf-8' codec can't decode byte" else: msg = "'utf8' codec can't decode byte" - - # stream must be binary UTF8 - with open(self.csv_shiftjs, "rb") as handle, codecs.StreamRecoder( - handle, utf8.encode, utf8.decode, codec.streamreader, - codec.streamwriter) as stream: - - with tm.assert_raises_regex(UnicodeDecodeError, msg): - self.read_csv(stream) + with tm.assert_raises_regex(UnicodeDecodeError, msg): + self.read_csv(stream) + stream.close() def test_read_csv(self): if not compat.PY3: diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index e4950af19ea95..48b2cedb63811 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -110,15 +110,16 @@ def test_read_csv_infer_compression(self): # see gh-9770 expected = self.read_csv(self.csv1, index_col=0, parse_dates=True) - with open(self.csv1) as f: - inputs = [self.csv1, self.csv1 + '.gz', - self.csv1 + '.bz2', f] + inputs = [self.csv1, self.csv1 + '.gz', + self.csv1 + '.bz2', open(self.csv1)] - for inp in inputs: - df = self.read_csv(inp, index_col=0, parse_dates=True, - compression='infer') + for f in inputs: + df = self.read_csv(f, index_col=0, parse_dates=True, + compression='infer') + + tm.assert_frame_equal(expected, df) - tm.assert_frame_equal(expected, df) + inputs[3].close() def test_read_csv_compressed_utf16_example(self, datapath): # GH18071 diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index c7026e3e0fc88..c1e0f1dc753e8 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -36,18 +36,24 @@ def setup_method(self, datapath): self.xls1 = os.path.join(self.dirpath, 
'test.xls') def test_file_handle(self): - with open(self.csv1, 'rb') as f: + try: + f = open(self.csv1, 'rb') reader = TextReader(f) - reader.read() + result = reader.read() # noqa + finally: + f.close() def test_string_filename(self): reader = TextReader(self.csv1, header=None) reader.read() def test_file_handle_mmap(self): - with open(self.csv1, 'rb') as f: + try: + f = open(self.csv1, 'rb') reader = TextReader(f, memory_map=True, header=None) reader.read() + finally: + f.close() def test_StringIO(self): with open(self.csv1, 'rb') as f: diff --git a/pandas/tests/io/sas/test_sas7bdat.py b/pandas/tests/io/sas/test_sas7bdat.py index 101ee3e619f5b..ae40653c28f99 100644 --- a/pandas/tests/io/sas/test_sas7bdat.py +++ b/pandas/tests/io/sas/test_sas7bdat.py @@ -178,8 +178,6 @@ def test_date_time(datapath): fname = datapath("io", "sas", "data", "datetime.csv") df0 = pd.read_csv(fname, parse_dates=['Date1', 'Date2', 'DateTime', 'DateTimeHi', 'Taiw']) - # GH 19732: Timestamps imported from sas will incur floating point errors - df.iloc[:, 3] = df.iloc[:, 3].dt.round('us') tm.assert_frame_equal(df, df0) diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index 412e218f95c6f..491d5fe33cc33 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -129,8 +129,9 @@ def test_string_io(self): with ensure_clean(self.path) as p: s = df.to_msgpack() - with open(p, 'wb') as fh: - fh.write(s) + fh = open(p, 'wb') + fh.write(s) + fh.close() result = read_msgpack(p) tm.assert_frame_equal(result, df) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index d05fd689ed754..0d9f24e01fc57 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2497,14 +2497,3 @@ def test_concat_aligned_sort_does_not_raise(): columns=[1, 'a']) result = pd.concat([df, df], ignore_index=True, sort=True) tm.assert_frame_equal(result, expected) - - 
-@pytest.mark.parametrize("s1name,s2name", [ - (np.int64(190), (43, 0)), (190, (43, 0))]) -def test_concat_series_name_npscalar_tuple(s1name, s2name): - # GH21015 - s1 = pd.Series({'a': 1, 'b': 2}, name=s1name) - s2 = pd.Series({'c': 5, 'd': 6}, name=s2name) - result = pd.concat([s1, s2]) - expected = pd.Series({'a': 1, 'b': 2, 'c': 5, 'd': 6}) - tm.assert_series_equal(result, expected) diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 6472bd4245622..466ece500156a 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -106,16 +106,6 @@ def test_compare_timedelta_ndarray(self): class TestTimedeltas(object): - @pytest.mark.parametrize("unit, value, expected", [ - ('us', 9.999, 9999), ('ms', 9.999999, 9999999), - ('s', 9.999999999, 9999999999)]) - def test_rounding_on_int_unit_construction(self, unit, value, expected): - # GH 12690 - result = Timedelta(value, unit=unit) - assert result.value == expected - result = Timedelta(str(value) + unit) - assert result.value == expected - def test_total_seconds_scalar(self): # see gh-10939 rng = Timedelta('1 days, 10:11:12.100123456') diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 4172bfd41b9db..5b9af7389d630 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -648,51 +648,10 @@ def test_basics_nanos(self): assert stamp.microsecond == 145224 assert stamp.nanosecond == 192 - @pytest.mark.parametrize('value, check_kwargs', [ - [946688461000000000, {}], - [946688461000000000 / long(1000), dict(unit='us')], - [946688461000000000 / long(1000000), dict(unit='ms')], - [946688461000000000 / long(1000000000), dict(unit='s')], - [10957, dict(unit='D', h=0)], - pytest.param((946688461000000000 + 500000) / long(1000000000), - dict(unit='s', us=499, ns=964), - 
marks=pytest.mark.skipif(not PY3, - reason='using truediv, so these' - ' are like floats')), - pytest.param((946688461000000000 + 500000000) / long(1000000000), - dict(unit='s', us=500000), - marks=pytest.mark.skipif(not PY3, - reason='using truediv, so these' - ' are like floats')), - pytest.param((946688461000000000 + 500000) / long(1000000), - dict(unit='ms', us=500), - marks=pytest.mark.skipif(not PY3, - reason='using truediv, so these' - ' are like floats')), - pytest.param((946688461000000000 + 500000) / long(1000000000), - dict(unit='s'), - marks=pytest.mark.skipif(PY3, - reason='get chopped in py2')), - pytest.param((946688461000000000 + 500000000) / long(1000000000), - dict(unit='s'), - marks=pytest.mark.skipif(PY3, - reason='get chopped in py2')), - pytest.param((946688461000000000 + 500000) / long(1000000), - dict(unit='ms'), - marks=pytest.mark.skipif(PY3, - reason='get chopped in py2')), - [(946688461000000000 + 500000) / long(1000), dict(unit='us', us=500)], - [(946688461000000000 + 500000000) / long(1000000), - dict(unit='ms', us=500000)], - [946688461000000000 / 1000.0 + 5, dict(unit='us', us=5)], - [946688461000000000 / 1000.0 + 5000, dict(unit='us', us=5000)], - [946688461000000000 / 1000000.0 + 0.5, dict(unit='ms', us=500)], - [946688461000000000 / 1000000.0 + 0.005, dict(unit='ms', us=5, ns=5)], - [946688461000000000 / 1000000000.0 + 0.5, dict(unit='s', us=500000)], - [10957 + 0.5, dict(unit='D', h=12)]]) - def test_unit(self, value, check_kwargs): - def check(value, unit=None, h=1, s=1, us=0, ns=0): - stamp = Timestamp(value, unit=unit) + def test_unit(self): + + def check(val, unit=None, h=1, s=1, us=0): + stamp = Timestamp(val, unit=unit) assert stamp.year == 2000 assert stamp.month == 1 assert stamp.day == 1 @@ -705,9 +664,41 @@ def check(value, unit=None, h=1, s=1, us=0, ns=0): assert stamp.minute == 0 assert stamp.second == 0 assert stamp.microsecond == 0 - assert stamp.nanosecond == ns + assert stamp.nanosecond == 0 + + ts = 
Timestamp('20000101 01:01:01') + val = ts.value + days = (ts - Timestamp('1970-01-01')).days + + check(val) + check(val / long(1000), unit='us') + check(val / long(1000000), unit='ms') + check(val / long(1000000000), unit='s') + check(days, unit='D', h=0) - check(value, **check_kwargs) + # using truediv, so these are like floats + if PY3: + check((val + 500000) / long(1000000000), unit='s', us=500) + check((val + 500000000) / long(1000000000), unit='s', us=500000) + check((val + 500000) / long(1000000), unit='ms', us=500) + + # get chopped in py2 + else: + check((val + 500000) / long(1000000000), unit='s') + check((val + 500000000) / long(1000000000), unit='s') + check((val + 500000) / long(1000000), unit='ms') + + # ok + check((val + 500000) / long(1000), unit='us', us=500) + check((val + 500000000) / long(1000000), unit='ms', us=500000) + + # floats + check(val / 1000.0 + 5, unit='us', us=5) + check(val / 1000.0 + 5000, unit='us', us=5000) + check(val / 1000000.0 + 0.5, unit='ms', us=500) + check(val / 1000000.0 + 0.005, unit='ms', us=5) + check(val / 1000000000.0 + 0.5, unit='s', us=500000) + check(days + 0.5, unit='D', h=12) def test_roundtrip(self): diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 814d794d45c18..90f37053ce17e 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -76,8 +76,9 @@ def test_from_csv(self): series_h = self.read_csv(path, header=0) assert series_h.name == "series" - with open(path, "w") as outfile: - outfile.write("1998-01-01|1.0\n1999-01-01|2.0") + outfile = open(path, "w") + outfile.write("1998-01-01|1.0\n1999-01-01|2.0") + outfile.close() series = self.read_csv(path, sep="|") check_series = Series({datetime(1998, 1, 1): 1.0, diff --git a/pandas/tests/series/test_sorting.py b/pandas/tests/series/test_sorting.py index 13e0d1b12c372..01b4ea6eaa238 100644 --- a/pandas/tests/series/test_sorting.py +++ b/pandas/tests/series/test_sorting.py @@ -141,20 +141,19 @@ def 
test_sort_index_inplace(self): assert result is None tm.assert_series_equal(random_order, self.ts) - @pytest.mark.parametrize("level", ['A', 0]) # GH 21052 - def test_sort_index_multiindex(self, level): + def test_sort_index_multiindex(self): mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC')) s = Series([1, 2], mi) backwards = s.iloc[[1, 0]] # implicit sort_remaining=True - res = s.sort_index(level=level) + res = s.sort_index(level='A') assert_series_equal(backwards, res) # GH13496 - # sort has no effect without remaining lvls - res = s.sort_index(level=level, sort_remaining=False) + # rows share same level='A': sort has no effect without remaining lvls + res = s.sort_index(level='A', sort_remaining=False) assert_series_equal(s, res) def test_sort_index_kind(self): diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 25e64aa82cc36..8ab907a9723bd 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -509,42 +509,23 @@ def test_invalid(self): pytest.raises(TypeError, lambda: algos.isin(1, [1])) pytest.raises(TypeError, lambda: algos.isin([1], 1)) - def test_basic(self): - - result = algos.isin([1, 2], [1]) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(np.array([1, 2]), [1]) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series([1, 2]), [1]) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series([1, 2]), Series([1])) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series([1, 2]), set([1])) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(['a', 'b'], ['a']) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series(['a', 'b']), Series(['a'])) - 
expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(Series(['a', 'b']), set(['a'])) - expected = np.array([True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = algos.isin(['a', 'b'], [1]) - expected = np.array([False, False]) + @pytest.mark.parametrize("comps,values,expected", [ + ([1, 2], [1], [True, False]), + ([1, 0], [1, 0.5], [True, False]), + ([1.0, 0], [1, 0.5], [True, False]), + ([1.0, 0.0], [1, 0], [True, True]), + (np.array([1, 2]), [1], [True, False]), + (Series([1, 2]), [1], [True, False]), + (Series([1, 2]), Series([1]), [True, False]), + (Series([1, 2]), set([1]), [True, False]), + (['a', 'b'], ['a'], [True, False]), + (Series(['a', 'b']), Series(['a']), [True, False]), + (Series(['a', 'b']), set(['a']), [True, False]), + (['a', 'b'], [1], [False, False]) + ]) + def test_basic(self, comps, values, expected): + result = algos.isin(comps, values) + expected = np.array(expected) tm.assert_numpy_array_equal(result, expected) def test_i8(self): diff --git a/pandas/tests/util/test_testing.py b/pandas/tests/util/test_testing.py index 95ea4658212e9..c0e8b8b627686 100644 --- a/pandas/tests/util/test_testing.py +++ b/pandas/tests/util/test_testing.py @@ -504,25 +504,6 @@ def test_index_equal_metadata_message(self): with tm.assert_raises_regex(AssertionError, expected): assert_index_equal(idx1, idx2) - def test_categorical_index_equality(self): - expected = """Index are different - -Attribute "dtype" are different -\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\) -\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \ -ordered=False\\)""" - - with tm.assert_raises_regex(AssertionError, expected): - assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])), - pd.Index(pd.Categorical(['a', 'b'], - categories=['a', 'b', 'c']))) - - def test_categorical_index_equality_relax_categories_check(self): - 
assert_index_equal(pd.Index(pd.Categorical(['a', 'b'])), - pd.Index(pd.Categorical(['a', 'b'], - categories=['a', 'b', 'c'])), - check_categorical=False) - class TestAssertSeriesEqual(object): @@ -620,25 +601,6 @@ def test_series_equal_message(self): assert_series_equal(pd.Series([1, 2, 3]), pd.Series([1, 2, 4]), check_less_precise=True) - def test_categorical_series_equality(self): - expected = """Attributes are different - -Attribute "dtype" are different -\\[left\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b'\\], ordered=False\\) -\\[right\\]: CategoricalDtype\\(categories=\\[u?'a', u?'b', u?'c'\\], \ -ordered=False\\)""" - - with tm.assert_raises_regex(AssertionError, expected): - assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])), - pd.Series(pd.Categorical(['a', 'b'], - categories=['a', 'b', 'c']))) - - def test_categorical_series_equality_relax_categories_check(self): - assert_series_equal(pd.Series(pd.Categorical(['a', 'b'])), - pd.Series(pd.Categorical(['a', 'b'], - categories=['a', 'b', 'c'])), - check_categorical=False) - class TestAssertFrameEqual(object): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 54ae8cfb3d39e..85fc1b16c73fa 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -809,12 +809,8 @@ def assert_index_equal(left, right, exact='equiv', check_names=True, def _check_types(l, r, obj='Index'): if exact: - assert_class_equal(l, r, exact=exact, obj=obj) - - # Skip exact dtype checking when `check_categorical` is False - if check_categorical: - assert_attr_equal('dtype', l, r, obj=obj) - + assert_class_equal(left, right, exact=exact, obj=obj) + assert_attr_equal('dtype', l, r, obj=obj) # allow string-like to have different inferred_types if l.inferred_type in ('string', 'unicode'): assert r.inferred_type in ('string', 'unicode') @@ -864,8 +860,7 @@ def _get_ilevel_values(index, level): # get_level_values may change dtype _check_types(left.levels[level], right.levels[level], obj=obj) - # skip exact 
index checking when `check_categorical` is False - if check_exact and check_categorical: + if check_exact: if not left.equals(right): diff = np.sum((left.values != right.values) .astype(int)) * 100.0 / len(left) @@ -986,23 +981,23 @@ def is_sorted(seq): def assert_categorical_equal(left, right, check_dtype=True, - check_category_order=True, obj='Categorical'): + obj='Categorical', check_category_order=True): """Test that Categoricals are equivalent. Parameters ---------- - left : Categorical - right : Categorical + left, right : Categorical + Categoricals to compare check_dtype : bool, default True Check that integer dtype of the codes are the same + obj : str, default 'Categorical' + Specify object name being compared, internally used to show appropriate + assertion message check_category_order : bool, default True Whether the order of the categories should be compared, which implies identical integer codes. If False, only the resulting values are compared. The ordered attribute is checked regardless. 
- obj : str, default 'Categorical' - Specify object name being compared, internally used to show appropriate - assertion message """ _check_isinstance(left, right, Categorical) @@ -1056,7 +1051,7 @@ def raise_assert_detail(obj, message, left, right, diff=None): def assert_numpy_array_equal(left, right, strict_nan=False, check_dtype=True, err_msg=None, - check_same=None, obj='numpy array'): + obj='numpy array', check_same=None): """ Checks that 'np.ndarray' is equivalent Parameters @@ -1069,11 +1064,11 @@ def assert_numpy_array_equal(left, right, strict_nan=False, check dtype if both a and b are np.ndarray err_msg : str, default None If provided, used as assertion message - check_same : None|'copy'|'same', default None - Ensure left and right refer/do not refer to the same memory area obj : str, default 'numpy array' Specify object name being compared, internally used to show appropriate assertion message + check_same : None|'copy'|'same', default None + Ensure left and right refer/do not refer to the same memory area """ # instance validation
- [x] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry ASV output for algorithms is as follows: ```python before after ratio [5380fcd4] [99ac16f4] + 1.93±0.01ms 2.38±0.01ms 1.23 algorithms.Hashing.time_series_float SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. ```
https://api.github.com/repos/pandas-dev/pandas/pulls/21887
2018-07-13T03:57:39Z
2018-07-13T04:31:06Z
null
2018-07-13T09:41:59Z
use npy_datetimestruct instead of pandas_datetimestruct
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 11e1e6522ef3b..1ad8c780ba7a4 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -28,9 +28,9 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt #define PyInt_AsLong PyLong_AsLong #endif -const pandas_datetimestruct _NS_MIN_DTS = { +const npy_datetimestruct _NS_MIN_DTS = { 1677, 9, 21, 0, 12, 43, 145225, 0, 0}; -const pandas_datetimestruct _NS_MAX_DTS = { +const npy_datetimestruct _NS_MAX_DTS = { 2262, 4, 11, 23, 47, 16, 854775, 807000, 0}; @@ -62,7 +62,7 @@ int dayofweek(int y, int m, int d) { * Adjusts a datetimestruct based on a minutes offset. Assumes * the current values are valid.g */ -void add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes) { +void add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes) { int isleap; /* MINUTES */ @@ -111,7 +111,7 @@ void add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes) { /* * Calculates the days offset from the 1970 epoch. */ -npy_int64 get_datetimestruct_days(const pandas_datetimestruct *dts) { +npy_int64 get_datetimestruct_days(const npy_datetimestruct *dts) { int i, month; npy_int64 year, days = 0; const int *month_lengths; @@ -211,7 +211,7 @@ static npy_int64 days_to_yearsdays(npy_int64 *days_) { * Adjusts a datetimestruct based on a seconds offset. Assumes * the current values are valid. */ -NPY_NO_EXPORT void add_seconds_to_datetimestruct(pandas_datetimestruct *dts, +NPY_NO_EXPORT void add_seconds_to_datetimestruct(npy_datetimestruct *dts, int seconds) { int minutes; @@ -236,7 +236,7 @@ NPY_NO_EXPORT void add_seconds_to_datetimestruct(pandas_datetimestruct *dts, * offset from 1970. 
*/ static void set_datetimestruct_days(npy_int64 days, - pandas_datetimestruct *dts) { + npy_datetimestruct *dts) { const int *month_lengths; int i; @@ -255,10 +255,10 @@ static void set_datetimestruct_days(npy_int64 days, } /* - * Compares two pandas_datetimestruct objects chronologically + * Compares two npy_datetimestruct objects chronologically */ -int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, - const pandas_datetimestruct *b) { +int cmp_npy_datetimestruct(const npy_datetimestruct *a, + const npy_datetimestruct *b) { if (a->year > b->year) { return 1; } else if (a->year < b->year) { @@ -319,7 +319,7 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, /* * * Tests for and converts a Python datetime.datetime or datetime.date - * object into a NumPy pandas_datetimestruct. Uses tzinfo (if present) + * object into a NumPy npy_datetimestruct. Uses tzinfo (if present) * to convert to UTC time. * * While the C API has PyDate_* and PyDateTime_* functions, the following @@ -331,12 +331,12 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, * if obj doesn't have the needed date or datetime attributes. 
*/ int convert_pydatetime_to_datetimestruct(PyObject *obj, - pandas_datetimestruct *out) { + npy_datetimestruct *out) { PyObject *tmp; int isleap; /* Initialize the output to all zeros */ - memset(out, 0, sizeof(pandas_datetimestruct)); + memset(out, 0, sizeof(npy_datetimestruct)); out->month = 1; out->day = 1; @@ -512,8 +512,8 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj, return -1; } -npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - pandas_datetimestruct *d) { +npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, + npy_datetimestruct *d) { npy_datetime result = NPY_DATETIME_NAT; convert_datetimestruct_to_datetime(fr, d, &result); @@ -521,7 +521,7 @@ npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, } void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - pandas_datetimestruct *result) { + npy_datetimestruct *result) { convert_datetime_to_datetimestruct(fr, val, result); } @@ -539,7 +539,7 @@ void pandas_timedelta_to_timedeltastruct(npy_timedelta val, * Returns 0 on success, -1 on failure. */ int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, - const pandas_datetimestruct *dts, + const npy_datetimestruct *dts, npy_datetime *out) { npy_datetime ret; @@ -643,11 +643,11 @@ int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, */ int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, npy_datetime dt, - pandas_datetimestruct *out) { + npy_datetimestruct *out) { npy_int64 perday; /* Initialize the output to all zeros */ - memset(out, 0, sizeof(pandas_datetimestruct)); + memset(out, 0, sizeof(npy_datetimestruct)); out->year = 1970; out->month = 1; out->day = 1; diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index 5644ac036f198..f5c48036c16f8 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -19,30 +19,25 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #include <numpy/ndarraytypes.h> -typedef struct { - npy_int64 year; - npy_int32 month, day, hour, min, sec, us, ps, as; -} pandas_datetimestruct; - typedef struct { npy_int64 days; npy_int32 hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds; } pandas_timedeltastruct; -extern const pandas_datetimestruct _NS_MIN_DTS; -extern const pandas_datetimestruct _NS_MAX_DTS; +extern const npy_datetimestruct _NS_MIN_DTS; +extern const npy_datetimestruct _NS_MAX_DTS; // stuff pandas needs // ---------------------------------------------------------------------------- int convert_pydatetime_to_datetimestruct(PyObject *obj, - pandas_datetimestruct *out); + npy_datetimestruct *out); -npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - pandas_datetimestruct *d); +npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, + npy_datetimestruct *d); void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - pandas_datetimestruct *result); + npy_datetimestruct *result); void pandas_timedelta_to_timedeltastruct(npy_timedelta val, NPY_DATETIMEUNIT fr, @@ -61,14 +56,14 @@ int is_leapyear(npy_int64 year); * Calculates the days offset from the 1970 epoch. */ npy_int64 -get_datetimestruct_days(const pandas_datetimestruct *dts); +get_datetimestruct_days(const npy_datetimestruct *dts); /* - * Compares two pandas_datetimestruct objects chronologically + * Compares two npy_datetimestruct objects chronologically */ -int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, - const pandas_datetimestruct *b); +int cmp_npy_datetimestruct(const npy_datetimestruct *a, + const npy_datetimestruct *b); /* @@ -76,12 +71,12 @@ int cmp_pandas_datetimestruct(const pandas_datetimestruct *a, * the current values are valid. 
*/ void -add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes); +add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes); int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, npy_datetime dt, - pandas_datetimestruct *out); + npy_datetimestruct *out); #endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_H_ diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/src/datetime/np_datetime_strings.c index b1852094c301e..fa96cce1756c8 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.c +++ b/pandas/_libs/src/datetime/np_datetime_strings.c @@ -63,7 +63,7 @@ This file implements string parsing and creation for NumPy datetime. * Returns 0 on success, -1 on failure. */ int parse_iso_8601_datetime(char *str, int len, - pandas_datetimestruct *out, + npy_datetimestruct *out, int *out_local, int *out_tzoffset) { int year_leap = 0; int i, numdigits; @@ -86,7 +86,7 @@ int parse_iso_8601_datetime(char *str, int len, int hour_was_2_digits = 0; /* Initialize the output to all zeros */ - memset(out, 0, sizeof(pandas_datetimestruct)); + memset(out, 0, sizeof(npy_datetimestruct)); out->month = 1; out->day = 1; @@ -567,7 +567,7 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { /* - * Converts an pandas_datetimestruct to an (almost) ISO 8601 + * Converts an npy_datetimestruct to an (almost) ISO 8601 * NULL-terminated string using timezone Z (UTC). If the string fits in * the space exactly, it leaves out the NULL terminator and returns success. * @@ -580,7 +580,7 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { * Returns 0 on success, -1 on failure (for example if the output * string was too short). 
*/ -int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, +int make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, NPY_DATETIMEUNIT base) { char *substr = outstr, sublen = outlen; int tmplen; diff --git a/pandas/_libs/src/datetime/np_datetime_strings.h b/pandas/_libs/src/datetime/np_datetime_strings.h index ff1d26e5168b5..821bb79b345bd 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.h +++ b/pandas/_libs/src/datetime/np_datetime_strings.h @@ -51,9 +51,9 @@ This file implements string parsing and creation for NumPy datetime. */ int parse_iso_8601_datetime(char *str, int len, - pandas_datetimestruct *out, - int *out_local, - int *out_tzoffset); + npy_datetimestruct *out, + int *out_local, + int *out_tzoffset); /* * Provides a string length to use for converting datetime @@ -63,7 +63,7 @@ int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); /* - * Converts an pandas_datetimestruct to an (almost) ISO 8601 + * Converts an npy_datetimestruct to an (almost) ISO 8601 * NULL-terminated string using timezone Z (UTC). * * 'base' restricts the output to that unit. Set 'base' to @@ -73,7 +73,7 @@ get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); * string was too short). 
*/ int -make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, +make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, int outlen, NPY_DATETIMEUNIT base); #endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_ diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index 5011d33d189c2..7dab77131c1a0 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -47,14 +47,14 @@ static int monthToQuarter(int month) { return ((month - 1) / 3) + 1; } * Assumes GREGORIAN_CALENDAR */ npy_int64 unix_date_from_ymd(int year, int month, int day) { /* Calculate the absolute date */ - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date; - memset(&dts, 0, sizeof(pandas_datetimestruct)); + memset(&dts, 0, sizeof(npy_datetimestruct)); dts.year = year; dts.month = month; dts.day = day; - unix_date = pandas_datetimestruct_to_datetime(NPY_FR_D, &dts); + unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, &dts); return unix_date; } @@ -127,7 +127,7 @@ static npy_int64 DtoB_weekday(npy_int64 unix_date) { return floordiv(unix_date + 4, 7) * 5 + mod_compat(unix_date + 4, 7) - 4; } -static npy_int64 DtoB(pandas_datetimestruct *dts, +static npy_int64 DtoB(npy_datetimestruct *dts, int roll_back, npy_int64 unix_date) { int day_of_week = dayofweek(dts->year, dts->month, dts->day); @@ -149,7 +149,7 @@ static npy_int64 DtoB(pandas_datetimestruct *dts, //************ FROM DAILY *************** static npy_int64 asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) { - pandas_datetimestruct dts; + npy_datetimestruct dts; ordinal = downsample_daytime(ordinal, af_info); pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts); if (dts.month > af_info->to_end) { @@ -160,7 +160,7 @@ static npy_int64 asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) { } static int DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year) { - pandas_datetimestruct dts; + npy_datetimestruct dts; int quarter; 
pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts); @@ -188,7 +188,7 @@ static npy_int64 asfreq_DTtoQ(npy_int64 ordinal, asfreq_info *af_info) { } static npy_int64 asfreq_DTtoM(npy_int64 ordinal, asfreq_info *af_info) { - pandas_datetimestruct dts; + npy_datetimestruct dts; ordinal = downsample_daytime(ordinal, af_info); @@ -203,7 +203,7 @@ static npy_int64 asfreq_DTtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_DTtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = downsample_daytime(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); @@ -262,7 +262,7 @@ static npy_int64 asfreq_WtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_WtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = asfreq_WtoDT(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); @@ -302,7 +302,7 @@ static npy_int64 asfreq_MtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_MtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = asfreq_MtoDT(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); @@ -357,7 +357,7 @@ static npy_int64 asfreq_QtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_QtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = asfreq_QtoDT(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); @@ -414,7 +414,7 @@ static npy_int64 asfreq_AtoW(npy_int64 ordinal, asfreq_info *af_info) { static npy_int64 asfreq_AtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; - pandas_datetimestruct dts; + npy_datetimestruct dts; npy_int64 unix_date = 
asfreq_AtoDT(ordinal, af_info); pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index eaa4eca44c15b..c9b0143ffc6ca 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -442,7 +442,7 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, return PyString_AS_STRING(newObj); } -static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts, +static void *PandasDateTimeStructToJSON(npy_datetimestruct *dts, JSONTypeContext *tc, void *outValue, size_t *_outLen) { NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; @@ -471,14 +471,14 @@ static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts, } } else { PRINTMARK(); - *((JSINT64 *)outValue) = pandas_datetimestruct_to_datetime(base, dts); + *((JSINT64 *)outValue) = npy_datetimestruct_to_datetime(base, dts); return NULL; } } static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - pandas_datetimestruct dts; + npy_datetimestruct dts; PyDatetimeScalarObject *obj = (PyDatetimeScalarObject *)_obj; PRINTMARK(); @@ -489,7 +489,7 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - pandas_datetimestruct dts; + npy_datetimestruct dts; PyObject *obj = (PyObject *)_obj; PRINTMARK(); @@ -509,7 +509,7 @@ static void *PyDateTimeToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, static void *NpyDatetime64ToJSON(JSOBJ _obj, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - pandas_datetimestruct dts; + npy_datetimestruct dts; PRINTMARK(); pandas_datetime_to_datetimestruct((npy_datetime)GET_TC(tc)->longValue, diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 281e497945c5f..f8ce346b16317 100644 --- a/pandas/_libs/tslib.pyx 
+++ b/pandas/_libs/tslib.pyx @@ -21,7 +21,7 @@ PyDateTime_IMPORT from tslibs.np_datetime cimport (check_dts_bounds, - pandas_datetimestruct, + npy_datetimestruct, _string_to_dts, dt64_to_dtstruct, dtstruct_to_dt64, pydatetime_to_dt64, pydate_to_dt64, @@ -58,20 +58,20 @@ cdef bint PY2 = str == bytes cdef inline object create_datetime_from_ts( - int64_t value, pandas_datetimestruct dts, + int64_t value, npy_datetimestruct dts, object tz, object freq): """ convenience routine to construct a datetime.datetime from its parts """ return datetime(dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz) cdef inline object create_date_from_ts( - int64_t value, pandas_datetimestruct dts, + int64_t value, npy_datetimestruct dts, object tz, object freq): """ convenience routine to construct a datetime.date from its parts """ return date(dts.year, dts.month, dts.day) cdef inline object create_time_from_ts( - int64_t value, pandas_datetimestruct dts, + int64_t value, npy_datetimestruct dts, object tz, object freq): """ convenience routine to construct a datetime.time from its parts """ return time(dts.hour, dts.min, dts.sec, dts.us) @@ -103,11 +103,11 @@ def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, cdef: Py_ssize_t i, n = len(arr) ndarray[int64_t] trans, deltas - pandas_datetimestruct dts + npy_datetimestruct dts object dt int64_t value ndarray[object] result = np.empty(n, dtype=object) - object (*func_create)(int64_t, pandas_datetimestruct, object, object) + object (*func_create)(int64_t, npy_datetimestruct, object, object) if box == "date": assert (tz is None), "tz should be None when converting to date" @@ -230,7 +230,7 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None, bint show_ms = 0, show_us = 0, show_ns = 0, basic_format = 0 ndarray[object] result = np.empty(N, dtype=object) object ts, res - pandas_datetimestruct dts + npy_datetimestruct dts if na_rep is None: na_rep = 'NaT' @@ -454,7 +454,7 @@ cpdef 
array_to_datetime(ndarray[object] values, errors='raise', object val, py_dt ndarray[int64_t] iresult ndarray[object] oresult - pandas_datetimestruct dts + npy_datetimestruct dts bint utc_convert = bool(utc) bint seen_integer = 0 bint seen_string = 0 diff --git a/pandas/_libs/tslibs/conversion.pxd b/pandas/_libs/tslibs/conversion.pxd index 448dbd27e8278..96e4676fe91c0 100644 --- a/pandas/_libs/tslibs/conversion.pxd +++ b/pandas/_libs/tslibs/conversion.pxd @@ -5,12 +5,12 @@ from cpython.datetime cimport datetime, tzinfo from numpy cimport int64_t, int32_t -from np_datetime cimport pandas_datetimestruct +from np_datetime cimport npy_datetimestruct cdef class _TSObject: cdef: - pandas_datetimestruct dts # pandas_datetimestruct + npy_datetimestruct dts # npy_datetimestruct int64_t value # numpy dt64 object tzinfo diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index b948be606645d..d0090852fa5af 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -19,7 +19,7 @@ from cpython.datetime cimport (datetime, tzinfo, PyDateTime_IMPORT from np_datetime cimport (check_dts_bounds, - pandas_datetimestruct, + npy_datetimestruct, pandas_datetime_to_datetimestruct, _string_to_dts, npy_datetime, dt64_to_dtstruct, dtstruct_to_dt64, @@ -60,7 +60,7 @@ cdef inline int64_t get_datetime64_nanos(object val) except? -1: value to nanoseconds if necessary. 
""" cdef: - pandas_datetimestruct dts + npy_datetimestruct dts NPY_DATETIMEUNIT unit npy_datetime ival @@ -93,7 +93,7 @@ def ensure_datetime64ns(ndarray arr, copy=True): Py_ssize_t i, n = arr.size ndarray[int64_t] ivalues, iresult NPY_DATETIMEUNIT unit - pandas_datetimestruct dts + npy_datetimestruct dts shape = (<object> arr).shape @@ -157,7 +157,7 @@ def datetime_to_datetime64(ndarray[object] values): Py_ssize_t i, n = len(values) object val, inferred_tz = None ndarray[int64_t] iresult - pandas_datetimestruct dts + npy_datetimestruct dts _TSObject _ts result = np.empty(n, dtype='M8[ns]') @@ -203,7 +203,7 @@ cdef inline maybe_datetimelike_to_i8(object val): val : int64 timestamp or original input """ cdef: - pandas_datetimestruct dts + npy_datetimestruct dts try: return val.value except AttributeError: @@ -220,7 +220,7 @@ cdef inline maybe_datetimelike_to_i8(object val): # lightweight C object to hold datetime & int64 pair cdef class _TSObject: # cdef: - # pandas_datetimestruct dts # pandas_datetimestruct + # npy_datetimestruct dts # npy_datetimestruct # int64_t value # numpy dt64 # object tzinfo @@ -682,7 +682,7 @@ cdef inline int64_t _tz_convert_tzlocal_utc(int64_t val, tzinfo tz, result : int64_t """ cdef: - pandas_datetimestruct dts + npy_datetimestruct dts int64_t result, delta datetime dt @@ -730,7 +730,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): ndarray[int64_t] trans, deltas Py_ssize_t pos int64_t v, offset, utc_date - pandas_datetimestruct dts + npy_datetimestruct dts ndarray[int64_t] arr # TODO: Is there a lighter-weight way to do this? 
# See GH#17734 We should always be converting either from UTC or to UTC @@ -784,7 +784,7 @@ def tz_convert(ndarray[int64_t] vals, object tz1, object tz2): Py_ssize_t i, j, pos, n = len(vals) ndarray[Py_ssize_t] posn int64_t v, offset, delta - pandas_datetimestruct dts + npy_datetimestruct dts if len(vals) == 0: return np.array([], dtype=np.int64) @@ -849,7 +849,7 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None, int64_t *tdata int64_t v, left, right ndarray[int64_t] result, result_a, result_b, dst_hours - pandas_datetimestruct dts + npy_datetimestruct dts bint infer_dst = False, is_dst = False, fill = False bint is_coerce = errors == 'coerce', is_raise = errors == 'raise' @@ -1086,7 +1086,7 @@ def normalize_i8_timestamps(ndarray[int64_t] stamps, tz=None): """ cdef: Py_ssize_t i, n = len(stamps) - pandas_datetimestruct dts + npy_datetimestruct dts ndarray[int64_t] result = np.empty(n, dtype=np.int64) if tz is not None: @@ -1125,7 +1125,7 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): Py_ssize_t n = len(stamps) ndarray[int64_t] result = np.empty(n, dtype=np.int64) ndarray[int64_t] trans, deltas, pos - pandas_datetimestruct dts + npy_datetimestruct dts if is_utc(tz): with nogil: @@ -1168,13 +1168,13 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): return result -cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts) nogil: +cdef inline int64_t _normalized_stamp(npy_datetimestruct *dts) nogil: """ Normalize the given datetimestruct to midnight, then convert to int64_t. 
Parameters ---------- - *dts : pointer to pandas_datetimestruct + *dts : pointer to npy_datetimestruct Returns ------- @@ -1206,7 +1206,7 @@ def is_date_array_normalized(ndarray[int64_t] stamps, tz=None): cdef: Py_ssize_t i, n = len(stamps) ndarray[int64_t] trans, deltas - pandas_datetimestruct dts + npy_datetimestruct dts int64_t local_val if tz is None or is_utc(tz): diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index ccf67e765e079..a298f521ef853 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -16,7 +16,7 @@ cnp.import_array() from ccalendar import get_locale_names, MONTHS_FULL, DAYS_FULL from ccalendar cimport (get_days_in_month, is_leapyear, dayofweek, get_week_of_year, get_day_of_year) -from np_datetime cimport (pandas_datetimestruct, pandas_timedeltastruct, +from np_datetime cimport (npy_datetimestruct, pandas_timedeltastruct, dt64_to_dtstruct, td64_to_tdstruct) from nattype cimport NPY_NAT @@ -47,7 +47,7 @@ def build_field_sarray(ndarray[int64_t] dtindex): """ cdef: Py_ssize_t i, count = 0 - pandas_datetimestruct dts + npy_datetimestruct dts ndarray[int32_t] years, months, days, hours, minutes, seconds, mus count = len(dtindex) @@ -94,7 +94,7 @@ def get_date_name_field(ndarray[int64_t] dtindex, object field, cdef: Py_ssize_t i, count = 0 ndarray[object] out, names - pandas_datetimestruct dts + npy_datetimestruct dts int dow count = len(dtindex) @@ -150,7 +150,7 @@ def get_start_end_field(ndarray[int64_t] dtindex, object field, ndarray[int8_t] out ndarray[int32_t, ndim=2] _month_offset bint isleap - pandas_datetimestruct dts + npy_datetimestruct dts int mo_off, dom, doy, dow, ldom _month_offset = np.array( @@ -389,7 +389,7 @@ def get_date_field(ndarray[int64_t] dtindex, object field): cdef: Py_ssize_t i, count = 0 ndarray[int32_t] out - pandas_datetimestruct dts + npy_datetimestruct dts count = len(dtindex) out = np.empty(count, dtype='i4') diff --git a/pandas/_libs/tslibs/np_datetime.pxd 
b/pandas/_libs/tslibs/np_datetime.pxd index 7c91c5551dc47..a585259286a58 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -25,15 +25,11 @@ cdef extern from "numpy/arrayscalars.h": npy_timedelta obval PyArray_DatetimeMetaData obmeta -cdef extern from "../src/datetime/np_datetime.h": - ctypedef struct pandas_datetimestruct: +cdef extern from "numpy/ndarraytypes.h": + ctypedef struct npy_datetimestruct: int64_t year int32_t month, day, hour, min, sec, us, ps, as - ctypedef struct pandas_timedeltastruct: - int64_t days - int32_t hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds - ctypedef enum NPY_DATETIMEUNIT: NPY_FR_Y NPY_FR_M @@ -50,27 +46,32 @@ cdef extern from "../src/datetime/np_datetime.h": NPY_FR_fs NPY_FR_as +cdef extern from "../src/datetime/np_datetime.h": + ctypedef struct pandas_timedeltastruct: + int64_t days + int32_t hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds + void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - pandas_datetimestruct *result) nogil + npy_datetimestruct *result) nogil cdef int reverse_ops[6] cdef bint cmp_scalar(int64_t lhs, int64_t rhs, int op) except -1 -cdef check_dts_bounds(pandas_datetimestruct *dts) +cdef check_dts_bounds(npy_datetimestruct *dts) -cdef int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil -cdef void dt64_to_dtstruct(int64_t dt64, pandas_datetimestruct* out) nogil +cdef int64_t dtstruct_to_dt64(npy_datetimestruct* dts) nogil +cdef void dt64_to_dtstruct(int64_t dt64, npy_datetimestruct* out) nogil cdef void td64_to_tdstruct(int64_t td64, pandas_timedeltastruct* out) nogil -cdef int64_t pydatetime_to_dt64(datetime val, pandas_datetimestruct *dts) -cdef int64_t pydate_to_dt64(date val, pandas_datetimestruct *dts) +cdef int64_t pydatetime_to_dt64(datetime val, npy_datetimestruct *dts) +cdef int64_t pydate_to_dt64(date val, npy_datetimestruct *dts) cdef npy_datetime get_datetime64_value(object obj) nogil cdef 
npy_timedelta get_timedelta64_value(object obj) nogil cdef NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil -cdef int _string_to_dts(object val, pandas_datetimestruct* dts, +cdef int _string_to_dts(object val, npy_datetimestruct* dts, int* out_local, int* out_tzoffset) except? -1 diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index e58ec0702adcc..3c0fe98ee7b7d 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -16,27 +16,26 @@ PyDateTime_IMPORT from numpy cimport int64_t cdef extern from "../src/datetime/np_datetime.h": - int cmp_pandas_datetimestruct(pandas_datetimestruct *a, - pandas_datetimestruct *b) + int cmp_npy_datetimestruct(npy_datetimestruct *a, + npy_datetimestruct *b) - npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - pandas_datetimestruct *d - ) nogil + npy_datetime npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, + npy_datetimestruct *d) nogil void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, - pandas_datetimestruct *result) nogil + npy_datetimestruct *result) nogil void pandas_timedelta_to_timedeltastruct(npy_timedelta val, NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result ) nogil - pandas_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS + npy_datetimestruct _NS_MIN_DTS, _NS_MAX_DTS cdef extern from "../src/datetime/np_datetime_strings.h": int parse_iso_8601_datetime(char *str, int len, - pandas_datetimestruct *out, + npy_datetimestruct *out, int *out_local, int *out_tzoffset) # ---------------------------------------------------------------------- @@ -101,17 +100,17 @@ class OutOfBoundsDatetime(ValueError): pass -cdef inline check_dts_bounds(pandas_datetimestruct *dts): +cdef inline check_dts_bounds(npy_datetimestruct *dts): """Raises OutOfBoundsDatetime if the given date is outside the range that can be represented by nanosecond-resolution 64-bit integers.""" cdef: bint error = False if (dts.year <= 1677 and - 
cmp_pandas_datetimestruct(dts, &_NS_MIN_DTS) == -1): + cmp_npy_datetimestruct(dts, &_NS_MIN_DTS) == -1): error = True elif (dts.year >= 2262 and - cmp_pandas_datetimestruct(dts, &_NS_MAX_DTS) == 1): + cmp_npy_datetimestruct(dts, &_NS_MAX_DTS) == 1): error = True if error: @@ -125,14 +124,14 @@ cdef inline check_dts_bounds(pandas_datetimestruct *dts): # ---------------------------------------------------------------------- # Conversion -cdef inline int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil: - """Convenience function to call pandas_datetimestruct_to_datetime +cdef inline int64_t dtstruct_to_dt64(npy_datetimestruct* dts) nogil: + """Convenience function to call npy_datetimestruct_to_datetime with the by-far-most-common frequency NPY_FR_ns""" - return pandas_datetimestruct_to_datetime(NPY_FR_ns, dts) + return npy_datetimestruct_to_datetime(NPY_FR_ns, dts) cdef inline void dt64_to_dtstruct(int64_t dt64, - pandas_datetimestruct* out) nogil: + npy_datetimestruct* out) nogil: """Convenience function to call pandas_datetime_to_datetimestruct with the by-far-most-common frequency NPY_FR_ns""" pandas_datetime_to_datetimestruct(dt64, NPY_FR_ns, out) @@ -147,7 +146,7 @@ cdef inline void td64_to_tdstruct(int64_t td64, cdef inline int64_t pydatetime_to_dt64(datetime val, - pandas_datetimestruct *dts): + npy_datetimestruct *dts): dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val) @@ -160,7 +159,7 @@ cdef inline int64_t pydatetime_to_dt64(datetime val, cdef inline int64_t pydate_to_dt64(date val, - pandas_datetimestruct *dts): + npy_datetimestruct *dts): dts.year = PyDateTime_GET_YEAR(val) dts.month = PyDateTime_GET_MONTH(val) dts.day = PyDateTime_GET_DAY(val) @@ -169,7 +168,7 @@ cdef inline int64_t pydate_to_dt64(date val, return dtstruct_to_dt64(dts) -cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts, +cdef inline int _string_to_dts(object val, npy_datetimestruct* dts, int* out_local, 
int* out_tzoffset) except? -1: cdef: int result @@ -187,7 +186,7 @@ cdef inline int _string_to_dts(object val, pandas_datetimestruct* dts, cdef inline int _cstring_to_dts(char *val, int length, - pandas_datetimestruct* dts, + npy_datetimestruct* dts, int* out_local, int* out_tzoffset) except? -1: # Note: without this "extra layer" between _string_to_dts # and parse_iso_8601_datetime, calling _string_to_dts raises diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 27b7f03358a3a..1efcfaa5b9741 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -24,7 +24,7 @@ from ccalendar import MONTHS, DAYS from ccalendar cimport get_days_in_month, dayofweek from conversion cimport tz_convert_single, pydt_to_i8, localize_pydatetime from nattype cimport NPY_NAT -from np_datetime cimport (pandas_datetimestruct, +from np_datetime cimport (npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct) # --------------------------------------------------------------------- @@ -548,14 +548,14 @@ cpdef datetime shift_day(datetime other, int days): return localize_pydatetime(shifted, tz) -cdef inline int year_add_months(pandas_datetimestruct dts, int months) nogil: - """new year number after shifting pandas_datetimestruct number of months""" +cdef inline int year_add_months(npy_datetimestruct dts, int months) nogil: + """new year number after shifting npy_datetimestruct number of months""" return dts.year + (dts.month + months - 1) / 12 -cdef inline int month_add_months(pandas_datetimestruct dts, int months) nogil: +cdef inline int month_add_months(npy_datetimestruct dts, int months) nogil: """ - New month number after shifting pandas_datetimestruct + New month number after shifting npy_datetimestruct number of months. 
""" cdef int new_month = (dts.month + months) % 12 @@ -584,7 +584,7 @@ def shift_quarters(int64_t[:] dtindex, int quarters, """ cdef: Py_ssize_t i - pandas_datetimestruct dts + npy_datetimestruct dts int count = len(dtindex) int months_to_roll, months_since, n, compare_day bint roll_check @@ -726,7 +726,7 @@ def shift_months(int64_t[:] dtindex, int months, object day=None): """ cdef: Py_ssize_t i - pandas_datetimestruct dts + npy_datetimestruct dts int count = len(dtindex) int months_to_roll bint roll_check diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 0ec5d25beeeb9..3d38320e31533 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -22,15 +22,14 @@ from cpython.datetime cimport (PyDateTime_Check, PyDelta_Check, # import datetime C API PyDateTime_IMPORT -from np_datetime cimport (pandas_datetimestruct, dtstruct_to_dt64, +from np_datetime cimport (npy_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct, pandas_datetime_to_datetimestruct, NPY_DATETIMEUNIT, NPY_FR_D) cdef extern from "../src/datetime/np_datetime.h": - int64_t pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, - pandas_datetimestruct *d - ) nogil + int64_t npy_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, + npy_datetimestruct *d) nogil cimport util from util cimport is_period_object, is_string_object, INT32_MIN @@ -87,14 +86,14 @@ cdef extern from "period_helper.h": @cython.cdivision -cdef char* c_strftime(pandas_datetimestruct *dts, char *fmt): +cdef char* c_strftime(npy_datetimestruct *dts, char *fmt): """ Generate a nice string representation of the period object, originally from DateObject_strftime Parameters ---------- - dts : pandas_datetimestruct* + dts : npy_datetimestruct* fmt : char* Returns @@ -124,7 +123,7 @@ cdef char* c_strftime(pandas_datetimestruct *dts, char *fmt): # ---------------------------------------------------------------------- -# Conversion between date_info and pandas_datetimestruct +# Conversion 
between date_info and npy_datetimestruct cdef inline int get_freq_group(int freq) nogil: return (freq // 1000) * 1000 @@ -137,13 +136,13 @@ cdef inline int get_freq_group_index(int freq) nogil: # specifically _dont_ use cdvision or else ordinals near -1 are assigned to # incorrect dates GH#19643 @cython.cdivision(False) -cdef int64_t get_period_ordinal(pandas_datetimestruct *dts, int freq) nogil: +cdef int64_t get_period_ordinal(npy_datetimestruct *dts, int freq) nogil: """ Generate an ordinal in period space Parameters ---------- - dts: pandas_datetimestruct* + dts: npy_datetimestruct* freq : int Returns @@ -187,7 +186,7 @@ cdef int64_t get_period_ordinal(pandas_datetimestruct *dts, int freq) nogil: elif freq == FR_MTH: return (dts.year - 1970) * 12 + dts.month - 1 - unix_date = pandas_datetimestruct_to_datetime(NPY_FR_D, dts) + unix_date = npy_datetimestruct_to_datetime(NPY_FR_D, dts) if freq >= FR_SEC: seconds = unix_date * 86400 + dts.hour * 3600 + dts.min * 60 + dts.sec @@ -238,7 +237,7 @@ cdef int64_t get_period_ordinal(pandas_datetimestruct *dts, int freq) nogil: cdef void get_date_info(int64_t ordinal, int freq, - pandas_datetimestruct *dts) nogil: + npy_datetimestruct *dts) nogil: cdef: int64_t unix_date double abstime @@ -286,7 +285,7 @@ cdef int64_t get_unix_date(int64_t period_ordinal, int freq) nogil: @cython.cdivision -cdef void date_info_from_days_and_time(pandas_datetimestruct *dts, +cdef void date_info_from_days_and_time(npy_datetimestruct *dts, int64_t unix_date, double abstime) nogil: """ @@ -294,7 +293,7 @@ cdef void date_info_from_days_and_time(pandas_datetimestruct *dts, Parameters ---------- - dts : pandas_datetimestruct* + dts : npy_datetimestruct* unix_date : int64_t days elapsed since datetime(1970, 1, 1) abstime : double @@ -397,7 +396,7 @@ cdef int get_yq(int64_t ordinal, int freq, int *quarter, int *year): cdef int DtoQ_yq(int64_t unix_date, asfreq_info *af_info, int *year): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts int 
quarter date_info_from_days_and_time(&dts, unix_date, 0) @@ -432,7 +431,7 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None): cdef: ndarray[int64_t] out Py_ssize_t i, l - pandas_datetimestruct dts + npy_datetimestruct dts l = len(dtarr) @@ -610,7 +609,7 @@ cpdef int64_t period_ordinal(int y, int m, int d, int h, int min, ordinal : int64_t """ cdef: - pandas_datetimestruct dts + npy_datetimestruct dts dts.year = y dts.month = m dts.day = d @@ -624,7 +623,7 @@ cpdef int64_t period_ordinal(int y, int m, int d, int h, int min, cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil: cdef: - pandas_datetimestruct dts + npy_datetimestruct dts if ordinal == NPY_NAT: return NPY_NAT @@ -687,7 +686,7 @@ cdef list str_extra_fmts = ["^`AB`^", "^`CD`^", "^`EF`^", cdef object _period_strftime(int64_t value, int freq, object fmt): cdef: Py_ssize_t i - pandas_datetimestruct dts + npy_datetimestruct dts char *formatted object pat, repl, result list found_pat = [False] * len(extra_fmts) @@ -743,7 +742,7 @@ ctypedef int (*accessor)(int64_t ordinal, int freq) except INT32_MIN cdef int pyear(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.year @@ -765,63 +764,63 @@ cdef int pquarter(int64_t ordinal, int freq): cdef int pmonth(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.month cdef int pday(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.day cdef int pweekday(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dayofweek(dts.year, dts.month, dts.day) cdef int pday_of_year(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return get_day_of_year(dts.year, 
dts.month, dts.day) cdef int pweek(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return ccalendar.get_week_of_year(dts.year, dts.month, dts.day) cdef int phour(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.hour cdef int pminute(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return dts.min cdef int psecond(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return <int>dts.sec cdef int pdays_in_month(int64_t ordinal, int freq): cdef: - pandas_datetimestruct dts + npy_datetimestruct dts get_date_info(ordinal, freq, &dts) return ccalendar.get_days_in_month(dts.year, dts.month) @@ -936,7 +935,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, Py_ssize_t n = len(stamps) ndarray[int64_t] result = np.empty(n, dtype=np.int64) ndarray[int64_t] trans, deltas, pos - pandas_datetimestruct dts + npy_datetimestruct dts int64_t local_val if is_utc(tz): diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index a53d794b48cfa..10e730763175d 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -15,7 +15,7 @@ from pandas._libs.khash cimport (khiter_t, kh_init_int64, kh_int64_t, kh_resize_int64, kh_get_int64) -from np_datetime cimport pandas_datetimestruct, dt64_to_dtstruct +from np_datetime cimport npy_datetimestruct, dt64_to_dtstruct from frequencies cimport get_freq_code from timezones cimport (is_utc, is_tzlocal, maybe_get_tz, get_dst_info) @@ -53,7 +53,7 @@ _ONE_DAY = <int64_t>(24 * _ONE_HOUR) cpdef resolution(ndarray[int64_t] stamps, tz=None): cdef: Py_ssize_t i, n = len(stamps) - pandas_datetimestruct dts + npy_datetimestruct dts int reso = RESO_DAY, curr_reso if tz is not None: 
@@ -75,7 +75,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): Py_ssize_t n = len(stamps) int reso = RESO_DAY, curr_reso ndarray[int64_t] trans, deltas, pos - pandas_datetimestruct dts + npy_datetimestruct dts int64_t local_val if is_utc(tz): @@ -122,7 +122,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): return reso -cdef inline int _reso_stamp(pandas_datetimestruct *dts): +cdef inline int _reso_stamp(npy_datetimestruct *dts): if dts.us != 0: if dts.us % 1000 == 0: return RESO_MS diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 5b3d4399a6e10..a843a8e2b5612 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -33,7 +33,7 @@ from numpy cimport ndarray, int64_t from datetime import date as datetime_date from np_datetime cimport (check_dts_bounds, - dtstruct_to_dt64, pandas_datetimestruct) + dtstruct_to_dt64, npy_datetimestruct) from util cimport is_string_object @@ -77,7 +77,7 @@ def array_strptime(ndarray[object] values, object fmt, cdef: Py_ssize_t i, n = len(values) - pandas_datetimestruct dts + npy_datetimestruct dts ndarray[int64_t] iresult ndarray[object] result_timezone int year, month, day, minute, hour, second, weekday, julian diff --git a/pandas/_libs/tslibs/timestamps.pxd b/pandas/_libs/tslibs/timestamps.pxd index 8e7380b37209e..e9e484c715f9a 100644 --- a/pandas/_libs/tslibs/timestamps.pxd +++ b/pandas/_libs/tslibs/timestamps.pxd @@ -2,10 +2,10 @@ # cython: profile=False from numpy cimport int64_t -from np_datetime cimport pandas_datetimestruct +from np_datetime cimport npy_datetimestruct cdef object create_timestamp_from_ts(int64_t value, - pandas_datetimestruct dts, + npy_datetimestruct dts, object tz, object freq) cdef int64_t _NS_UPPER_BOUND, _NS_LOWER_BOUND diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 864950ff03eae..be988e7247e59 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ 
b/pandas/_libs/tslibs/timestamps.pyx @@ -29,7 +29,7 @@ from nattype import NaT from nattype cimport NPY_NAT from np_datetime import OutOfBoundsDatetime from np_datetime cimport (reverse_ops, cmp_scalar, check_dts_bounds, - pandas_datetimestruct, dt64_to_dtstruct) + npy_datetimestruct, dt64_to_dtstruct) from offsets cimport to_offset from timedeltas import Timedelta from timedeltas cimport delta_to_nanoseconds @@ -45,7 +45,7 @@ _no_input = object() cdef inline object create_timestamp_from_ts(int64_t value, - pandas_datetimestruct dts, + npy_datetimestruct dts, object tz, object freq): """ convenience routine to construct a Timestamp from its parts """ cdef _Timestamp ts_base @@ -973,7 +973,7 @@ class Timestamp(_Timestamp): """ cdef: - pandas_datetimestruct dts + npy_datetimestruct dts int64_t value, value_tz, offset object _tzinfo, result, k, v datetime ts_input
Follow-on to #21863
https://api.github.com/repos/pandas-dev/pandas/pulls/21886
2018-07-13T02:51:08Z
2018-07-14T14:54:30Z
2018-07-14T14:54:30Z
2018-07-14T15:38:11Z
TST: Parametrize tests in tests/util/test_hashing.py
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index 82b870c156cc8..0c14dcb49c56f 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -13,17 +13,17 @@ class TestHashing(object): - def setup_method(self, method): - self.df = DataFrame( - {'i32': np.array([1, 2, 3] * 3, dtype='int32'), - 'f32': np.array([None, 2.5, 3.5] * 3, dtype='float32'), - 'cat': Series(['a', 'b', 'c'] * 3).astype('category'), - 'obj': Series(['d', 'e', 'f'] * 3), - 'bool': np.array([True, False, True] * 3), - 'dt': Series(pd.date_range('20130101', periods=9)), - 'dt_tz': Series(pd.date_range('20130101', periods=9, - tz='US/Eastern')), - 'td': Series(pd.timedelta_range('2000', periods=9))}) + @pytest.fixture(params=[ + Series([1, 2, 3] * 3, dtype='int32'), + Series([None, 2.5, 3.5] * 3, dtype='float32'), + Series(['a', 'b', 'c'] * 3, dtype='category'), + Series(['d', 'e', 'f'] * 3), + Series([True, False, True] * 3), + Series(pd.date_range('20130101', periods=9)), + Series(pd.date_range('20130101', periods=9, tz='US/Eastern')), + Series(pd.timedelta_range('2000', periods=9))]) + def series(self, request): + return request.param def test_consistency(self): # check that our hash doesn't change because of a mistake @@ -34,10 +34,9 @@ def test_consistency(self): index=['foo', 'bar', 'baz']) tm.assert_series_equal(result, expected) - def test_hash_array(self): - for name, s in self.df.iteritems(): - a = s.values - tm.assert_numpy_array_equal(hash_array(a), hash_array(a)) + def test_hash_array(self, series): + a = series.values + tm.assert_numpy_array_equal(hash_array(a), hash_array(a)) def test_hash_array_mixed(self): result1 = hash_array(np.array([3, 4, 'All'])) @@ -46,10 +45,11 @@ def test_hash_array_mixed(self): tm.assert_numpy_array_equal(result1, result2) tm.assert_numpy_array_equal(result1, result3) - def test_hash_array_errors(self): - - for val in [5, 'foo', pd.Timestamp('20130101')]: - pytest.raises(TypeError, 
hash_array, val) + @pytest.mark.parametrize('val', [5, 'foo', pd.Timestamp('20130101')]) + def test_hash_array_errors(self, val): + msg = 'must pass a ndarray-like' + with tm.assert_raises_regex(TypeError, msg): + hash_array(val) def check_equal(self, obj, **kwargs): a = hash_pandas_object(obj, **kwargs) @@ -80,31 +80,33 @@ def test_hash_tuples(self): result = hash_tuples(tups[0]) assert result == expected[0] - def test_hash_tuple(self): + @pytest.mark.parametrize('tup', [ + (1, 'one'), (1, np.nan), (1.0, pd.NaT, 'A'), + ('A', pd.Timestamp("2012-01-01"))]) + def test_hash_tuple(self, tup): # test equivalence between hash_tuples and hash_tuple - for tup in [(1, 'one'), (1, np.nan), (1.0, pd.NaT, 'A'), - ('A', pd.Timestamp("2012-01-01"))]: - result = hash_tuple(tup) - expected = hash_tuples([tup])[0] - assert result == expected - - def test_hash_scalar(self): - for val in [1, 1.4, 'A', b'A', u'A', pd.Timestamp("2012-01-01"), - pd.Timestamp("2012-01-01", tz='Europe/Brussels'), - datetime.datetime(2012, 1, 1), - pd.Timestamp("2012-01-01", tz='EST').to_pydatetime(), - pd.Timedelta('1 days'), datetime.timedelta(1), - pd.Period('2012-01-01', freq='D'), pd.Interval(0, 1), - np.nan, pd.NaT, None]: - result = _hash_scalar(val) - expected = hash_array(np.array([val], dtype=object), - categorize=True) - assert result[0] == expected[0] - - def test_hash_tuples_err(self): - - for val in [5, 'foo', pd.Timestamp('20130101')]: - pytest.raises(TypeError, hash_tuples, val) + result = hash_tuple(tup) + expected = hash_tuples([tup])[0] + assert result == expected + + @pytest.mark.parametrize('val', [ + 1, 1.4, 'A', b'A', u'A', pd.Timestamp("2012-01-01"), + pd.Timestamp("2012-01-01", tz='Europe/Brussels'), + datetime.datetime(2012, 1, 1), + pd.Timestamp("2012-01-01", tz='EST').to_pydatetime(), + pd.Timedelta('1 days'), datetime.timedelta(1), + pd.Period('2012-01-01', freq='D'), pd.Interval(0, 1), + np.nan, pd.NaT, None]) + def test_hash_scalar(self, val): + result = _hash_scalar(val) + 
expected = hash_array(np.array([val], dtype=object), categorize=True) + assert result[0] == expected[0] + + @pytest.mark.parametrize('val', [5, 'foo', pd.Timestamp('20130101')]) + def test_hash_tuples_err(self, val): + msg = 'must be convertible to a list-of-tuples' + with tm.assert_raises_regex(TypeError, msg): + hash_tuples(val) def test_multiindex_unique(self): mi = MultiIndex.from_tuples([(118, 472), (236, 118), @@ -172,36 +174,35 @@ def test_hash_pandas_object(self, obj): self.check_equal(obj) self.check_not_equal_with_index(obj) - def test_hash_pandas_object2(self): - for name, s in self.df.iteritems(): - self.check_equal(s) - self.check_not_equal_with_index(s) - - def test_hash_pandas_empty_object(self): - for obj in [Series([], dtype='float64'), - Series([], dtype='object'), - Index([])]: - self.check_equal(obj) + def test_hash_pandas_object2(self, series): + self.check_equal(series) + self.check_not_equal_with_index(series) - # these are by-definition the same with - # or w/o the index as the data is empty + @pytest.mark.parametrize('obj', [ + Series([], dtype='float64'), Series([], dtype='object'), Index([])]) + def test_hash_pandas_empty_object(self, obj): + # these are by-definition the same with + # or w/o the index as the data is empty + self.check_equal(obj) - def test_categorical_consistency(self): + @pytest.mark.parametrize('s1', [ + Series(['a', 'b', 'c', 'd']), + Series([1000, 2000, 3000, 4000]), + Series(pd.date_range(0, periods=4))]) + @pytest.mark.parametrize('categorize', [True, False]) + def test_categorical_consistency(self, s1, categorize): # GH15143 # Check that categoricals hash consistent with their values, not codes # This should work for categoricals of any dtype - for s1 in [Series(['a', 'b', 'c', 'd']), - Series([1000, 2000, 3000, 4000]), - Series(pd.date_range(0, periods=4))]: - s2 = s1.astype('category').cat.set_categories(s1) - s3 = s2.cat.set_categories(list(reversed(s1))) - for categorize in [True, False]: - # These should all 
hash identically - h1 = hash_pandas_object(s1, categorize=categorize) - h2 = hash_pandas_object(s2, categorize=categorize) - h3 = hash_pandas_object(s3, categorize=categorize) - tm.assert_series_equal(h1, h2) - tm.assert_series_equal(h1, h3) + s2 = s1.astype('category').cat.set_categories(s1) + s3 = s2.cat.set_categories(list(reversed(s1))) + + # These should all hash identically + h1 = hash_pandas_object(s1, categorize=categorize) + h2 = hash_pandas_object(s2, categorize=categorize) + h3 = hash_pandas_object(s3, categorize=categorize) + tm.assert_series_equal(h1, h2) + tm.assert_series_equal(h1, h3) def test_categorical_with_nan_consistency(self): c = pd.Categorical.from_codes( @@ -216,13 +217,12 @@ def test_categorical_with_nan_consistency(self): assert result[1] in expected def test_pandas_errors(self): - - for obj in [pd.Timestamp('20130101')]: - with pytest.raises(TypeError): - hash_pandas_object(obj) + with pytest.raises(TypeError): + hash_pandas_object(pd.Timestamp('20130101')) with catch_warnings(record=True): obj = tm.makePanel() + with pytest.raises(TypeError): hash_pandas_object(obj) @@ -238,9 +238,9 @@ def test_hash_keys(self): def test_invalid_key(self): # this only matters for object dtypes - def f(): + msg = 'key should be a 16-byte string encoded' + with tm.assert_raises_regex(ValueError, msg): hash_pandas_object(Series(list('abc')), hash_key='foo') - pytest.raises(ValueError, f) def test_alread_encoded(self): # if already encoded then ok @@ -253,19 +253,13 @@ def test_alternate_encoding(self): obj = Series(list('abc')) self.check_equal(obj, encoding='ascii') - def test_same_len_hash_collisions(self): - - for l in range(8): - length = 2**(l + 8) + 1 - s = tm.rands_array(length, 2) - result = hash_array(s, 'utf8') - assert not result[0] == result[1] - - for l in range(8): - length = 2**(l + 8) - s = tm.rands_array(length, 2) - result = hash_array(s, 'utf8') - assert not result[0] == result[1] + @pytest.mark.parametrize('l_exp', range(8)) + 
@pytest.mark.parametrize('l_add', [0, 1]) + def test_same_len_hash_collisions(self, l_exp, l_add): + length = 2**(l_exp + 8) + l_add + s = tm.rands_array(length, 2) + result = hash_array(s, 'utf8') + assert not result[0] == result[1] def test_hash_collisions(self):
Noticed that these tests could be parametrized when investigating a comment in the `IntervalArray` PR, and originally started doing to work there. The `IntervalArray` PR is big enough already, and this is unrelated, so creating a separate PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/21883
2018-07-13T00:58:55Z
2018-07-14T14:32:45Z
2018-07-14T14:32:45Z
2018-09-24T17:22:31Z
[BLD] enable cython coverage, use cythonize
diff --git a/.coveragerc b/.coveragerc index 3f630aa6cf8f5..f5c8b701a79a8 100644 --- a/.coveragerc +++ b/.coveragerc @@ -2,6 +2,7 @@ [run] branch = False omit = */tests/* +plugins = Cython.Coverage [report] # Regexes for lines to exclude from consideration @@ -22,6 +23,7 @@ exclude_lines = if __name__ == .__main__.: ignore_errors = False +show_missing = True [html] directory = coverage_html_report diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 76dadb4ec3e23..cd3ce5c1a8f09 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1655,8 +1655,8 @@ cdef class _Period(object): return value def __setstate__(self, state): - self.freq=state[1] - self.ordinal=state[2] + self.freq = state[1] + self.ordinal = state[2] def __reduce__(self): object_state = None, self.freq, self.ordinal diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/tslibs/util.pxd similarity index 97% rename from pandas/_libs/src/util.pxd rename to pandas/_libs/tslibs/util.pxd index 7ce2181f32553..305c4f8f908e0 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/tslibs/util.pxd @@ -1,4 +1,4 @@ -from numpy cimport ndarray, NPY_C_CONTIGUOUS, NPY_F_CONTIGUOUS +from numpy cimport ndarray cimport numpy as cnp cnp.import_array() @@ -64,7 +64,7 @@ cdef inline bint is_datetime64_object(object obj) nogil: # -------------------------------------------------------------------- -cdef extern from "numpy_helper.h": +cdef extern from "../src/numpy_helper.h": void set_array_not_contiguous(ndarray ao) int assign_value_1d(ndarray, Py_ssize_t, object) except -1 @@ -87,7 +87,7 @@ ctypedef fused numeric: cnp.float32_t cnp.float64_t -cdef extern from "headers/stdint.h": +cdef extern from "../src/headers/stdint.h": enum: UINT8_MAX enum: UINT16_MAX enum: UINT32_MAX diff --git a/pandas/_libs/util.pxd b/pandas/_libs/util.pxd new file mode 100644 index 0000000000000..0b7e66902cbb1 --- /dev/null +++ b/pandas/_libs/util.pxd @@ -0,0 +1 @@ +from tslibs.util cimport 
* diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index cea77e2c88b1b..b9dd46a10dfda 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -13,7 +13,7 @@ from numpy cimport ndarray, double_t, int64_t, float64_t cnp.import_array() -cdef extern from "../src/headers/cmath" namespace "std": +cdef extern from "src/headers/cmath" namespace "std": int signbit(double) nogil double sqrt(double x) nogil diff --git a/setup.py b/setup.py index 4910fcf292ca6..85c5970af018f 100755 --- a/setup.py +++ b/setup.py @@ -40,9 +40,11 @@ def is_platform_windows(): try: import Cython ver = Cython.__version__ + from Cython.Build import cythonize _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver) except ImportError: _CYTHON_INSTALLED = False + cythonize = lambda x, *args, **kwargs: x # dummy func # The import of Extension must be after the import of Cython, otherwise # we do not get the appropriately patched class. @@ -419,11 +421,66 @@ def get_tag(self): cmdclass['build_src'] = DummyBuildSrc cmdclass['build_ext'] = CheckingBuildExt +# ---------------------------------------------------------------------- +# Preparation of compiler arguments + if sys.byteorder == 'big': endian_macro = [('__BIG_ENDIAN__', '1')] else: endian_macro = [('__LITTLE_ENDIAN__', '1')] + +if is_platform_windows(): + extra_compile_args = [] +else: + # args to ignore warnings + extra_compile_args = ['-Wno-unused-function'] + + +# enable coverage by building cython files by setting the environment variable +# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) +linetrace = os.environ.get('PANDAS_CYTHON_COVERAGE', False) +CYTHON_TRACE = str(int(bool(linetrace))) + +# Note: if not using `cythonize`, coverage can be enabled by +# pinning `ext.cython_directives = directives` to each ext in extensions. 
+# github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy +directives = {'linetrace': False} +macros = [] +if linetrace: + # https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py + directives['linetrace'] = True + macros = [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')] + + +# ---------------------------------------------------------------------- +# Specification of Dependencies + +# TODO: Need to check to see if e.g. `linetrace` has changed and possibly +# re-compile. +def maybe_cythonize(extensions, *args, **kwargs): + """ + Render tempita templates before calling cythonize + """ + if len(sys.argv) > 1 and 'clean' in sys.argv: + # Avoid running cythonize on `python setup.py clean` + # See https://github.com/cython/cython/issues/1495 + return extensions + + numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') + # TODO: Is this really necessary here? + for ext in extensions: + if (hasattr(ext, 'include_dirs') and + numpy_incl not in ext.include_dirs): + ext.include_dirs.append(numpy_incl) + + if cython: + build_ext.render_templates(_pxifiles) + return cythonize(extensions, *args, **kwargs) + else: + return extensions + + lib_depends = ['inference'] @@ -434,23 +491,13 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): if suffix == '.pyx': lib_depends = [srcpath(f, suffix='.pyx', subdir='_libs/src') for f in lib_depends] - lib_depends.append('pandas/_libs/src/util.pxd') + lib_depends.append('pandas/_libs/util.pxd') else: lib_depends = [] common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src'] -def pxd(name): - return pjoin('pandas', name + '.pxd') - - -if is_platform_windows(): - extra_compile_args = [] -else: - # args to ignore warnings - extra_compile_args = ['-Wno-unused-function'] - lib_depends = lib_depends + ['pandas/_libs/src/numpy_helper.h', 'pandas/_libs/src/parse_helper.h', 'pandas/_libs/src/compat_helper.h'] @@ -466,22 +513,18 @@ def pxd(name): ext_data = { '_libs.algos': { 'pyxfile': 
'_libs/algos', - 'pxdfiles': ['_libs/src/util', '_libs/algos', '_libs/hashtable'], 'depends': _pxi_dep['algos']}, '_libs.groupby': { 'pyxfile': '_libs/groupby', - 'pxdfiles': ['_libs/src/util', '_libs/algos'], 'depends': _pxi_dep['groupby']}, '_libs.hashing': { 'pyxfile': '_libs/hashing'}, '_libs.hashtable': { 'pyxfile': '_libs/hashtable', - 'pxdfiles': ['_libs/hashtable', '_libs/missing', '_libs/khash'], 'depends': (['pandas/_libs/src/klib/khash_python.h'] + _pxi_dep['hashtable'])}, '_libs.index': { 'pyxfile': '_libs/index', - 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], 'depends': _pxi_dep['index'], 'sources': np_datetime_sources}, '_libs.indexing': { @@ -490,21 +533,15 @@ def pxd(name): 'pyxfile': '_libs/internals'}, '_libs.interval': { 'pyxfile': '_libs/interval', - 'pxdfiles': ['_libs/hashtable'], 'depends': _pxi_dep['interval']}, '_libs.join': { 'pyxfile': '_libs/join', - 'pxdfiles': ['_libs/src/util', '_libs/hashtable'], 'depends': _pxi_dep['join']}, '_libs.lib': { 'pyxfile': '_libs/lib', - 'pxdfiles': ['_libs/src/util', - '_libs/missing', - '_libs/tslibs/conversion'], 'depends': lib_depends + tseries_depends}, '_libs.missing': { 'pyxfile': '_libs/missing', - 'pxdfiles': ['_libs/src/util'], 'depends': tseries_depends}, '_libs.parsers': { 'pyxfile': '_libs/parsers', @@ -514,12 +551,9 @@ def pxd(name): 'sources': ['pandas/_libs/src/parser/tokenizer.c', 'pandas/_libs/src/parser/io.c']}, '_libs.reduction': { - 'pyxfile': '_libs/reduction', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/reduction'}, '_libs.ops': { - 'pyxfile': '_libs/ops', - 'pxdfiles': ['_libs/src/util', - '_libs/missing']}, + 'pyxfile': '_libs/ops'}, '_libs.properties': { 'pyxfile': '_libs/properties', 'include': []}, @@ -534,113 +568,66 @@ def pxd(name): 'depends': _pxi_dep['sparse']}, '_libs.tslib': { 'pyxfile': '_libs/tslib', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/conversion', - '_libs/tslibs/timedeltas', - '_libs/tslibs/timestamps', - '_libs/tslibs/timezones', - 
'_libs/tslibs/nattype', - '_libs/tslibs/offsets'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.ccalendar': { 'pyxfile': '_libs/tslibs/ccalendar'}, '_libs.tslibs.conversion': { 'pyxfile': '_libs/tslibs/conversion', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/nattype', - '_libs/tslibs/timezones', - '_libs/tslibs/timedeltas'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.fields': { 'pyxfile': '_libs/tslibs/fields', - 'pxdfiles': ['_libs/tslibs/ccalendar', - '_libs/tslibs/nattype'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.frequencies': { - 'pyxfile': '_libs/tslibs/frequencies', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/tslibs/frequencies'}, '_libs.tslibs.nattype': { - 'pyxfile': '_libs/tslibs/nattype', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/tslibs/nattype'}, '_libs.tslibs.np_datetime': { 'pyxfile': '_libs/tslibs/np_datetime', 'depends': np_datetime_headers, 'sources': np_datetime_sources}, '_libs.tslibs.offsets': { 'pyxfile': '_libs/tslibs/offsets', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/ccalendar', - '_libs/tslibs/conversion', - '_libs/tslibs/frequencies', - '_libs/tslibs/nattype'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.parsing': { - 'pyxfile': '_libs/tslibs/parsing', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/tslibs/parsing'}, '_libs.tslibs.period': { 'pyxfile': '_libs/tslibs/period', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/ccalendar', - '_libs/tslibs/timedeltas', - '_libs/tslibs/timezones', - '_libs/tslibs/nattype', - '_libs/tslibs/offsets'], 'depends': tseries_depends + ['pandas/_libs/src/period_helper.h'], 'sources': np_datetime_sources + ['pandas/_libs/src/period_helper.c']}, '_libs.tslibs.resolution': { 'pyxfile': '_libs/tslibs/resolution', - 'pxdfiles': ['_libs/src/util', - '_libs/khash', - '_libs/tslibs/ccalendar', - '_libs/tslibs/frequencies', - 
'_libs/tslibs/timezones'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.strptime': { 'pyxfile': '_libs/tslibs/strptime', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/nattype'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.timedeltas': { 'pyxfile': '_libs/tslibs/timedeltas', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/nattype', - '_libs/tslibs/offsets'], 'depends': np_datetime_headers, 'sources': np_datetime_sources}, '_libs.tslibs.timestamps': { 'pyxfile': '_libs/tslibs/timestamps', - 'pxdfiles': ['_libs/src/util', - '_libs/tslibs/ccalendar', - '_libs/tslibs/conversion', - '_libs/tslibs/nattype', - '_libs/tslibs/offsets', - '_libs/tslibs/timedeltas', - '_libs/tslibs/timezones'], 'depends': tseries_depends, 'sources': np_datetime_sources}, '_libs.tslibs.timezones': { - 'pyxfile': '_libs/tslibs/timezones', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/tslibs/timezones'}, '_libs.testing': { 'pyxfile': '_libs/testing'}, '_libs.window': { 'pyxfile': '_libs/window', - 'pxdfiles': ['_libs/skiplist', '_libs/src/util'], 'language': 'c++', 'suffix': '.cpp'}, '_libs.writers': { - 'pyxfile': '_libs/writers', - 'pxdfiles': ['_libs/src/util']}, + 'pyxfile': '_libs/writers'}, 'io.sas._sas': { 'pyxfile': 'io/sas/sas'}, 'io.msgpack._packer': { - 'macros': endian_macro, + 'macros': endian_macro + macros, 'depends': ['pandas/_libs/src/msgpack/pack.h', 'pandas/_libs/src/msgpack/pack_template.h'], 'include': ['pandas/_libs/src/msgpack'] + common_include, @@ -652,7 +639,7 @@ def pxd(name): 'depends': ['pandas/_libs/src/msgpack/unpack.h', 'pandas/_libs/src/msgpack/unpack_define.h', 'pandas/_libs/src/msgpack/unpack_template.h'], - 'macros': endian_macro, + 'macros': endian_macro + macros, 'include': ['pandas/_libs/src/msgpack'] + common_include, 'language': 'c++', 'suffix': '.cpp', @@ -668,10 +655,6 @@ def pxd(name): sources = [srcpath(data['pyxfile'], suffix=source_suffix, subdir='')] - pxds = [pxd(x) for x in 
data.get('pxdfiles', [])] - if suffix == '.pyx' and pxds: - sources.extend(pxds) - sources.extend(data.get('sources', [])) include = data.get('include', common_include) @@ -681,7 +664,7 @@ def pxd(name): depends=data.get('depends', []), include_dirs=include, language=data.get('language', 'c'), - define_macros=data.get('macros', []), + define_macros=data.get('macros', macros), extra_compile_args=extra_compile_args) extensions.append(obj) @@ -708,7 +691,8 @@ def pxd(name): 'pandas/_libs/src/ujson/lib', 'pandas/_libs/src/datetime'], extra_compile_args=(['-D_GNU_SOURCE'] + - extra_compile_args)) + extra_compile_args), + define_macros=macros) extensions.append(ujson_ext) @@ -718,7 +702,8 @@ def pxd(name): # extension for pseudo-safely moving bytes into mutable buffers _move_ext = Extension('pandas.util._move', depends=[], - sources=['pandas/util/move.c']) + sources=['pandas/util/move.c'], + define_macros=macros) extensions.append(_move_ext) # The build cache system does string matching below this point. @@ -729,7 +714,7 @@ def pxd(name): version=versioneer.get_version(), packages=find_packages(include=['pandas', 'pandas.*']), package_data={'': ['templates/*', '_libs/*.dll']}, - ext_modules=extensions, + ext_modules=maybe_cythonize(extensions, compiler_directives=directives), maintainer_email=EMAIL, description=DESCRIPTION, license=LICENSE,
- [x] closes #18089 Doesn't close #12624, but merits a mention. Usage: ``` $ export PANDAS_CYTHON_COVERAGE=TRUE $ python setup.py build_ext --inplace $ coverage erase $ pytest pandas/tests/scalar --cov=pandas/_libs [...] Name Stmts Miss Cover Missing ----------------------------------------------------------------------- pandas/_libs/__init__.py 1 0 100% pandas/_libs/algos.pyx 162 116 28% 43, 45-47, 58, 66, 82-107, 132-145, 149-153, 160-181, 183, 198-206, 209, 212-244, 263-275, 278-284, 288-289, 292-299, 302-310 pandas/_libs/algos_common_helper.pxi 1884 1851 2% 25-1203, 1214-1942, 1958-1962, 1964, 1967, 1974-1976, 1981-1987, 1989-1991, 1999-3171, 3177-3227, 3232-3243 pandas/_libs/groupby.pyx 168 148 12% 29-94, 113-134, 143-163, 165-171, 190-207, 210-213, 224-257, 260-265, 293-303, 306-314, 317-323, 354-380 pandas/_libs/hashing.pyx 97 94 3% 48-190 pandas/_libs/hashtable.pyx 83 75 10% 59-142, 154-177 pandas/_libs/hashtable_class_helper.pxi 846 828 2% 19-805, 810, 813, 818-829, 837-864, 878-1423 pandas/_libs/index.pyx 384 300 22% 34, 36-39, 46, 49-71, 81-89, 95-100, 110, 114, 126, 130, 135-143, 147, 153-202, 204, 211-214, 221, 223, 227, 233-236, 245-258, 261, 277, 280, 289, 293-314, 318-403, 406, 409, 411, 415-429, 433-434, 440-525, 536-542, 546, 549-694 pandas/_libs/index_class_helper.pxi 95 90 5% 14-115, 117, 120, 122-123, 126-165 [...] ``` (I'll post results from a full test run in a bit) Apparently there is an issue with the Cython.Coverage plugin that causes cdef function/class definition lines to not get covered. Not sure if that's going to be fixed or if we need to find a workaround. To make `cythonize` work I had to move `util.pxd` to `tslibs`, then cimport everything into a _libs.util pxd namespace. There may be a way around this that I haven't found. If not, there are parts of `tslibs.util` that are not used in `tslibs`, can be moved to `_libs.util`. 
We _could_ remove a whole bunch more from the `ext_data` dictionary, but I'm saving that until after the first pass of the review process. (I think doing so will lighten the build, not sure) This will have a merge conflict with #21878, but it'll be easy to resolve when the time comes.
https://api.github.com/repos/pandas-dev/pandas/pulls/21879
2018-07-12T18:11:58Z
2018-07-20T20:28:43Z
2018-07-20T20:28:43Z
2018-07-22T14:41:16Z
Cleanup cimports, implement bits of numpy_helper in util.pxd
diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index f5f9c06a7e4c2..5e4a431caca00 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -1,21 +1,20 @@ # -*- coding: utf-8 -*- # cython: profile=False -cimport numpy as cnp -import numpy as np - cimport cython +from cython cimport Py_ssize_t -cnp.import_array() +from libc.stdlib cimport malloc, free +import numpy as np from numpy cimport (ndarray, double_t, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float32_t, float64_t) -from libc.stdlib cimport malloc, free from util cimport numeric, get_nat + from algos cimport (swap, TiebreakEnumType, TIEBREAK_AVERAGE, TIEBREAK_MIN, TIEBREAK_MAX, TIEBREAK_FIRST, TIEBREAK_DENSE) from algos import take_2d_axis1_float64_float64, groupsort_indexer, tiebreakers @@ -74,8 +73,8 @@ cdef inline float64_t kth_smallest_c(float64_t* a, double_t x, t l = 0 - m = n -1 - while (l<m): + m = n - 1 + while l < m: x = a[k] i = l j = m diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index e408e02b9d5a1..31ef4b7a3e807 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -9,14 +9,12 @@ from cpython.slice cimport PySlice_Check import numpy as np cimport numpy as cnp from numpy cimport (ndarray, float64_t, int32_t, - int64_t, uint8_t, uint64_t, intp_t) + int64_t, uint8_t, uint64_t, intp_t, + # Note: NPY_DATETIME, NPY_TIMEDELTA are only available + # for cimport in cython>=0.27.3 + NPY_DATETIME, NPY_TIMEDELTA) cnp.import_array() -cdef extern from "numpy/arrayobject.h": - # These can be cimported directly from numpy in cython>=0.27.3 - cdef enum NPY_TYPES: - NPY_DATETIME - NPY_TIMEDELTA cimport util diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 4129132251682..b0d8ce9e4b237 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -1,17 +1,22 @@ -cimport numpy as cnp -import numpy as np +# -*- coding: utf-8 -*- +import numbers + +from cpython.object cimport (Py_EQ, Py_NE, 
Py_GT, Py_LT, Py_GE, Py_LE, + PyObject_RichCompare) -cimport util cimport cython -import cython +from cython cimport Py_ssize_t + +import numpy as np from numpy cimport ndarray + + +cimport util + from tslibs import Timestamp from tslibs.timezones cimport tz_compare -from cpython.object cimport (Py_EQ, Py_NE, Py_GT, Py_LT, Py_GE, Py_LE, - PyObject_RichCompare) -import numbers _VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither']) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 23aebc85e6300..172117f7d8059 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -753,4 +753,4 @@ def indices_fast(object index, ndarray[int64_t] labels, list keys, return result -include "inference.pyx" +include "src/inference.pyx" diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index 5cfa51dc8a0be..f409fec44890d 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -30,24 +30,6 @@ PANDAS_INLINE PyObject* get_value_1d(PyArrayObject* ap, Py_ssize_t i) { return PyArray_Scalar(item, PyArray_DESCR(ap), (PyObject*)ap); } -// returns ASCII or UTF8 (py3) view on python str -// python object owns memory, should not be freed -PANDAS_INLINE const char* get_c_string(PyObject* obj) { -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_AsUTF8(obj); -#else - return PyString_AsString(obj); -#endif -} - -PANDAS_INLINE PyObject* char_to_string(const char* data) { -#if PY_VERSION_HEX >= 0x03000000 - return PyUnicode_FromString(data); -#else - return PyString_FromString(data); -#endif -} - void set_array_not_contiguous(PyArrayObject* ao) { ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS); diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd index a8cd78016665f..728eb63dc836c 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/src/util.pxd @@ -4,6 +4,9 @@ cnp.import_array() cimport cpython from cpython cimport PyTypeObject +from cpython.string cimport PyString_FromString, PyString_AsString 
+ +DEF PY3 = bytes != str cdef extern from "Python.h": # Note: importing extern-style allows us to declare these as nogil @@ -14,6 +17,8 @@ cdef extern from "Python.h": bint PyFloat_Check(object obj) nogil bint PyComplex_Check(object obj) nogil bint PyObject_TypeCheck(object obj, PyTypeObject* type) nogil + char* PyUnicode_AsUTF8(object unicode) + object PyUnicode_FromString(const char* u) nogil cdef extern from "numpy/arrayobject.h": @@ -69,8 +74,6 @@ cdef extern from "numpy_helper.h": int assign_value_1d(ndarray, Py_ssize_t, object) except -1 cnp.int64_t get_nat() object get_value_1d(ndarray, Py_ssize_t) - char *get_c_string(object) except NULL - object char_to_string(char*) ctypedef fused numeric: cnp.int8_t @@ -101,6 +104,26 @@ cdef extern from "headers/stdint.h": enum: INT64_MIN +cdef inline const char* get_c_string(object obj) except NULL: + """ + returns ASCII or UTF8 (py3) view on python str + python object owns memory, should not be freed + """ + # TODO: this docstring is copied verbatim from version that was + # directly in numpy_helper.C; is it still accurate? 
+ IF PY3: + return PyUnicode_AsUTF8(obj) + ELSE: + return PyString_AsString(obj) + + +cdef inline object char_to_string(const char* data): + IF PY3: + return PyUnicode_FromString(data) + ELSE: + return PyString_FromString(data) + + cdef inline object get_value_at(ndarray arr, object loc): cdef: Py_ssize_t i, sz diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 281e497945c5f..1d44af6b81992 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,24 +1,29 @@ # -*- coding: utf-8 -*- # cython: profile=False +cimport cython +from cython cimport Py_ssize_t + +from cpython cimport PyFloat_Check, PyUnicode_Check + +from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, + PyDateTime_CheckExact, + PyDateTime_IMPORT, + timedelta, datetime, date, time) +# import datetime C API +PyDateTime_IMPORT + cimport numpy as cnp from numpy cimport int64_t, ndarray, float64_t import numpy as np cnp.import_array() +import pytz -from cpython cimport PyFloat_Check, PyUnicode_Check from util cimport (is_integer_object, is_float_object, is_string_object, is_datetime64_object) -from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, - PyDateTime_CheckExact, - PyDateTime_IMPORT, - timedelta, datetime, date, time) -# import datetime C API -PyDateTime_IMPORT - from tslibs.np_datetime cimport (check_dts_bounds, pandas_datetimestruct, @@ -30,13 +35,6 @@ from tslibs.np_datetime import OutOfBoundsDatetime from tslibs.parsing import parse_datetime_string -cimport cython -from cython cimport Py_ssize_t - - -import pytz - - from tslibs.timedeltas cimport cast_from_unit from tslibs.timezones cimport (is_utc, is_tzlocal, is_fixed_offset, treat_tz_as_pytz, get_dst_info) @@ -54,7 +52,8 @@ from tslibs.timestamps cimport (create_timestamp_from_ts, _NS_UPPER_BOUND, _NS_LOWER_BOUND) from tslibs.timestamps import Timestamp -cdef bint PY2 = str == bytes + +DEF PY2 = str == bytes cdef inline object create_datetime_from_ts( @@ -556,8 +555,9 @@ cpdef 
array_to_datetime(ndarray[object] values, errors='raise', if len(val) == 0 or val in nat_strings: iresult[i] = NPY_NAT continue - if PyUnicode_Check(val) and PY2: - val = val.encode('utf-8') + if PY2: + if PyUnicode_Check(val): + val = val.encode('utf-8') try: _string_to_dts(val, &dts, &out_local, &out_tzoffset) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 27b7f03358a3a..094a37b210516 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -334,8 +334,6 @@ class _BaseOffset(object): # other is not a DateOffset object return False - return self._params == other._params - def __ne__(self, other): return not self == other diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index f5048d32e826b..580d155f87fa8 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -10,7 +10,7 @@ cimport cython from cython cimport Py_ssize_t -from datetime import datetime +from cpython.datetime cimport datetime import time import numpy as np @@ -37,7 +37,7 @@ from dateutil.parser import DEFAULTPARSER from dateutil.parser import parse as du_parse from ccalendar import MONTH_NUMBERS -from nattype import nat_strings +from nattype import nat_strings, NaT # ---------------------------------------------------------------------- # Constants @@ -54,9 +54,6 @@ cdef object _TIMEPAT = re.compile(r'^([01]?[0-9]|2[0-3]):([0-5][0-9])') cdef set _not_datelike_strings = {'a', 'A', 'm', 'M', 'p', 'P', 't', 'T'} -NAT_SENTINEL = object() -# This allows us to reference NaT without having to import it - # ---------------------------------------------------------------------- @@ -136,9 +133,6 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): res = parse_datetime_string_with_reso(arg, freq=freq, dayfirst=dayfirst, yearfirst=yearfirst) - if res[0] is NAT_SENTINEL: - from pandas._libs.tslib import NaT - res = (NaT,) + res[1:] return res @@ -206,7 +200,7 @@ cdef 
inline object _parse_dateabbr_string(object date_string, object default, # should be NaT??? if date_string in nat_strings: - return NAT_SENTINEL, NAT_SENTINEL, '' + return NaT, NaT, '' date_string = date_string.upper() date_len = len(date_string) @@ -407,7 +401,7 @@ def try_parse_dates(ndarray[object] values, parser=None, # EAFP here try: - for i from 0 <= i < n: + for i in range(n): if values[i] == '': result[i] = np.nan else: @@ -419,7 +413,7 @@ def try_parse_dates(ndarray[object] values, parser=None, parse_date = parser try: - for i from 0 <= i < n: + for i in range(n): if values[i] == '': result[i] = np.nan else: @@ -459,7 +453,7 @@ def try_parse_date_and_time(ndarray[object] dates, ndarray[object] times, else: parse_time = time_parser - for i from 0 <= i < n: + for i in range(n): d = parse_date(str(dates[i])) t = parse_time(str(times[i])) result[i] = datetime(d.year, d.month, d.day, @@ -479,7 +473,7 @@ def try_parse_year_month_day(ndarray[object] years, ndarray[object] months, raise ValueError('Length of years/months/days must all be equal') result = np.empty(n, dtype='O') - for i from 0 <= i < n: + for i in range(n): result[i] = datetime(int(years[i]), int(months[i]), int(days[i])) return result @@ -505,7 +499,7 @@ def try_parse_datetime_components(ndarray[object] years, raise ValueError('Length of all datetime components must be equal') result = np.empty(n, dtype='O') - for i from 0 <= i < n: + for i in range(n): float_secs = float(seconds[i]) secs = int(float_secs) diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 0ec5d25beeeb9..2ce1008d0ffb3 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -46,14 +46,14 @@ from conversion cimport tz_convert_utc_to_tzlocal from frequencies cimport (get_freq_code, get_base_alias, get_to_timestamp_base, get_freq_str, get_rule_month) -from parsing import parse_time_string, NAT_SENTINEL +from parsing import parse_time_string from resolution import Resolution 
from nattype import nat_strings, NaT, iNaT from nattype cimport _nat_scalar_rules, NPY_NAT, is_null_datetimelike from offsets cimport to_offset from offsets import _Tick -cdef bint PY2 = str == bytes +DEF PY2 = str == bytes cdef extern from "period_helper.h": @@ -729,7 +729,7 @@ cdef object _period_strftime(int64_t value, int freq, object fmt): result = result.replace(str_extra_fmts[i], repl) - if PY2: + IF PY2: result = result.decode('utf-8', 'ignore') return result @@ -1820,7 +1820,7 @@ class Period(_Period): value = str(value) value = value.upper() dt, _, reso = parse_time_string(value, freq) - if dt is NAT_SENTINEL: + if dt is NaT: ordinal = iNaT if freq is None: diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index a53d794b48cfa..5f085ff135d93 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -339,10 +339,6 @@ class Resolution(object): # ---------------------------------------------------------------------- # Frequency Inference - -# TODO: this is non performant logic here (and duplicative) and this -# simply should call unique_1d directly -# plus no reason to depend on khash directly cdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): cdef: Py_ssize_t i, n = len(arr) @@ -367,6 +363,50 @@ cdef ndarray[int64_t, ndim=1] unique_deltas(ndarray[int64_t] arr): return result +cdef object month_position_check(fields, weekdays): + cdef: + int32_t daysinmonth, y, m, d + bint calendar_end = True + bint business_end = True + bint calendar_start = True + bint business_start = True + bint cal + int32_t[:] years + int32_t[:] months + int32_t[:] days + + years = fields['Y'] + months = fields['M'] + days = fields['D'] + + for y, m, d, wd in zip(years, months, days, weekdays): + if calendar_start: + calendar_start &= d == 1 + if business_start: + business_start &= d == 1 or (d <= 3 and wd == 0) + + if calendar_end or business_end: + daysinmonth = get_days_in_month(y, m) + cal = d == 
daysinmonth + if calendar_end: + calendar_end &= cal + if business_end: + business_end &= cal or (daysinmonth - d < 3 and wd == 4) + elif not calendar_start and not business_start: + break + + if calendar_end: + return 'ce' + elif business_end: + return 'be' + elif calendar_start: + return 'cs' + elif business_start: + return 'bs' + else: + return None + + cdef inline bint _is_multiple(int64_t us, int64_t mult): return us % mult == 0 @@ -475,52 +515,8 @@ cdef class _FrequencyInferer(object): def rep_stamp(self): return Timestamp(self.values[0]) - cdef month_position_check(self): - # TODO: cythonize this, very slow - cdef: - int32_t daysinmonth, y, m, d - bint calendar_end = True - bint business_end = True - bint calendar_start = True - bint business_start = True - bint cal - int32_t[:] years - int32_t[:] months - int32_t[:] days - - fields = self.fields - years = fields['Y'] - months = fields['M'] - days = fields['D'] - weekdays = self.index.dayofweek - - for y, m, d, wd in zip(years, months, days, weekdays): - - if calendar_start: - calendar_start &= d == 1 - if business_start: - business_start &= d == 1 or (d <= 3 and wd == 0) - - if calendar_end or business_end: - daysinmonth = get_days_in_month(y, m) - cal = d == daysinmonth - if calendar_end: - calendar_end &= cal - if business_end: - business_end &= cal or (daysinmonth - d < 3 and wd == 4) - elif not calendar_start and not business_start: - break - - if calendar_end: - return 'ce' - elif business_end: - return 'be' - elif calendar_start: - return 'cs' - elif business_start: - return 'bs' - else: - return None + cdef object month_position_check(self): + return month_position_check(self.fields, self.index.dayofweek) @cache_readonly def mdiffs(self):
Removes a few unnecessary uses of `cnp.import_array()` Uses cython's conditional compilation to avoid runtime PY2/PY3 checks Arranges cython imports in stdlib-->3rd party--> internal order. Separates out the one part of `libresolution._FrequencyInferer` that we actually _do_ want to keep in cython Couple of small lintings in groupby.pyx
https://api.github.com/repos/pandas-dev/pandas/pulls/21878
2018-07-12T17:50:20Z
2018-07-14T14:36:11Z
2018-07-14T14:36:11Z
2018-07-14T15:52:16Z
BUG: Align Series.str.zfill() with str.zfill()
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 00379c7e9d511..51735548db7d3 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -84,6 +84,7 @@ Other Enhancements - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) +- :func:`Series.str.zfill()` now matches with standard string library zfill (:issue:`20868`) - .. _whatsnew_0240.api_breaking: diff --git a/pandas/core/strings.py b/pandas/core/strings.py index e4765c00f80fd..29990e0efa8af 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2557,20 +2557,26 @@ def zfill(self, width): Note that ``10`` and ``NaN`` are not strings, therefore they are converted to ``NaN``. The minus sign in ``'-1'`` is treated as a - regular character and the zero is added to the left of it + special character and the zero is added to the right of it (:meth:`str.zfill` would have moved it to the left). ``1000`` remains unchanged as it is longer than `width`. 
>>> s.str.zfill(3) - 0 0-1 + 0 -01 1 001 2 1000 3 NaN 4 NaN dtype: object """ - result = str_pad(self._data, width, side='left', fillchar='0') - return self._wrap_result(result) + + if not is_integer(width): + msg = 'width must be of integer type, not {0}' + raise TypeError(msg.format(type(width).__name__)) + + f = lambda x: x.zfill(width) + + return self._wrap_result(_na_map(f, self._data)) @copy(str_slice) def slice(self, start=None, stop=None, step=None): diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 9d008dfd25c90..8624cba57a354 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -2220,27 +2220,31 @@ def test_center_ljust_rjust_fillchar(self): result = values.str.rjust(5, fillchar=1) def test_zfill(self): - values = Series(['1', '22', 'aaa', '333', '45678']) + values = Series(['1', '+1', '-1', '22', 'aaa', '333', '45678']) result = values.str.zfill(5) - expected = Series(['00001', '00022', '00aaa', '00333', '45678']) + expected = Series(['00001', '+0001', '-0001', '00022', '00aaa', + '00333', '45678']) tm.assert_series_equal(result, expected) expected = np.array([v.zfill(5) for v in values.values], dtype=np.object_) tm.assert_numpy_array_equal(result.values, expected) result = values.str.zfill(3) - expected = Series(['001', '022', 'aaa', '333', '45678']) + expected = Series(['001', '+01', '-01', '022', 'aaa', '333', '45678']) tm.assert_series_equal(result, expected) expected = np.array([v.zfill(3) for v in values.values], dtype=np.object_) tm.assert_numpy_array_equal(result.values, expected) - values = Series(['1', np.nan, 'aaa', np.nan, '45678']) + values = Series(['1', np.nan, 'aaa', np.nan, '45678', 10]) result = values.str.zfill(5) - expected = Series(['00001', np.nan, '00aaa', np.nan, '45678']) + expected = Series(['00001', np.nan, '00aaa', np.nan, '45678', np.nan]) tm.assert_series_equal(result, expected) + with tm.assert_raises_regex(TypeError, "width must be of integer"): + 
values.str.zfill('5') + def test_split(self): values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
- [ ] closes #20868 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Using standard library `zfill` method directly to handle specific characters issue
https://api.github.com/repos/pandas-dev/pandas/pulls/21874
2018-07-12T06:20:40Z
2018-11-23T03:32:37Z
null
2018-11-23T03:32:37Z
TST: Parameterize more tests
diff --git a/pandas/conftest.py b/pandas/conftest.py index e49b2bedee47b..c1376670ffbf0 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -259,7 +259,10 @@ def string_dtype(request): return request.param -@pytest.fixture(params=[float, "float32", "float64"]) +FLOAT_DTYPES = [float, "float32", "float64"] + + +@pytest.fixture(params=FLOAT_DTYPES) def float_dtype(request): """ Parameterized fixture for float dtypes. @@ -286,6 +289,7 @@ def complex_dtype(request): UNSIGNED_INT_DTYPES = ["uint8", "uint16", "uint32", "uint64"] SIGNED_INT_DTYPES = [int, "int8", "int16", "int32", "int64"] ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES +ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES @pytest.fixture(params=SIGNED_INT_DTYPES) @@ -334,6 +338,26 @@ def any_int_dtype(request): return request.param +@pytest.fixture(params=ALL_REAL_DTYPES) +def any_real_dtype(request): + """ + Parameterized fixture for any (purely) real numeric dtypes. + + * int8 + * uint8 + * int16 + * uint16 + * int32 + * uint32 + * int64 + * uint64 + * float32 + * float64 + """ + + return request.param + + @pytest.fixture def mock(): """ diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 1eeeec0be3b8b..76a50a9ecf5e7 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -9,7 +9,7 @@ import numpy as np from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, Categorical, compat, concat, option_context) -from pandas.compat import u +from pandas.compat import u, PY2 from pandas import _np_version_under1p14 from pandas.core.dtypes.dtypes import DatetimeTZDtype, CategoricalDtype @@ -21,6 +21,11 @@ import pandas as pd +@pytest.fixture(params=[str, compat.text_type]) +def text_dtype(request): + return request.param + + class TestDataFrameDataTypes(TestData): def test_concat_empty_dataframe_dtypes(self): @@ -351,27 +356,23 @@ def test_select_dtypes_datetime_with_tz(self): expected = df3.reindex(columns=[]) 
assert_frame_equal(result, expected) - def test_select_dtypes_str_raises(self): - df = DataFrame({'a': list('abc'), - 'g': list(u('abc')), - 'b': list(range(1, 4)), - 'c': np.arange(3, 6).astype('u1'), - 'd': np.arange(4.0, 7.0, dtype='float64'), - 'e': [True, False, True], - 'f': pd.date_range('now', periods=3).values}) - string_dtypes = set((str, 'str', np.string_, 'S1', - 'unicode', np.unicode_, 'U1')) - try: - string_dtypes.add(unicode) - except NameError: - pass - for dt in string_dtypes: - with tm.assert_raises_regex(TypeError, - 'string dtypes are not allowed'): - df.select_dtypes(include=[dt]) - with tm.assert_raises_regex(TypeError, - 'string dtypes are not allowed'): - df.select_dtypes(exclude=[dt]) + @pytest.mark.parametrize( + "dtype", [str, "str", np.string_, "S1", + "unicode", np.unicode_, "U1"] + ([unicode] if PY2 else [])) + @pytest.mark.parametrize("arg", ["include", "exclude"]) + def test_select_dtypes_str_raises(self, dtype, arg): + df = DataFrame({"a": list("abc"), + "g": list(u("abc")), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values}) + msg = "string dtypes are not allowed" + kwargs = {arg: [dtype]} + + with tm.assert_raises_regex(TypeError, msg): + df.select_dtypes(**kwargs) def test_select_dtypes_bad_arg_raises(self): df = DataFrame({'a': list('abc'), @@ -502,61 +503,59 @@ def test_astype_with_view(self): tf = self.frame.astype(np.float64) casted = tf.astype(np.int64, copy=False) # noqa - def test_astype_cast_nan_inf_int(self): - # GH14265, check nan and inf raise error when converting to int - types = [np.int32, np.int64] - values = [np.nan, np.inf] - msg = 'Cannot convert non-finite values \\(NA or inf\\) to integer' + @pytest.mark.parametrize("dtype", [np.int32, np.int64]) + @pytest.mark.parametrize("val", [np.nan, np.inf]) + def test_astype_cast_nan_inf_int(self, val, dtype): + # see gh-14265 + # + # Check 
NaN and inf --> raise error when converting to int. + msg = "Cannot convert non-finite values \\(NA or inf\\) to integer" + df = DataFrame([val]) - for this_type in types: - for this_val in values: - df = DataFrame([this_val]) - with tm.assert_raises_regex(ValueError, msg): - df.astype(this_type) + with tm.assert_raises_regex(ValueError, msg): + df.astype(dtype) - def test_astype_str(self): - # GH9757 - a = Series(date_range('2010-01-04', periods=5)) - b = Series(date_range('3/6/2012 00:00', periods=5, tz='US/Eastern')) - c = Series([Timedelta(x, unit='d') for x in range(5)]) + def test_astype_str(self, text_dtype): + # see gh-9757 + a = Series(date_range("2010-01-04", periods=5)) + b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern")) + c = Series([Timedelta(x, unit="d") for x in range(5)]) d = Series(range(5)) e = Series([0.0, 0.2, 0.4, 0.6, 0.8]) - df = DataFrame({'a': a, 'b': b, 'c': c, 'd': d, 'e': e}) - - # datetimelike - # Test str and unicode on python 2.x and just str on python 3.x - for tt in set([str, compat.text_type]): - result = df.astype(tt) - - expected = DataFrame({ - 'a': list(map(tt, map(lambda x: Timestamp(x)._date_repr, - a._values))), - 'b': list(map(tt, map(Timestamp, b._values))), - 'c': list(map(tt, map(lambda x: Timedelta(x) - ._repr_base(format='all'), c._values))), - 'd': list(map(tt, d._values)), - 'e': list(map(tt, e._values)), - }) - - assert_frame_equal(result, expected) - - # float/nan - # 11302 - # consistency in astype(str) - for tt in set([str, compat.text_type]): - result = DataFrame([np.NaN]).astype(tt) - expected = DataFrame(['nan']) - assert_frame_equal(result, expected) - - result = DataFrame([1.12345678901234567890]).astype(tt) - if _np_version_under1p14: - # < 1.14 truncates - expected = DataFrame(['1.12345678901']) - else: - # >= 1.14 preserves the full repr - expected = DataFrame(['1.1234567890123457']) - assert_frame_equal(result, expected) + df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e}) + + # 
Datetime-like + # Test str and unicode on Python 2.x and just str on Python 3.x + result = df.astype(text_dtype) + + expected = DataFrame({ + "a": list(map(text_dtype, + map(lambda x: Timestamp(x)._date_repr, a._values))), + "b": list(map(text_dtype, map(Timestamp, b._values))), + "c": list(map(text_dtype, + map(lambda x: Timedelta(x)._repr_base(format="all"), + c._values))), + "d": list(map(text_dtype, d._values)), + "e": list(map(text_dtype, e._values)), + }) + + assert_frame_equal(result, expected) + + def test_astype_str_float(self, text_dtype): + # see gh-11302 + result = DataFrame([np.NaN]).astype(text_dtype) + expected = DataFrame(["nan"]) + + assert_frame_equal(result, expected) + result = DataFrame([1.12345678901234567890]).astype(text_dtype) + + # < 1.14 truncates + # >= 1.14 preserves the full repr + val = ("1.12345678901" if _np_version_under1p14 + else "1.1234567890123457") + expected = DataFrame([val]) + assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype_class", [dict, Series]) def test_astype_dict_like(self, dtype_class): diff --git a/pandas/tests/reshape/merge/test_merge_asof.py b/pandas/tests/reshape/merge/test_merge_asof.py index 59b53cd23010e..d5df9d3820fdc 100644 --- a/pandas/tests/reshape/merge/test_merge_asof.py +++ b/pandas/tests/reshape/merge/test_merge_asof.py @@ -892,77 +892,64 @@ def test_on_float(self): assert_frame_equal(result, expected) - def test_on_specialized_type(self): - # GH13936 - for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, - np.int8, np.int16, np.int32, np.int64, - np.float16, np.float32, np.float64]: - df1 = pd.DataFrame({ - 'value': [5, 2, 25, 100, 78, 120, 79], - 'symbol': list("ABCDEFG")}, - columns=['symbol', 'value']) - df1.value = dtype(df1.value) - - df2 = pd.DataFrame({ - 'value': [0, 80, 120, 125], - 'result': list('xyzw')}, - columns=['value', 'result']) - df2.value = dtype(df2.value) - - df1 = df1.sort_values('value').reset_index(drop=True) - - if dtype == np.float16: - with 
pytest.raises(MergeError): - pd.merge_asof(df1, df2, on='value') - continue - - result = pd.merge_asof(df1, df2, on='value') - - expected = pd.DataFrame( - {'symbol': list("BACEGDF"), - 'value': [2, 5, 25, 78, 79, 100, 120], - 'result': list('xxxxxyz') - }, columns=['symbol', 'value', 'result']) - expected.value = dtype(expected.value) - - assert_frame_equal(result, expected) - - def test_on_specialized_type_by_int(self): - # GH13936 - for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, - np.int8, np.int16, np.int32, np.int64, - np.float16, np.float32, np.float64]: - df1 = pd.DataFrame({ - 'value': [5, 2, 25, 100, 78, 120, 79], - 'key': [1, 2, 3, 2, 3, 1, 2], - 'symbol': list("ABCDEFG")}, - columns=['symbol', 'key', 'value']) - df1.value = dtype(df1.value) - - df2 = pd.DataFrame({ - 'value': [0, 80, 120, 125], - 'key': [1, 2, 2, 3], - 'result': list('xyzw')}, - columns=['value', 'key', 'result']) - df2.value = dtype(df2.value) - - df1 = df1.sort_values('value').reset_index(drop=True) - - if dtype == np.float16: - with pytest.raises(MergeError): - pd.merge_asof(df1, df2, on='value', by='key') - else: - result = pd.merge_asof(df1, df2, on='value', by='key') - - expected = pd.DataFrame({ - 'symbol': list("BACEGDF"), - 'key': [2, 1, 3, 3, 2, 2, 1], - 'value': [2, 5, 25, 78, 79, 100, 120], - 'result': [np.nan, 'x', np.nan, np.nan, np.nan, 'y', 'x']}, - columns=['symbol', 'key', 'value', 'result']) - expected.value = dtype(expected.value) - - assert_frame_equal(result, expected) + def test_on_specialized_type(self, any_real_dtype): + # see gh-13936 + dtype = np.dtype(any_real_dtype).type + + df1 = pd.DataFrame({ + "value": [5, 2, 25, 100, 78, 120, 79], + "symbol": list("ABCDEFG")}, + columns=["symbol", "value"]) + df1.value = dtype(df1.value) + + df2 = pd.DataFrame({ + "value": [0, 80, 120, 125], + "result": list("xyzw")}, + columns=["value", "result"]) + df2.value = dtype(df2.value) + + df1 = df1.sort_values("value").reset_index(drop=True) + result = 
pd.merge_asof(df1, df2, on="value") + + expected = pd.DataFrame( + {"symbol": list("BACEGDF"), + "value": [2, 5, 25, 78, 79, 100, 120], + "result": list("xxxxxyz") + }, columns=["symbol", "value", "result"]) + expected.value = dtype(expected.value) + + assert_frame_equal(result, expected) + + def test_on_specialized_type_by_int(self, any_real_dtype): + # see gh-13936 + dtype = np.dtype(any_real_dtype).type + + df1 = pd.DataFrame({ + "value": [5, 2, 25, 100, 78, 120, 79], + "key": [1, 2, 3, 2, 3, 1, 2], + "symbol": list("ABCDEFG")}, + columns=["symbol", "key", "value"]) + df1.value = dtype(df1.value) + + df2 = pd.DataFrame({ + "value": [0, 80, 120, 125], + "key": [1, 2, 2, 3], + "result": list("xyzw")}, + columns=["value", "key", "result"]) + df2.value = dtype(df2.value) + + df1 = df1.sort_values("value").reset_index(drop=True) + result = pd.merge_asof(df1, df2, on="value", by="key") + + expected = pd.DataFrame({ + "symbol": list("BACEGDF"), + "key": [2, 1, 3, 3, 2, 2, 1], + "value": [2, 5, 25, 78, 79, 100, 120], + "result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"]}, + columns=["symbol", "key", "value", "result"]) + expected.value = dtype(expected.value) + + assert_frame_equal(result, expected) def test_on_float_by_int(self): # type specialize both "by" and "on" parameters diff --git a/pandas/tests/series/indexing/test_boolean.py b/pandas/tests/series/indexing/test_boolean.py index 5cfb9b1ff4292..bd54d5f57d12d 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -137,44 +137,63 @@ def test_get_set_boolean_different_order(test_data): assert_series_equal(sel, exp) +def test_where_unsafe_int(sint_dtype): + s = Series(np.arange(10), dtype=sint_dtype) + mask = s < 5 + + s[mask] = lrange(2, 7) + expected = Series(lrange(2, 7) + lrange(5, 10), dtype=sint_dtype) + + assert_series_equal(s, expected) + + +def test_where_unsafe_float(float_dtype): + s = Series(np.arange(10), dtype=float_dtype) + mask = s < 5 + + 
s[mask] = lrange(2, 7) + expected = Series(lrange(2, 7) + lrange(5, 10), dtype=float_dtype) + + assert_series_equal(s, expected) + + +@pytest.mark.parametrize("dtype", [np.int64, np.float64]) +def test_where_unsafe_upcast(dtype): + s = Series(np.arange(10), dtype=dtype) + values = [2.5, 3.5, 4.5, 5.5, 6.5] + + mask = s < 5 + expected = Series(values + lrange(5, 10), dtype="float64") + + s[mask] = values + assert_series_equal(s, expected) + + +@pytest.mark.parametrize("dtype", [ + np.int8, np.int16, np.int32, np.float32 +]) +def test_where_unsafe_itemsize_fail(dtype): + # Can't do these, as we are forced to change the + # item size of the input to something we cannot. + s = Series(np.arange(10), dtype=dtype) + mask = s < 5 + + values = [2.5, 3.5, 4.5, 5.5, 6.5] + pytest.raises(Exception, s.__setitem__, tuple(mask), values) + + def test_where_unsafe(): - # unsafe dtype changes - for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, - np.float32, np.float64]: - s = Series(np.arange(10), dtype=dtype) - mask = s < 5 - s[mask] = lrange(2, 7) - expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype) - assert_series_equal(s, expected) - assert s.dtype == expected.dtype - - # these are allowed operations, but are upcasted - for dtype in [np.int64, np.float64]: - s = Series(np.arange(10), dtype=dtype) - mask = s < 5 - values = [2.5, 3.5, 4.5, 5.5, 6.5] - s[mask] = values - expected = Series(values + lrange(5, 10), dtype='float64') - assert_series_equal(s, expected) - assert s.dtype == expected.dtype - - # GH 9731 - s = Series(np.arange(10), dtype='int64') - mask = s > 5 + # see gh-9731 + s = Series(np.arange(10), dtype="int64") values = [2.5, 3.5, 4.5, 5.5] + + mask = s > 5 + expected = Series(lrange(6) + values, dtype="float64") + s[mask] = values - expected = Series(lrange(6) + values, dtype='float64') assert_series_equal(s, expected) - # can't do these as we are forced to change the itemsize of the input - # to something we cannot - for dtype in [np.int8, 
np.int16, np.int32, np.float16, np.float32]: - s = Series(np.arange(10), dtype=dtype) - mask = s < 5 - values = [2.5, 3.5, 4.5, 5.5, 6.5] - pytest.raises(Exception, s.__setitem__, tuple(mask), values) - - # GH3235 + # see gh-3235 s = Series(np.arange(10), dtype='int64') mask = s < 5 s[mask] = lrange(2, 7)
Add parameterization to the following tests: * `frame/test_dtypes.py` * `series/indexing/test_boolean.py` * `reshape/merge/test_merge_asof.py`
https://api.github.com/repos/pandas-dev/pandas/pulls/21873
2018-07-12T05:20:06Z
2018-07-13T04:55:27Z
2018-07-13T04:55:27Z
2018-07-13T04:55:55Z
[REF] Move comparison methods to EAMixins, share code
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index 5cfa51dc8a0be..3573a561945d2 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -50,7 +50,7 @@ PANDAS_INLINE PyObject* char_to_string(const char* data) { void set_array_not_contiguous(PyArrayObject* ao) { - ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS); + ao->flags &= ~(NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS); } #endif // PANDAS__LIBS_SRC_NUMPY_HELPER_H_ diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 0ec5d25beeeb9..e4350ee8ded53 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -1859,21 +1859,40 @@ cdef int64_t _ordinal_from_fields(year, month, quarter, day, hour, minute, second, freq): base, mult = get_freq_code(freq) if quarter is not None: - year, month = _quarter_to_myear(year, quarter, freq) + year, month = quarter_to_myear(year, quarter, freq) return period_ordinal(year, month, day, hour, minute, second, 0, 0, base) -def _quarter_to_myear(year, quarter, freq): - if quarter is not None: - if quarter <= 0 or quarter > 4: - raise ValueError('Quarter must be 1 <= q <= 4') +def quarter_to_myear(int year, int quarter, freq): + """ + A quarterly frequency defines a "year" which may not coincide with + the calendar-year. Find the calendar-year and calendar-month associated + with the given year and quarter under the `freq`-derived calendar. 
+ + Parameters + ---------- + year : int + quarter : int + freq : DateOffset + + Returns + ------- + year : int + month : int + + See Also + -------- + Period.qyear + """ + if quarter <= 0 or quarter > 4: + raise ValueError('Quarter must be 1 <= q <= 4') - mnum = MONTH_NUMBERS[get_rule_month(freq)] + 1 - month = (mnum + (quarter - 1) * 3) % 12 + 1 - if month > mnum: - year -= 1 + mnum = MONTH_NUMBERS[get_rule_month(freq)] + 1 + month = (mnum + (quarter - 1) * 3) % 12 + 1 + if month > mnum: + year -= 1 return year, month diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index 1b8a43d4293a5..6ccbb872bf50e 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -3,4 +3,4 @@ from .categorical import Categorical # noqa from .datetimes import DatetimeArrayMixin # noqa from .period import PeriodArrayMixin # noqa -from .timedelta import TimedeltaArrayMixin # noqa +from .timedeltas import TimedeltaArrayMixin # noqa diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 65f34b847f8d0..ec430e4bf17b1 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -10,19 +10,53 @@ DIFFERENT_FREQ_INDEX, IncompatibleFrequency) from pandas.errors import NullFrequencyError, PerformanceWarning +from pandas import compat from pandas.tseries import frequencies from pandas.tseries.offsets import Tick from pandas.core.dtypes.common import ( + needs_i8_conversion, + is_list_like, + is_bool_dtype, is_period_dtype, is_timedelta64_dtype, is_object_dtype) +from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame, ABCIndexClass import pandas.core.common as com from pandas.core.algorithms import checked_add_with_arr +def _make_comparison_op(op, cls): + # TODO: share code with indexes.base version? Main difference is that + # the block for MultiIndex was removed here. 
+ def cmp_method(self, other): + if isinstance(other, ABCDataFrame): + return NotImplemented + + if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)): + if other.ndim > 0 and len(self) != len(other): + raise ValueError('Lengths must match to compare') + + if needs_i8_conversion(self) and needs_i8_conversion(other): + # we may need to directly compare underlying + # representations + return self._evaluate_compare(other, op) + + # numpy will show a DeprecationWarning on invalid elementwise + # comparisons, this will raise in the future + with warnings.catch_warnings(record=True): + with np.errstate(all='ignore'): + result = op(self.values, np.asarray(other)) + + return result + + name = '__{name}__'.format(name=op.__name__) + # TODO: docstring? + return compat.set_function_name(cmp_method, name, cls) + + class AttributesMixin(object): @property @@ -435,3 +469,85 @@ def _addsub_offset_array(self, other, op): if not is_period_dtype(self): kwargs['freq'] = 'infer' return type(self)(res_values, **kwargs) + + # -------------------------------------------------------------- + # Comparison Methods + + def _evaluate_compare(self, other, op): + """ + We have been called because a comparison between + 8 aware arrays. 
numpy >= 1.11 will + now warn about NaT comparisons + """ + # Called by comparison methods when comparing datetimelike + # with datetimelike + + if not isinstance(other, type(self)): + # coerce to a similar object + if not is_list_like(other): + # scalar + other = [other] + elif lib.is_scalar(lib.item_from_zerodim(other)): + # ndarray scalar + other = [other.item()] + other = type(self)(other) + + # compare + result = op(self.asi8, other.asi8) + + # technically we could support bool dtyped Index + # for now just return the indexing array directly + mask = (self._isnan) | (other._isnan) + + filler = iNaT + if is_bool_dtype(result): + filler = False + + result[mask] = filler + return result + + # TODO: get this from ExtensionOpsMixin + @classmethod + def _add_comparison_methods(cls): + """ add in comparison methods """ + # DatetimeArray and TimedeltaArray comparison methods will + # call these as their super(...) methods + cls.__eq__ = _make_comparison_op(operator.eq, cls) + cls.__ne__ = _make_comparison_op(operator.ne, cls) + cls.__lt__ = _make_comparison_op(operator.lt, cls) + cls.__gt__ = _make_comparison_op(operator.gt, cls) + cls.__le__ = _make_comparison_op(operator.le, cls) + cls.__ge__ = _make_comparison_op(operator.ge, cls) + + +DatetimeLikeArrayMixin._add_comparison_methods() + + +# ------------------------------------------------------------------- +# Shared Constructor Helpers + +def validate_periods(periods): + """ + If a `periods` argument is passed to the Datetime/Timedelta Array/Index + constructor, cast it to an integer. 
+ + Parameters + ---------- + periods : None, float, int + + Returns + ------- + periods : None or int + + Raises + ------ + TypeError + if periods is None, float, or int + """ + if periods is not None: + if lib.is_float(periods): + periods = int(periods) + elif not lib.is_integer(periods): + raise TypeError('periods must be a number, got {periods}' + .format(periods=periods)) + return periods diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index d7dfa73c53d8d..5835090e25de1 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -13,21 +13,37 @@ from pandas.util._decorators import cache_readonly from pandas.errors import PerformanceWarning +from pandas import compat from pandas.core.dtypes.common import ( _NS_DTYPE, + is_datetimelike, is_datetime64tz_dtype, is_datetime64_dtype, is_timedelta64_dtype, _ensure_int64) from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries +import pandas.core.common as com from pandas.core.algorithms import checked_add_with_arr from pandas.tseries.frequencies import to_offset, DateOffset from pandas.tseries.offsets import Tick -from .datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays import datetimelike as dtl + + +def _to_m8(key, tz=None): + """ + Timestamp-like => dt64 + """ + if not isinstance(key, Timestamp): + # this also converts strings + key = Timestamp(key, tz=tz) + + return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE) def _field_accessor(name, field, docstring=None): @@ -68,7 +84,58 @@ def f(self): return property(f) -class DatetimeArrayMixin(DatetimeLikeArrayMixin): +def _dt_array_cmp(opname, cls): + """ + Wrap comparison operations to convert datetime-like to datetime64 + """ + nat_result = True if opname == '__ne__' else False + + def wrapper(self, other): + meth = getattr(dtl.DatetimeLikeArrayMixin, opname) + + if 
isinstance(other, (datetime, np.datetime64, compat.string_types)): + if isinstance(other, datetime): + # GH#18435 strings get a pass from tzawareness compat + self._assert_tzawareness_compat(other) + + other = _to_m8(other, tz=self.tz) + result = meth(self, other) + if isna(other): + result.fill(nat_result) + else: + if isinstance(other, list): + other = type(self)(other) + elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)): + # Following Timestamp convention, __eq__ is all-False + # and __ne__ is all True, others raise TypeError. + if opname == '__eq__': + return np.zeros(shape=self.shape, dtype=bool) + elif opname == '__ne__': + return np.ones(shape=self.shape, dtype=bool) + raise TypeError('%s type object %s' % + (type(other), str(other))) + + if is_datetimelike(other): + self._assert_tzawareness_compat(other) + + result = meth(self, np.asarray(other)) + result = com._values_from_object(result) + + # Make sure to pass an array to result[...]; indexing with + # Series breaks with older version of numpy + o_mask = np.array(isna(other)) + if o_mask.any(): + result[o_mask] = nat_result + + if self.hasnans: + result[self._isnan] = nat_result + + return result + + return compat.set_function_name(wrapper, opname, cls) + + +class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin): """ Assumes that subclass __new__/__init__ defines: tz @@ -222,6 +289,18 @@ def __iter__(self): # ----------------------------------------------------------------- # Comparison Methods + @classmethod + def _add_comparison_methods(cls): + """add in comparison methods""" + cls.__eq__ = _dt_array_cmp('__eq__', cls) + cls.__ne__ = _dt_array_cmp('__ne__', cls) + cls.__lt__ = _dt_array_cmp('__lt__', cls) + cls.__gt__ = _dt_array_cmp('__gt__', cls) + cls.__le__ = _dt_array_cmp('__le__', cls) + cls.__ge__ = _dt_array_cmp('__ge__', cls) + # TODO: Some classes pass __eq__ while others pass operator.eq; + # standardize this. 
+ def _has_same_tz(self, other): zzone = self._timezone @@ -335,7 +414,7 @@ def _add_delta(self, delta): The result's name is set outside of _add_delta by the calling method (__add__ or __sub__) """ - from pandas.core.arrays.timedelta import TimedeltaArrayMixin + from pandas.core.arrays.timedeltas import TimedeltaArrayMixin if isinstance(delta, (Tick, timedelta, np.timedelta64)): new_values = self._add_delta_td(delta) @@ -1021,3 +1100,6 @@ def to_julian_date(self): self.microsecond / 3600.0 / 1e+6 + self.nanosecond / 3600.0 / 1e+9 ) / 24.0) + + +DatetimeArrayMixin._add_comparison_methods() diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 000775361061e..66b1fb8db25c0 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -8,7 +8,7 @@ from pandas._libs.tslib import NaT, iNaT from pandas._libs.tslibs.period import ( Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, - get_period_field_arr, period_asfreq_arr, _quarter_to_myear) + get_period_field_arr, period_asfreq_arr) from pandas._libs.tslibs import period as libperiod from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas._libs.tslibs.fields import isleapyear_arr @@ -26,7 +26,7 @@ from pandas.tseries import frequencies from pandas.tseries.offsets import Tick, DateOffset -from .datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin def _field_accessor(name, alias, docstring=None): @@ -466,7 +466,7 @@ def _range_from_fields(year=None, month=None, quarter=None, day=None, year, quarter = _make_field_arrays(year, quarter) for y, q in compat.zip(year, quarter): - y, m = _quarter_to_myear(y, q, freq) + y, m = libperiod.quarter_to_myear(y, q, freq) val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: diff --git a/pandas/core/arrays/timedelta.py b/pandas/core/arrays/timedeltas.py similarity index 81% rename from pandas/core/arrays/timedelta.py rename to 
pandas/core/arrays/timedeltas.py index dbd481aae4f37..f027b84506164 100644 --- a/pandas/core/arrays/timedelta.py +++ b/pandas/core/arrays/timedeltas.py @@ -3,7 +3,7 @@ import numpy as np -from pandas._libs import tslibs, lib +from pandas._libs import tslibs from pandas._libs.tslibs import Timedelta, NaT from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64 @@ -11,7 +11,7 @@ from pandas import compat from pandas.core.dtypes.common import ( - _TD_DTYPE, _ensure_int64, is_timedelta64_dtype) + _TD_DTYPE, _ensure_int64, is_timedelta64_dtype, is_list_like) from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna @@ -20,7 +20,19 @@ from pandas.tseries.offsets import Tick, DateOffset from pandas.tseries.frequencies import to_offset -from .datetimelike import DatetimeLikeArrayMixin +from . import datetimelike as dtl + + +def _to_m8(key): + """ + Timedelta-like => dt64 + """ + if not isinstance(key, Timedelta): + # this also converts strings + key = Timedelta(key) + + # return an type that can be compared + return np.int64(key.value).view(_TD_DTYPE) def _is_convertible_to_td(key): @@ -42,7 +54,47 @@ def f(self): return property(f) -class TimedeltaArrayMixin(DatetimeLikeArrayMixin): +def _td_array_cmp(opname, cls): + """ + Wrap comparison operations to convert timedelta-like to timedelta64 + """ + nat_result = True if opname == '__ne__' else False + + def wrapper(self, other): + msg = "cannot compare a {cls} with type {typ}" + meth = getattr(dtl.DatetimeLikeArrayMixin, opname) + if _is_convertible_to_td(other) or other is NaT: + try: + other = _to_m8(other) + except ValueError: + # failed to parse as timedelta + raise TypeError(msg.format(cls=type(self).__name__, + typ=type(other).__name__)) + result = meth(self, other) + if isna(other): + result.fill(nat_result) + + elif not is_list_like(other): + raise TypeError(msg.format(cls=type(self).__name__, + 
typ=type(other).__name__)) + else: + other = type(self)(other).values + result = meth(self, other) + result = com._values_from_object(result) + + o_mask = np.array(isna(other)) + if o_mask.any(): + result[o_mask] = nat_result + + if self.hasnans: + result[self._isnan] = nat_result + + return result + + return compat.set_function_name(wrapper, opname, cls) + + +class TimedeltaArrayMixin(dtl.DatetimeLikeArrayMixin): @property def _box_func(self): return lambda x: Timedelta(x, unit='ns') @@ -78,20 +130,15 @@ def __new__(cls, values, freq=None, start=None, end=None, periods=None, freq != 'infer'): freq = to_offset(freq) - if periods is not None: - if lib.is_float(periods): - periods = int(periods) - elif not lib.is_integer(periods): - raise TypeError('`periods` must be a number, got {periods}' - .format(periods=periods)) + periods = dtl.validate_periods(periods) if values is None: if freq is None and com._any_none(periods, start, end): raise ValueError('Must provide freq argument if no data is ' 'supplied') else: - return cls._generate(start, end, periods, freq, - closed=closed) + return cls._generate_range(start, end, periods, freq, + closed=closed) result = cls._simple_new(values, freq=freq) if freq == 'infer': @@ -102,7 +149,7 @@ def __new__(cls, values, freq=None, start=None, end=None, periods=None, return result @classmethod - def _generate(cls, start, end, periods, freq, closed=None, **kwargs): + def _generate_range(cls, start, end, periods, freq, closed=None, **kwargs): # **kwargs are for compat with TimedeltaIndex, which includes `name` if com._count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' @@ -219,6 +266,19 @@ def _evaluate_with_timedelta_like(self, other, op): return NotImplemented + # ---------------------------------------------------------------- + # Comparison Methods + + @classmethod + def _add_comparison_methods(cls): + """add in comparison methods""" + cls.__eq__ = 
_td_array_cmp('__eq__', cls) + cls.__ne__ = _td_array_cmp('__ne__', cls) + cls.__lt__ = _td_array_cmp('__lt__', cls) + cls.__gt__ = _td_array_cmp('__gt__', cls) + cls.__le__ = _td_array_cmp('__le__', cls) + cls.__ge__ = _td_array_cmp('__ge__', cls) + # ---------------------------------------------------------------- # Conversion Methods - Vectorized analogues of Timedelta methods @@ -332,6 +392,9 @@ def f(x): return result +TimedeltaArrayMixin._add_comparison_methods() + + # --------------------------------------------------------------------- # Constructor Helpers diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index a0456630c9a0f..ed416c3ef857d 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -285,7 +285,9 @@ def is_list_like(obj): """ return (isinstance(obj, Iterable) and + # we do not count strings/unicode/bytes as list-like not isinstance(obj, string_and_binary_types) and + # exclude zero-dimensional numpy arrays, effectively scalars not (isinstance(obj, np.ndarray) and obj.ndim == 0)) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 78fa6f8217157..419e543ae8044 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -45,7 +45,6 @@ is_datetime64tz_dtype, is_timedelta64_dtype, is_hashable, - needs_i8_conversion, is_iterator, is_list_like, is_scalar) @@ -87,11 +86,6 @@ def cmp_method(self, other): if other.ndim > 0 and len(self) != len(other): raise ValueError('Lengths must match to compare') - # we may need to directly compare underlying - # representations - if needs_i8_conversion(self) and needs_i8_conversion(other): - return self._evaluate_compare(other, op) - from .multi import MultiIndex if is_object_dtype(self) and not isinstance(self, MultiIndex): # don't pass MultiIndex @@ -4628,9 +4622,6 @@ def _evaluate_with_timedelta_like(self, other, op): def _evaluate_with_datetime_like(self, other, op): raise TypeError("can only perform ops with 
datetime like values") - def _evaluate_compare(self, other, op): - raise com.AbstractMethodError(self) - @classmethod def _add_comparison_methods(cls): """ add in comparison methods """ diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 37e20496aafce..3f0bdf18f7230 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -223,7 +223,7 @@ def _validate_frequency(cls, index, freq, **kwargs): if index.empty or inferred == freq.freqstr: return None - on_freq = cls._generate( + on_freq = cls._generate_range( index[0], None, len(index), None, freq, **kwargs) if not np.array_equal(index.asi8, on_freq.asi8): msg = ('Inferred frequency {infer} from passed values does not ' @@ -290,34 +290,11 @@ def wrapper(left, right): return wrapper + @Appender(DatetimeLikeArrayMixin._evaluate_compare.__doc__) def _evaluate_compare(self, other, op): - """ - We have been called because a comparison between - 8 aware arrays. numpy >= 1.11 will - now warn about NaT comparisons - """ - - # coerce to a similar object - if not isinstance(other, type(self)): - if not is_list_like(other): - # scalar - other = [other] - elif is_scalar(lib.item_from_zerodim(other)): - # ndarray scalar - other = [other.item()] - other = type(self)(other) - - # compare - result = op(self.asi8, other.asi8) - - # technically we could support bool dtyped Index - # for now just return the indexing array directly - mask = (self._isnan) | (other._isnan) + result = DatetimeLikeArrayMixin._evaluate_compare(self, other, op) if is_bool_dtype(result): - result[mask] = False return result - - result[mask] = iNaT try: return Index(result) except TypeError: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 4931610e652b6..4732178d552be 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -18,7 +18,7 @@ is_integer, is_float, is_integer_dtype, - is_datetime64_ns_dtype, is_datetimelike, 
+ is_datetime64_ns_dtype, is_period_dtype, is_bool_dtype, is_string_like, @@ -31,7 +31,8 @@ from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat -from pandas.core.arrays.datetimes import DatetimeArrayMixin +from pandas.core.arrays.datetimes import DatetimeArrayMixin, _to_m8 +from pandas.core.arrays import datetimelike as dtl from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.numeric import Int64Index, Float64Index @@ -87,49 +88,8 @@ def _dt_index_cmp(opname, cls): """ Wrap comparison operations to convert datetime-like to datetime64 """ - nat_result = True if opname == '__ne__' else False - def wrapper(self, other): - func = getattr(super(DatetimeIndex, self), opname) - - if isinstance(other, (datetime, np.datetime64, compat.string_types)): - if isinstance(other, datetime): - # GH#18435 strings get a pass from tzawareness compat - self._assert_tzawareness_compat(other) - - other = _to_m8(other, tz=self.tz) - result = func(other) - if isna(other): - result.fill(nat_result) - else: - if isinstance(other, list): - other = DatetimeIndex(other) - elif not isinstance(other, (np.ndarray, Index, ABCSeries)): - # Following Timestamp convention, __eq__ is all-False - # and __ne__ is all True, others raise TypeError. 
- if opname == '__eq__': - return np.zeros(shape=self.shape, dtype=bool) - elif opname == '__ne__': - return np.ones(shape=self.shape, dtype=bool) - raise TypeError('%s type object %s' % - (type(other), str(other))) - - if is_datetimelike(other): - self._assert_tzawareness_compat(other) - - result = func(np.asarray(other)) - result = com._values_from_object(result) - - # Make sure to pass an array to result[...]; indexing with - # Series breaks with older version of numpy - o_mask = np.array(isna(other)) - if o_mask.any(): - result[o_mask] = nat_result - - if self.hasnans: - result[self._isnan] = nat_result - - # support of bool dtype indexers + result = getattr(DatetimeArrayMixin, opname)(self, other) if is_bool_dtype(result): return result return Index(result) @@ -339,12 +299,7 @@ def __new__(cls, data=None, freq_infer = True freq = None - if periods is not None: - if is_float(periods): - periods = int(periods) - elif not is_integer(periods): - msg = 'periods must be a number, got {periods}' - raise TypeError(msg.format(periods=periods)) + periods = dtl.validate_periods(periods) # if dtype has an embedded tz, capture it if dtype is not None: @@ -364,9 +319,9 @@ def __new__(cls, data=None, msg = 'Must provide freq argument if no data is supplied' raise ValueError(msg) else: - return cls._generate(start, end, periods, name, freq, tz=tz, - normalize=normalize, closed=closed, - ambiguous=ambiguous) + return cls._generate_range(start, end, periods, name, freq, + tz=tz, normalize=normalize, + closed=closed, ambiguous=ambiguous) if not isinstance(data, (np.ndarray, Index, ABCSeries)): if is_scalar(data): @@ -438,8 +393,8 @@ def __new__(cls, data=None, return subarr._deepcopy_if_needed(ref_to_data, copy) @classmethod - def _generate(cls, start, end, periods, name, freq, - tz=None, normalize=False, ambiguous='raise', closed=None): + def _generate_range(cls, start, end, periods, name, freq, tz=None, + normalize=False, ambiguous='raise', closed=None): if 
com._count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, ' 'and freq, exactly three must be specified') @@ -521,7 +476,7 @@ def _generate(cls, start, end, periods, name, freq, index = cls._cached_range(start, end, periods=periods, freq=freq, name=name) else: - index = _generate_regular_range(start, end, periods, freq) + index = _generate_regular_range(cls, start, end, periods, freq) else: @@ -545,14 +500,15 @@ def _generate(cls, start, end, periods, name, freq, index = cls._cached_range(start, end, periods=periods, freq=freq, name=name) else: - index = _generate_regular_range(start, end, periods, freq) + index = _generate_regular_range(cls, start, end, + periods, freq) if tz is not None and getattr(index, 'tz', None) is None: arr = conversion.tz_localize_to_utc(_ensure_int64(index), tz, ambiguous=ambiguous) - index = DatetimeIndex(arr) + index = cls(arr) # index is localized datetime64 array -> have to convert # start/end as well to compare @@ -1764,7 +1720,7 @@ def to_julian_date(self): DatetimeIndex._add_datetimelike_methods() -def _generate_regular_range(start, end, periods, freq): +def _generate_regular_range(cls, start, end, periods, freq): if isinstance(freq, Tick): stride = freq.nanos if periods is None: @@ -1788,7 +1744,8 @@ def _generate_regular_range(start, end, periods, freq): "if a 'period' is given.") data = np.arange(b, e, stride, dtype=np.int64) - data = DatetimeIndex._simple_new(data.view(_NS_DTYPE), None, tz=tz) + # TODO: Do we need to use _simple_new here? just return data.view? 
+ data = cls._simple_new(data.view(_NS_DTYPE), None, tz=tz) else: if isinstance(start, Timestamp): start = start.to_pydatetime() @@ -2088,17 +2045,6 @@ def cdate_range(start=None, end=None, periods=None, freq='C', tz=None, closed=closed, **kwargs) -def _to_m8(key, tz=None): - """ - Timestamp-like => dt64 - """ - if not isinstance(key, Timestamp): - # this also converts strings - key = Timestamp(key, tz=tz) - - return np.int64(conversion.pydt_to_i8(key)).view(_NS_DTYPE) - - _CACHE_START = Timestamp(datetime(1950, 1, 1)) _CACHE_END = Timestamp(datetime(2030, 1, 1)) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index eb1171c45b1e5..1ed6145f01a44 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -15,8 +15,10 @@ from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries -from pandas.core.arrays.timedelta import ( - TimedeltaArrayMixin, _is_convertible_to_td) +from pandas.core.arrays.timedeltas import ( + TimedeltaArrayMixin, _is_convertible_to_td, _to_m8) +from pandas.core.arrays import datetimelike as dtl + from pandas.core.indexes.base import Index from pandas.core.indexes.numeric import Int64Index import pandas.compat as compat @@ -53,39 +55,10 @@ def _td_index_cmp(opname, cls): """ Wrap comparison operations to convert timedelta-like to timedelta64 """ - nat_result = True if opname == '__ne__' else False - def wrapper(self, other): - msg = "cannot compare a {cls} with type {typ}" - func = getattr(super(TimedeltaIndex, self), opname) - if _is_convertible_to_td(other) or other is NaT: - try: - other = _to_m8(other) - except ValueError: - # failed to parse as timedelta - raise TypeError(msg.format(cls=type(self).__name__, - typ=type(other).__name__)) - result = func(other) - if isna(other): - result.fill(nat_result) - - elif not is_list_like(other): - raise TypeError(msg.format(cls=type(self).__name__, - typ=type(other).__name__)) - else: - other = 
TimedeltaIndex(other).values - result = func(other) - result = com._values_from_object(result) - - o_mask = np.array(isna(other)) - if o_mask.any(): - result[o_mask] = nat_result - - if self.hasnans: - result[self._isnan] = nat_result - - # support of bool dtype indexers + result = getattr(TimedeltaArrayMixin, opname)(self, other) if is_bool_dtype(result): + # support of bool dtype indexers return result return Index(result) @@ -218,20 +191,15 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, freq_infer = True freq = None - if periods is not None: - if is_float(periods): - periods = int(periods) - elif not is_integer(periods): - msg = 'periods must be a number, got {periods}' - raise TypeError(msg.format(periods=periods)) + periods = dtl.validate_periods(periods) if data is None: if freq is None and com._any_none(periods, start, end): msg = 'Must provide freq argument if no data is supplied' raise ValueError(msg) else: - return cls._generate(start, end, periods, name, freq, - closed=closed) + return cls._generate_range(start, end, periods, name, freq, + closed=closed) if unit is not None: data = to_timedelta(data, unit=unit, box=False) @@ -248,30 +216,28 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, elif copy: data = np.array(data, copy=True) + subarr = cls._simple_new(data, name=name, freq=freq) # check that we are matching freqs - if verify_integrity and len(data) > 0: + if verify_integrity and len(subarr) > 0: if freq is not None and not freq_infer: - index = cls._simple_new(data, name=name) - cls._validate_frequency(index, freq) - index.freq = freq - return index + cls._validate_frequency(subarr, freq) if freq_infer: - index = cls._simple_new(data, name=name) - inferred = index.inferred_freq + inferred = subarr.inferred_freq if inferred: - index.freq = to_offset(inferred) - return index + subarr.freq = to_offset(inferred) + return subarr - return cls._simple_new(data, name=name, freq=freq) + return subarr 
@classmethod - def _generate(cls, start, end, periods, name, freq, closed=None): + def _generate_range(cls, start, end, periods, name, freq, closed=None): # TimedeltaArray gets `name` via **kwargs, so we need to explicitly # override it if name is passed as a positional argument - return super(TimedeltaIndex, cls)._generate(start, end, - periods, freq, - name=name, closed=closed) + return super(TimedeltaIndex, cls)._generate_range(start, end, + periods, freq, + name=name, + closed=closed) @classmethod def _simple_new(cls, values, name=None, freq=None, **kwargs): @@ -797,18 +763,6 @@ def _is_convertible_to_index(other): return False -def _to_m8(key): - """ - Timedelta-like => dt64 - """ - if not isinstance(key, Timedelta): - # this also converts strings - key = Timedelta(key) - - # return an type that can be compared - return np.int64(key.value).view(_TD_DTYPE) - - def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): """ diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index d116b3bcff86a..69e802fbaa3f0 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -4,7 +4,7 @@ import pandas as pd from pandas.core.arrays.datetimes import DatetimeArrayMixin -from pandas.core.arrays.timedelta import TimedeltaArrayMixin +from pandas.core.arrays.timedeltas import TimedeltaArrayMixin from pandas.core.arrays.period import PeriodArrayMixin diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index d4ad2e4eeb2e6..387a70fe37253 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -208,8 +208,8 @@ def get_offset(name): raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name)) # cache _offset_map[name] = offset - # do not return cache because it's mutable - return _offset_map[name].copy() + + return _offset_map[name] getOffset = get_offset
Changes an old usage numpy's C API that is deprecated. This won't get rid of the warnings because cython hasn't changed it, but still. Also stops making copies of offsets since they are now immutable. Handles a handful of changes requested in the last pass: de-privatizes _quarter_to_myear (plus bonus docstring), renames _generate --> _generate_range, comments in is_list_like renames arrays.timedelta --> arrays.timedeltas to match core.indexes Implements comparison methods in DatetimeArray and TimedeltaArray, cleans up some Index code that is no longer needed as a result. Makes some progress on sharing code between TDI and DTI constructors (most of which we want to move up to the array classes)
https://api.github.com/repos/pandas-dev/pandas/pulls/21872
2018-07-12T03:17:44Z
2018-07-14T14:38:01Z
2018-07-14T14:38:01Z
2020-04-05T17:42:01Z
API: Add DataFrame.droplevel
diff --git a/doc/source/api.rst b/doc/source/api.rst index fff944651588e..9faac4c616477 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -444,6 +444,7 @@ Reindexing / Selection / Label manipulation Series.align Series.drop + Series.droplevel Series.drop_duplicates Series.duplicated Series.equals @@ -1063,6 +1064,7 @@ Reshaping, sorting, transposing .. autosummary:: :toctree: generated/ + DataFrame.droplevel DataFrame.pivot DataFrame.pivot_table DataFrame.reorder_levels diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..d300c2b273906 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -74,6 +74,7 @@ Other Enhancements - :func:`Series.mode` and :func:`DataFrame.mode` now support the ``dropna`` parameter which can be used to specify whether NaN/NaT values should be considered (:issue:`17534`) - :func:`to_csv` now supports ``compression`` keyword when a file handle is passed. (:issue:`21227`) - :meth:`Index.droplevel` is now implemented also for flat indexes, for compatibility with :class:`MultiIndex` (:issue:`21115`) +- :meth:`Series.droplevel` and :meth:`DataFrame.droplevel` are now implemented (:issue:`20342`) - Added support for reading from Google Cloud Storage via the ``gcsfs`` library (:issue:`19454`) - :func:`to_gbq` and :func:`read_gbq` signature and documentation updated to reflect changes from the `Pandas-GBQ library version 0.5.0 diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8da678e0adec0..608eebd079eef 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -716,6 +716,66 @@ def swapaxes(self, axis1, axis2, copy=True): return self._constructor(new_values, *new_axes).__finalize__(self) + def droplevel(self, level, axis=0): + """Return DataFrame with requested index / column level(s) removed. + + .. 
versionadded:: 0.24.0 + + Parameters + ---------- + level : int, str, or list-like + If a string is given, must be the name of a level + If list-like, elements must be names or positional indexes + of levels. + + axis : {0 or 'index', 1 or 'columns'}, default 0 + + + Returns + ------- + DataFrame.droplevel() + + Examples + -------- + >>> df = pd.DataFrame([ + ...: [1, 2, 3, 4], + ...: [5, 6, 7, 8], + ...: [9, 10, 11, 12] + ...: ]).set_index([0, 1]).rename_axis(['a', 'b']) + + >>> df.columns = pd.MultiIndex.from_tuples([ + ...: ('c', 'e'), ('d', 'f') + ...:], names=['level_1', 'level_2']) + + >>> df + level_1 c d + level_2 e f + a b + 1 2 3 4 + 5 6 7 8 + 9 10 11 12 + + >>> df.droplevel('a') + level_1 c d + level_2 e f + b + 2 3 4 + 6 7 8 + 10 11 12 + + >>> df.droplevel('level2', axis=1) + level_1 c d + a b + 1 2 3 4 + 5 6 7 8 + 9 10 11 12 + + """ + labels = self._get_axis(axis) + new_labels = labels.droplevel(level) + result = self.set_axis(new_labels, axis=axis, inplace=False) + return result + def pop(self, item): """ Return item and drop from frame. Raise KeyError if not found. 
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 21961906c39bb..4f95eb3fe7b47 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -1056,6 +1056,28 @@ def test_reindex_signature(self): "limit", "copy", "level", "method", "fill_value", "tolerance"} + def test_droplevel(self): + # GH20342 + df = pd.DataFrame([ + [1, 2, 3, 4], + [5, 6, 7, 8], + [9, 10, 11, 12] + ]) + df = df.set_index([0, 1]).rename_axis(['a', 'b']) + df.columns = pd.MultiIndex.from_tuples([('c', 'e'), ('d', 'f')], + names=['level_1', 'level_2']) + + # test that dropping of a level in index works + expected = df.reset_index('a', drop=True) + result = df.droplevel('a', axis='index') + assert_frame_equal(result, expected) + + # test that dropping of a level in columns works + expected = df.copy() + expected.columns = pd.Index(['c', 'd'], name='level_1') + result = df.droplevel('level_2', axis='columns') + assert_frame_equal(result, expected) + class TestIntervalIndex(object): diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 859082a7e722d..840c80d6775a5 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -295,3 +295,15 @@ def test_reset_index_drop_errors(self): s = pd.Series(range(4), index=pd.MultiIndex.from_product([[1, 2]] * 2)) with tm.assert_raises_regex(KeyError, 'not found'): s.reset_index('wrong', drop=True) + + def test_droplevel(self): + # GH20342 + ser = pd.Series([1, 2, 3, 4]) + ser.index = pd.MultiIndex.from_arrays([(1, 2, 3, 4), (5, 6, 7, 8)], + names=['a', 'b']) + expected = ser.reset_index('b', drop=True) + result = ser.droplevel('b', axis='index') + assert_series_equal(result, expected) + # test that droplevel raises ValueError on axis != 0 + with pytest.raises(ValueError): + ser.droplevel(1, axis='columns')
- [x] closes #20342 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21871
2018-07-12T01:50:50Z
2018-07-20T13:11:27Z
2018-07-20T13:11:27Z
2018-07-20T15:14:34Z
[CLN] De-privatize commonly-used functions
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 172117f7d8059..4cc119a700ca0 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -485,7 +485,7 @@ cpdef ndarray[object] astype_str(ndarray arr): def clean_index_list(list obj): """ - Utility used in pandas.core.index._ensure_index + Utility used in pandas.core.index.ensure_index """ cdef: Py_ssize_t i, n = len(obj) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 6e49e8044ff25..78c9113ce60de 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -27,9 +27,9 @@ is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_datetimelike, is_interval_dtype, is_scalar, is_list_like, - _ensure_platform_int, _ensure_object, - _ensure_float64, _ensure_uint64, - _ensure_int64) + ensure_platform_int, ensure_object, + ensure_float64, ensure_uint64, + ensure_int64) from pandas.compat.numpy import _np_version_under1p10 from pandas.core.dtypes.missing import isna, na_value_for_dtype @@ -73,32 +73,32 @@ def _ensure_data(values, dtype=None): # we check some simple dtypes first try: if is_object_dtype(dtype): - return _ensure_object(np.asarray(values)), 'object', 'object' + return ensure_object(np.asarray(values)), 'object', 'object' if is_bool_dtype(values) or is_bool_dtype(dtype): # we are actually coercing to uint64 # until our algos support uint8 directly (see TODO) return np.asarray(values).astype('uint64'), 'bool', 'uint64' elif is_signed_integer_dtype(values) or is_signed_integer_dtype(dtype): - return _ensure_int64(values), 'int64', 'int64' + return ensure_int64(values), 'int64', 'int64' elif (is_unsigned_integer_dtype(values) or is_unsigned_integer_dtype(dtype)): - return _ensure_uint64(values), 'uint64', 'uint64' + return ensure_uint64(values), 'uint64', 'uint64' elif is_float_dtype(values) or is_float_dtype(dtype): - return _ensure_float64(values), 'float64', 'float64' + return ensure_float64(values), 'float64', 'float64' elif 
is_object_dtype(values) and dtype is None: - return _ensure_object(np.asarray(values)), 'object', 'object' + return ensure_object(np.asarray(values)), 'object', 'object' elif is_complex_dtype(values) or is_complex_dtype(dtype): # ignore the fact that we are casting to float # which discards complex parts with catch_warnings(record=True): - values = _ensure_float64(values) + values = ensure_float64(values) return values, 'float64', 'float64' except (TypeError, ValueError): # if we are trying to coerce to a dtype # and it is incompat this will fall thru to here - return _ensure_object(values), 'object', 'object' + return ensure_object(values), 'object', 'object' # datetimelike if (needs_i8_conversion(values) or @@ -129,13 +129,13 @@ def _ensure_data(values, dtype=None): # we are actually coercing to int64 # until our algos support int* directly (not all do) - values = _ensure_int64(values) + values = ensure_int64(values) return values, dtype, 'int64' # we have failed, return object values = np.asarray(values) - return _ensure_object(values), 'object', 'object' + return ensure_object(values), 'object', 'object' def _reconstruct_data(values, dtype, original): @@ -475,7 +475,7 @@ def _factorize_array(values, na_sentinel=-1, size_hint=None, labels = table.get_labels(values, uniques, 0, na_sentinel, na_value=na_value) - labels = _ensure_platform_int(labels) + labels = ensure_platform_int(labels) uniques = uniques.to_array() return labels, uniques @@ -1309,7 +1309,7 @@ def _take_nd_object(arr, indexer, out, axis, fill_value, mask_info): if arr.dtype != out.dtype: arr = arr.astype(out.dtype) if arr.shape[axis] > 0: - arr.take(_ensure_platform_int(indexer), axis=axis, out=out) + arr.take(ensure_platform_int(indexer), axis=axis, out=out) if needs_masking: outindexer = [slice(None)] * arr.ndim outindexer[axis] = mask @@ -1450,7 +1450,7 @@ def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None): return func def func(arr, indexer, out, fill_value=np.nan): - 
indexer = _ensure_int64(indexer) + indexer = ensure_int64(indexer) _take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info) @@ -1609,7 +1609,7 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, indexer = np.arange(arr.shape[axis], dtype=np.int64) dtype, fill_value = arr.dtype, arr.dtype.type() else: - indexer = _ensure_int64(indexer, copy=False) + indexer = ensure_int64(indexer, copy=False) if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() mask_info = None, False @@ -1687,11 +1687,11 @@ def take_2d_multi(arr, indexer, out=None, fill_value=np.nan, mask_info=None, if row_idx is None: row_idx = np.arange(arr.shape[0], dtype=np.int64) else: - row_idx = _ensure_int64(row_idx) + row_idx = ensure_int64(row_idx) if col_idx is None: col_idx = np.arange(arr.shape[1], dtype=np.int64) else: - col_idx = _ensure_int64(col_idx) + col_idx = ensure_int64(col_idx) indexer = row_idx, col_idx if not allow_fill: dtype, fill_value = arr.dtype, arr.dtype.type() diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 7a6253dffe235..973a8af76bb07 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -17,9 +17,9 @@ coerce_indexer_dtype) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_object, - _ensure_platform_int, + ensure_int64, + ensure_object, + ensure_platform_int, is_extension_array_dtype, is_dtype_equal, is_datetimelike, @@ -1221,7 +1221,7 @@ def shift(self, periods): if codes.ndim > 1: raise NotImplementedError("Categorical with ndim > 1.") if np.prod(codes.shape) and (periods != 0): - codes = np.roll(codes, _ensure_platform_int(periods), axis=0) + codes = np.roll(codes, ensure_platform_int(periods), axis=0) if periods > 0: codes[:periods] = -1 else: @@ -2137,7 +2137,7 @@ def mode(self, dropna=True): if dropna: good = self._codes != -1 values = self._codes[good] 
- values = sorted(htable.mode_int64(_ensure_int64(values), dropna)) + values = sorted(htable.mode_int64(ensure_int64(values), dropna)) result = self._constructor(values=values, categories=self.categories, ordered=self.ordered, fastpath=True) return result @@ -2431,8 +2431,8 @@ def _get_codes_for_values(values, categories): from pandas.core.algorithms import _get_data_algo, _hashtables if not is_dtype_equal(values.dtype, categories.dtype): - values = _ensure_object(values) - categories = _ensure_object(categories) + values = ensure_object(values) + categories = ensure_object(categories) (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables) (_, _), cats = _get_data_algo(categories, _hashtables) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 5835090e25de1..c5e85cb5892f4 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -21,7 +21,7 @@ is_datetime64tz_dtype, is_datetime64_dtype, is_timedelta64_dtype, - _ensure_int64) + ensure_int64) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries @@ -167,7 +167,7 @@ def _simple_new(cls, values, freq=None, tz=None, **kwargs): values = np.array(values, copy=False) if not is_datetime64_dtype(values): - values = _ensure_int64(values).view(_NS_DTYPE) + values = ensure_int64(values).view(_NS_DTYPE) result = object.__new__(cls) result._data = values diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index 4ad53e16bc439..c915b272aee8b 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -19,7 +19,7 @@ ABCSeries, ABCIntervalIndex, ABCInterval) from pandas.core.dtypes.missing import isna, notna -from pandas.core.indexes.base import Index, _ensure_index +from pandas.core.indexes.base import Index, ensure_index from pandas.util._decorators import Appender from pandas.util._doctools import 
_WritableDoc @@ -145,8 +145,8 @@ def _simple_new(cls, left, right, closed=None, result = IntervalMixin.__new__(cls) closed = closed or 'right' - left = _ensure_index(left, copy=copy) - right = _ensure_index(right, copy=copy) + left = ensure_index(left, copy=copy) + right = ensure_index(right, copy=copy) if dtype is not None: # GH 19262: dtype must be an IntervalDtype to override inferred diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f027b84506164..a28f7fc9c32fa 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -11,7 +11,7 @@ from pandas import compat from pandas.core.dtypes.common import ( - _TD_DTYPE, _ensure_int64, is_timedelta64_dtype, is_list_like) + _TD_DTYPE, ensure_int64, is_timedelta64_dtype, is_list_like) from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna @@ -117,7 +117,7 @@ def _simple_new(cls, values, freq=None, **kwargs): # non-nano unit values = values.astype(_TD_DTYPE) else: - values = _ensure_int64(values).view(_TD_DTYPE) + values = ensure_int64(values).view(_TD_DTYPE) result = object.__new__(cls) result._data = values diff --git a/pandas/core/common.py b/pandas/core/common.py index 0a33873630d27..0ca776b6bfa77 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -13,7 +13,7 @@ from pandas import compat from pandas.compat import long, zip, iteritems, PY36, OrderedDict from pandas.core.config import get_option -from pandas.core.dtypes.generic import ABCSeries, ABCIndex +from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCIndexClass from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.inference import _iterable_not_string from pandas.core.dtypes.missing import isna, isnull, notnull # noqa @@ -120,11 +120,6 @@ def is_bool_indexer(key): return False -def _default_index(n): - from pandas.core.index import RangeIndex - return RangeIndex(0, n, name=None) - - def _mut_exclusive(**kwargs): item1, 
item2 = kwargs.items() label1, val1 = item1 @@ -299,11 +294,10 @@ def intersection(*seqs): def _asarray_tuplesafe(values, dtype=None): - from pandas.core.index import Index if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): values = list(values) - elif isinstance(values, Index): + elif isinstance(values, ABCIndexClass): return values.values if isinstance(values, list) and dtype in [np.object_, object]: diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 0bc6ad8499934..8675d3be06287 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -8,7 +8,7 @@ from pandas._libs import tslib, lib, tslibs from pandas._libs.tslibs import iNaT from pandas.compat import string_types, text_type, PY3 -from .common import (_ensure_object, is_bool, is_integer, is_float, +from .common import (ensure_object, is_bool, is_integer, is_float, is_complex, is_datetimetz, is_categorical_dtype, is_datetimelike, is_extension_type, @@ -25,8 +25,8 @@ is_bool_dtype, is_scalar, is_string_dtype, _string_dtypes, pandas_dtype, - _ensure_int8, _ensure_int16, - _ensure_int32, _ensure_int64, + ensure_int8, ensure_int16, + ensure_int32, ensure_int64, _NS_DTYPE, _TD_DTYPE, _INT64_DTYPE, _POSSIBLY_CAST_DTYPES) from .dtypes import (ExtensionDtype, PandasExtensionDtype, DatetimeTZDtype, @@ -85,7 +85,7 @@ def trans(x): if isinstance(dtype, string_types): if dtype == 'infer': - inferred_type = lib.infer_dtype(_ensure_object(result.ravel())) + inferred_type = lib.infer_dtype(ensure_object(result.ravel())) if inferred_type == 'boolean': dtype = 'bool' elif inferred_type == 'integer': @@ -602,12 +602,12 @@ def coerce_indexer_dtype(indexer, categories): """ coerce the indexer input array to the smallest dtype possible """ length = len(categories) if length < _int8_max: - return _ensure_int8(indexer) + return ensure_int8(indexer) elif length < _int16_max: - return _ensure_int16(indexer) + return ensure_int16(indexer) elif length < _int32_max: - 
return _ensure_int32(indexer) - return _ensure_int64(indexer) + return ensure_int32(indexer) + return ensure_int64(indexer) def coerce_to_dtypes(result, dtypes): @@ -948,7 +948,7 @@ def try_timedelta(v): except Exception: return v.reshape(shape) - inferred_type = lib.infer_datetimelike_array(_ensure_object(v)) + inferred_type = lib.infer_datetimelike_array(ensure_object(v)) if inferred_type == 'date' and convert_dates: value = try_datetime(v) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index ef4f36dc6df33..5a2f91d775fb2 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -32,14 +32,14 @@ # oh the troubles to reduce import time _is_scipy_sparse = None -_ensure_float64 = algos.ensure_float64 -_ensure_float32 = algos.ensure_float32 +ensure_float64 = algos.ensure_float64 +ensure_float32 = algos.ensure_float32 _ensure_datetime64ns = conversion.ensure_datetime64ns _ensure_timedelta64ns = conversion.ensure_timedelta64ns -def _ensure_float(arr): +def ensure_float(arr): """ Ensure that an array object has a float dtype if possible. @@ -59,16 +59,16 @@ def _ensure_float(arr): return arr -_ensure_uint64 = algos.ensure_uint64 -_ensure_int64 = algos.ensure_int64 -_ensure_int32 = algos.ensure_int32 -_ensure_int16 = algos.ensure_int16 -_ensure_int8 = algos.ensure_int8 -_ensure_platform_int = algos.ensure_platform_int -_ensure_object = algos.ensure_object +ensure_uint64 = algos.ensure_uint64 +ensure_int64 = algos.ensure_int64 +ensure_int32 = algos.ensure_int32 +ensure_int16 = algos.ensure_int16 +ensure_int8 = algos.ensure_int8 +ensure_platform_int = algos.ensure_platform_int +ensure_object = algos.ensure_object -def _ensure_categorical(arr): +def ensure_categorical(arr): """ Ensure that an array-like object is a Categorical (if not already). 
diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index 7ef4a7674753e..66998aa6866f6 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -16,7 +16,7 @@ is_string_like_dtype, is_bool_dtype, is_integer_dtype, is_dtype_equal, is_extension_array_dtype, - needs_i8_conversion, _ensure_object, + needs_i8_conversion, ensure_object, pandas_dtype, is_scalar, is_object_dtype, @@ -413,7 +413,7 @@ def array_equivalent(left, right, strict_nan=False): if not strict_nan: # isna considers NaN and None to be equivalent. return lib.array_equivalent_object( - _ensure_object(left.ravel()), _ensure_object(right.ravel())) + ensure_object(left.ravel()), ensure_object(right.ravel())) for left_value, right_value in zip(left, right): if left_value is NaT and right_value is not NaT: @@ -470,7 +470,7 @@ def _infer_fill_value(val): if is_datetimelike(val): return np.array('NaT', dtype=val.dtype) elif is_object_dtype(val.dtype): - dtype = lib.infer_dtype(_ensure_object(val)) + dtype = lib.infer_dtype(ensure_object(val)) if dtype in ['datetime', 'datetime64']: return np.array('NaT', dtype=_NS_DTYPE) elif dtype in ['timedelta', 'timedelta64']: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6380944338010..4578d2ac08199 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -51,9 +51,9 @@ is_dtype_equal, needs_i8_conversion, _get_dtype_from_object, - _ensure_float64, - _ensure_int64, - _ensure_platform_int, + ensure_float64, + ensure_int64, + ensure_platform_int, is_list_like, is_nested_list_like, is_iterator, @@ -64,8 +64,8 @@ from pandas.core.generic import NDFrame, _shared_docs -from pandas.core.index import (Index, MultiIndex, _ensure_index, - _ensure_index_from_sequences) +from pandas.core.index import (Index, MultiIndex, ensure_index, + ensure_index_from_sequences) from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) from pandas.core.internals import (BlockManager, @@ 
-88,6 +88,7 @@ from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex +import pandas.core.indexes.base as ibase import pandas.core.common as com import pandas.core.nanops as nanops @@ -397,16 +398,16 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields arrays, columns = _to_arrays(data, columns, dtype=dtype) - columns = _ensure_index(columns) + columns = ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): index = _get_names_from_index(data) elif isinstance(data[0], Categorical): - index = com._default_index(len(data[0])) + index = ibase.default_index(len(data[0])) else: - index = com._default_index(len(data)) + index = ibase.default_index(len(data)) mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) @@ -450,7 +451,7 @@ def _init_dict(self, data, index, columns, dtype=None): # raise ValueError if only scalars in dict index = extract_index(arrays[~missing]) else: - index = _ensure_index(index) + index = ensure_index(index) # no obvious "empty" int column if missing.any() and not is_integer_dtype(dtype): @@ -491,14 +492,14 @@ def _get_axes(N, K, index=index, columns=columns): # return axes or defaults if index is None: - index = com._default_index(N) + index = ibase.default_index(N) else: - index = _ensure_index(index) + index = ensure_index(index) if columns is None: - columns = com._default_index(K) + columns = ibase.default_index(K) else: - columns = _ensure_index(columns) + columns = ensure_index(columns) return index, columns # we could have a categorical type passed or coerced to 'category' @@ -1236,7 +1237,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, # Make a copy of the input columns so we can modify it if columns is not None: - columns = _ensure_index(columns) + columns = 
ensure_index(columns) if is_iterator(data): if nrows == 0: @@ -1265,7 +1266,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, if isinstance(data, dict): if columns is None: - columns = arr_columns = _ensure_index(sorted(data)) + columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] @@ -1281,15 +1282,15 @@ def from_records(cls, data, index=None, exclude=None, columns=None, elif isinstance(data, (np.ndarray, DataFrame)): arrays, columns = _to_arrays(data, columns) if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) arr_columns = columns else: arrays, arr_columns = _to_arrays(data, columns, coerce_float=coerce_float) - arr_columns = _ensure_index(arr_columns) + arr_columns = ensure_index(arr_columns) if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) else: columns = arr_columns @@ -1312,8 +1313,8 @@ def from_records(cls, data, index=None, exclude=None, columns=None, try: to_remove = [arr_columns.get_loc(field) for field in index] index_data = [arrays[i] for i in to_remove] - result_index = _ensure_index_from_sequences(index_data, - names=index) + result_index = ensure_index_from_sequences(index_data, + names=index) exclude.update(index) except Exception: @@ -1480,18 +1481,18 @@ def from_items(cls, items, columns=None, orient='columns'): if orient == 'columns': if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) idict = dict(items) if len(idict) < len(items): - if not columns.equals(_ensure_index(keys)): + if not columns.equals(ensure_index(keys)): raise ValueError('With non-unique item names, passed ' 'columns must be identical') arrays = values else: arrays = [idict[k] for k in columns if k in idict] else: - columns = _ensure_index(keys) + columns = ensure_index(keys) arrays = values # GH 17312 @@ -1508,7 +1509,7 @@ def from_items(cls, items, columns=None, 
orient='columns'): if columns is None: raise TypeError("Must pass columns with orient='index'") - keys = _ensure_index(keys) + keys = ensure_index(keys) # GH 17312 # Provide more informative error msg when scalar values passed @@ -4006,7 +4007,7 @@ def set_index(self, keys, drop=True, append=False, inplace=False, to_remove.append(col) arrays.append(level) - index = _ensure_index_from_sequences(arrays, names) + index = ensure_index_from_sequences(arrays, names) if verify_integrity and not index.is_unique: duplicates = index[index.duplicated()].unique() @@ -4188,7 +4189,7 @@ def _maybe_casted_values(index, labels=None): values, mask, np.nan) return values - new_index = com._default_index(len(new_obj)) + new_index = ibase.default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] @@ -4509,7 +4510,7 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False, keys.append(k) indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position) - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) else: from pandas.core.sorting import nargsort @@ -6749,14 +6750,14 @@ def corr(self, method='pearson', min_periods=1): mat = numeric_df.values if method == 'pearson': - correl = libalgos.nancorr(_ensure_float64(mat), minp=min_periods) + correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods) elif method == 'spearman': - correl = libalgos.nancorr_spearman(_ensure_float64(mat), + correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods) else: if min_periods is None: min_periods = 1 - mat = _ensure_float64(mat).T + mat = ensure_float64(mat).T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) @@ -6886,7 +6887,7 @@ def cov(self, min_periods=None): baseCov = np.cov(mat.T) baseCov = baseCov.reshape((len(cols), len(cols))) else: - baseCov = libalgos.nancorr(_ensure_float64(mat), cov=True, + baseCov = 
libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods) return self._constructor(baseCov, index=idx, columns=cols) @@ -7076,7 +7077,7 @@ def _count_level(self, level, axis=0, numeric_only=False): level = count_axis._get_level_number(level) level_index = count_axis.levels[level] - labels = _ensure_int64(count_axis.labels[level]) + labels = ensure_int64(count_axis.labels[level]) counts = lib.count_level_2d(mask, labels, len(level_index), axis=0) result = DataFrame(counts, index=level_index, columns=agg_axis) @@ -7608,7 +7609,7 @@ def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): arrays = _homogenize(arrays, index, dtype) # from BlockManager perspective - axes = [_ensure_index(columns), _ensure_index(index)] + axes = [ensure_index(columns), ensure_index(index)] return create_block_manager_from_arrays(arrays, arr_names, axes) @@ -7660,9 +7661,9 @@ def extract_index(data): (lengths[0], len(index))) raise ValueError(msg) else: - index = com._default_index(lengths[0]) + index = ibase.default_index(lengths[0]) - return _ensure_index(index) + return ensure_index(index) def _prep_ndarray(values, copy=True): @@ -7734,7 +7735,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None): dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: - columns = com._default_index(len(data)) + columns = ibase.default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, Series, Index)) and data.dtype.names is not None): @@ -7758,11 +7759,11 @@ def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): if index is None: index = _get_names_from_index(fdata) if index is None: - index = com._default_index(len(data)) - index = _ensure_index(index) + index = ibase.default_index(len(data)) + index = ensure_index(index) if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) arrays, arr_columns = _to_arrays(fdata, columns) # fill if needed @@ -7790,8 +7791,8 @@ def 
_reorder_arrays(arrays, arr_columns, columns): # reorder according to the columns if (columns is not None and len(columns) and arr_columns is not None and len(arr_columns)): - indexer = _ensure_index(arr_columns).get_indexer(columns) - arr_columns = _ensure_index([arr_columns[i] for i in indexer]) + indexer = ensure_index(arr_columns).get_indexer(columns) + arr_columns = ensure_index([arr_columns[i] for i in indexer]) arrays = [arrays[i] for i in indexer] return arrays, arr_columns @@ -7818,7 +7819,7 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): for s in data: index = getattr(s, 'index', None) if index is None: - index = com._default_index(len(s)) + index = ibase.default_index(len(s)) if id(index) in indexer_cache: indexer = indexer_cache[id(index)] @@ -7855,7 +7856,7 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): def _convert_object_array(content, columns, coerce_float=False, dtype=None): if columns is None: - columns = com._default_index(len(content)) + columns = ibase.default_index(len(content)) else: if len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... 
@@ -7878,7 +7879,7 @@ def convert(arr): def _get_names_from_index(data): has_some_name = any(getattr(s, 'name', None) is not None for s in data) if not has_some_name: - return com._default_index(len(data)) + return ibase.default_index(len(data)) index = lrange(len(data)) count = 0 diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8da678e0adec0..7305da4f56506 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -12,8 +12,8 @@ from pandas._libs import tslib, properties from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_object, + ensure_int64, + ensure_object, is_scalar, is_number, is_integer, is_bool, @@ -35,7 +35,7 @@ from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame from pandas.core.base import PandasObject, SelectionMixin -from pandas.core.index import (Index, MultiIndex, _ensure_index, +from pandas.core.index import (Index, MultiIndex, ensure_index, InvalidIndexError, RangeIndex) import pandas.core.indexing as indexing from pandas.core.indexes.datetimes import DatetimeIndex @@ -3235,7 +3235,7 @@ def _drop_axis(self, labels, axis, level=None, errors='raise'): # Case for non-unique axis else: - labels = _ensure_object(com._index_labels_to_array(labels)) + labels = ensure_object(com._index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') @@ -3889,9 +3889,9 @@ def _reindex_with_indexers(self, reindexers, fill_value=None, copy=False, if index is None: continue - index = _ensure_index(index) + index = ensure_index(index) if indexer is not None: - indexer = _ensure_int64(indexer) + indexer = ensure_int64(indexer) # TODO: speed up on homogeneous DataFrame objects new_data = new_data.reindex_indexer(index, indexer, axis=baxis, diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 78631bfae9e01..169416d6f8211 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ 
-18,6 +18,7 @@ from pandas.util._decorators import Substitution, Appender from pandas import compat +import pandas.core.indexes.base as ibase import pandas.core.common as com from pandas.core.panel import Panel from pandas.compat import lzip, map @@ -35,8 +36,8 @@ is_numeric_dtype, is_integer_dtype, is_interval_dtype, - _ensure_platform_int, - _ensure_int64) + ensure_platform_int, + ensure_int64) from pandas.core.dtypes.missing import isna, notna import pandas.core.algorithms as algorithms from pandas.core.frame import DataFrame @@ -1165,7 +1166,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, verify_integrity=False) if is_integer_dtype(out): - out = _ensure_int64(out) + out = ensure_int64(out) return Series(out, index=mi, name=self._selection_name) # for compat. with libgroupby.value_counts need to ensure every @@ -1196,7 +1197,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False, verify_integrity=False) if is_integer_dtype(out): - out = _ensure_int64(out) + out = ensure_int64(out) return Series(out, index=mi, name=self._selection_name) def count(self): @@ -1205,7 +1206,7 @@ def count(self): val = self.obj.get_values() mask = (ids != -1) & ~isna(val) - ids = _ensure_platform_int(ids) + ids = ensure_platform_int(ids) out = np.bincount(ids[mask], minlength=ngroups or 0) return Series(out, @@ -1567,7 +1568,7 @@ def groupby_series(obj, col=None): results = concat(results, axis=1) if not self.as_index: - results.index = com._default_index(len(results)) + results.index = ibase.default_index(len(results)) return results boxplot = boxplot_frame_groupby diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index ff2ed6970ee76..cb045b08f3629 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -28,7 +28,7 @@ class providing the base-class of operations. 
from pandas.core.dtypes.common import ( is_numeric_dtype, is_scalar, - _ensure_float) + ensure_float) from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.missing import isna, notna @@ -842,7 +842,7 @@ def _python_agg_general(self, func, *args, **kwargs): # since we are masking, make sure that we have a float object values = result if is_numeric_dtype(values.dtype): - values = _ensure_float(values) + values = ensure_float(values) output[name] = self._try_cast(values[mask], result) diff --git a/pandas/core/groupby/grouper.py b/pandas/core/groupby/grouper.py index d5c4c2946a632..a1511b726c705 100644 --- a/pandas/core/groupby/grouper.py +++ b/pandas/core/groupby/grouper.py @@ -16,7 +16,7 @@ from pandas.core.index import ( Index, MultiIndex, CategoricalIndex) from pandas.core.dtypes.common import ( - _ensure_categorical, + ensure_categorical, is_hashable, is_list_like, is_timedelta64_dtype, @@ -360,7 +360,7 @@ def indices(self): if isinstance(self.grouper, BaseGrouper): return self.grouper.indices - values = _ensure_categorical(self.grouper) + values = ensure_categorical(self.grouper) return values._reverse_indexer() @property diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 65b9144c0ddc9..f2c55a56b119d 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -18,12 +18,12 @@ from pandas.core.base import SelectionMixin from pandas.core.dtypes.missing import isna, _maybe_fill from pandas.core.index import ( - Index, MultiIndex, _ensure_index) + Index, MultiIndex, ensure_index) from pandas.core.dtypes.common import ( - _ensure_float64, - _ensure_platform_int, - _ensure_int64, - _ensure_object, + ensure_float64, + ensure_platform_int, + ensure_int64, + ensure_object, needs_i8_conversion, is_integer_dtype, is_complex_dtype, @@ -231,7 +231,7 @@ def size(self): """ ids, _, ngroup = self.group_info - ids = _ensure_platform_int(ids) + ids = ensure_platform_int(ids) if ngroup: out = np.bincount(ids[ids 
!= -1], minlength=ngroup) else: @@ -260,7 +260,7 @@ def group_info(self): comp_ids, obs_group_ids = self._get_compressed_labels() ngroups = len(obs_group_ids) - comp_ids = _ensure_int64(comp_ids) + comp_ids = ensure_int64(comp_ids) return comp_ids, obs_group_ids, ngroups @cache_readonly @@ -312,7 +312,7 @@ def get_group_levels(self): name_list = [] for ping, labels in zip(self.groupings, self.recons_labels): - labels = _ensure_platform_int(labels) + labels = ensure_platform_int(labels) levels = ping.result_index.take(labels) name_list.append(levels) @@ -464,16 +464,16 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, values = values.view('int64') is_numeric = True elif is_bool_dtype(values.dtype): - values = _ensure_float64(values) + values = ensure_float64(values) elif is_integer_dtype(values): # we use iNaT for the missing value on ints # so pre-convert to guard this condition if (values == iNaT).any(): - values = _ensure_float64(values) + values = ensure_float64(values) else: values = values.astype('int64', copy=False) elif is_numeric and not is_complex_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) else: values = values.astype(object) @@ -482,7 +482,7 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, kind, how, values, is_numeric) except NotImplementedError: if is_numeric: - values = _ensure_float64(values) + values = ensure_float64(values) func = self._get_cython_function( kind, how, values, is_numeric) else: @@ -528,7 +528,7 @@ def _cython_operation(self, kind, values, how, axis, min_count=-1, result, (counts > 0).view(np.uint8)) except ValueError: result = lib.row_bool_subset_object( - _ensure_object(result), + ensure_object(result), (counts > 0).view(np.uint8)) else: result = result[counts > 0] @@ -671,8 +671,8 @@ class BinGrouper(BaseGrouper): def __init__(self, bins, binlabels, filter_empty=False, mutated=False, indexer=None): - self.bins = _ensure_int64(bins) - self.binlabels = 
_ensure_index(binlabels) + self.bins = ensure_int64(bins) + self.binlabels = ensure_index(binlabels) self._filter_empty_groups = filter_empty self.mutated = mutated self.indexer = indexer @@ -737,7 +737,7 @@ def group_info(self): obs_group_ids = np.arange(ngroups) rep = np.diff(np.r_[0, self.bins]) - rep = _ensure_platform_int(rep) + rep = ensure_platform_int(rep) if ngroups == len(self.bins): comp_ids = np.repeat(np.arange(ngroups), rep) else: @@ -808,7 +808,7 @@ class DataSplitter(object): def __init__(self, data, labels, ngroups, axis=0): self.data = data - self.labels = _ensure_int64(labels) + self.labels = ensure_int64(labels) self.ngroups = ngroups self.axis = axis diff --git a/pandas/core/indexes/api.py b/pandas/core/indexes/api.py index 2286033e97d85..b409d695a73e8 100644 --- a/pandas/core/indexes/api.py +++ b/pandas/core/indexes/api.py @@ -3,8 +3,8 @@ from pandas.core.indexes.base import (Index, _new_Index, - _ensure_index, - _ensure_index_from_sequences, + ensure_index, + ensure_index_from_sequences, InvalidIndexError) # noqa from pandas.core.indexes.category import CategoricalIndex # noqa from pandas.core.indexes.multi import MultiIndex # noqa @@ -36,7 +36,7 @@ 'InvalidIndexError', 'TimedeltaIndex', 'PeriodIndex', 'DatetimeIndex', '_new_Index', 'NaT', - '_ensure_index', '_ensure_index_from_sequences', + 'ensure_index', 'ensure_index_from_sequences', '_get_combined_index', '_get_objs_combined_axis', '_union_indexes', '_get_consensus_names', @@ -66,7 +66,7 @@ def _get_combined_index(indexes, intersect=False, sort=False): index = index.intersection(other) else: index = _union_indexes(indexes, sort=sort) - index = _ensure_index(index) + index = ensure_index(index) if sort: try: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b9639fc804a36..83b70baf4065b 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -23,10 +23,10 @@ from pandas.core.dtypes.missing import isna, array_equivalent from 
pandas.core.dtypes.cast import maybe_cast_to_integer_array from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_object, - _ensure_categorical, - _ensure_platform_int, + ensure_int64, + ensure_object, + ensure_categorical, + ensure_platform_int, is_integer, is_float, is_dtype_equal, @@ -1867,7 +1867,7 @@ def is_type_compatible(self, kind): def is_all_dates(self): if self._data is None: return False - return is_datetime_array(_ensure_object(self.values)) + return is_datetime_array(ensure_object(self.values)) def __reduce__(self): d = dict(data=self._data) @@ -2071,7 +2071,7 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): if kwargs: nv.validate_take(tuple(), kwargs) - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) if self._can_hold_na: taken = self._assert_take_fillable(self.values, indices, allow_fill=allow_fill, @@ -2087,7 +2087,7 @@ def take(self, indices, axis=0, allow_fill=True, def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=np.nan): """ Internal method to handle NA filling of take """ - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) # only fill if we are passing a non-None fill_value if allow_fill and fill_value is not None: @@ -2679,7 +2679,7 @@ def union(self, other): """ self._assert_can_do_setop(other) - other = _ensure_index(other) + other = ensure_index(other) if len(other) == 0 or self.equals(other): return self._get_consensus_name(other) @@ -2779,7 +2779,7 @@ def intersection(self, other): """ self._assert_can_do_setop(other) - other = _ensure_index(other) + other = ensure_index(other) if self.equals(other): return self._get_consensus_name(other) @@ -3234,7 +3234,7 @@ def droplevel(self, level=0): @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): method = missing.clean_reindex_fill_method(method) - target = 
_ensure_index(target) + target = ensure_index(target) if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) @@ -3242,7 +3242,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): # this fix False and True would be treated as 0 and 1 respectively. # (GH #16877) if target.is_boolean() and self.is_numeric(): - return _ensure_platform_int(np.repeat(-1, target.size)) + return ensure_platform_int(np.repeat(-1, target.size)) pself, ptarget = self._maybe_promote(target) if pself is not self or ptarget is not target: @@ -3273,7 +3273,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): indexer = self._engine.get_indexer(target._ndarray_values) - return _ensure_platform_int(indexer) + return ensure_platform_int(indexer) def _convert_tolerance(self, tolerance, target): # override this method on subclasses @@ -3375,7 +3375,7 @@ def _filter_indexer_tolerance(self, target, indexer, tolerance): @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): - target = _ensure_index(target) + target = ensure_index(target) if is_categorical(target): target = target.astype(target.dtype.categories.dtype) pself, ptarget = self._maybe_promote(target) @@ -3389,7 +3389,7 @@ def get_indexer_non_unique(self, target): tgt_values = target._ndarray_values indexer, missing = self._engine.get_indexer_non_unique(tgt_values) - return _ensure_platform_int(indexer), missing + return ensure_platform_int(indexer), missing def get_indexer_for(self, target, **kwargs): """ @@ -3431,7 +3431,7 @@ def groupby(self, values): from .multi import MultiIndex if isinstance(values, MultiIndex): values = values.values - values = _ensure_categorical(values) + values = ensure_categorical(values) result = values._reverse_indexer() # map to the label @@ -3619,7 +3619,7 @@ def reindex(self, target, method=None, level=None, limit=None, attrs.pop('freq', None) # don't preserve freq target = 
self._simple_new(None, dtype=self.dtype, **attrs) else: - target = _ensure_index(target) + target = ensure_index(target) if level is not None: if method is not None: @@ -3667,7 +3667,7 @@ def _reindex_non_unique(self, target): """ - target = _ensure_index(target) + target = ensure_index(target) indexer, missing = self.get_indexer_non_unique(target) check = indexer != -1 new_labels = self.take(indexer[check]) @@ -3676,11 +3676,11 @@ def _reindex_non_unique(self, target): if len(missing): length = np.arange(len(indexer)) - missing = _ensure_platform_int(missing) + missing = ensure_platform_int(missing) missing_labels = target.take(missing) - missing_indexer = _ensure_int64(length[~check]) + missing_indexer = ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values - cur_indexer = _ensure_int64(length[check]) + cur_indexer = ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels @@ -3754,7 +3754,7 @@ def join(self, other, how='left', level=None, return_indexers=False, return self._join_level(other, level, how=how, return_indexers=return_indexers) - other = _ensure_index(other) + other = ensure_index(other) if len(other) == 0 and how in ('left', 'outer'): join_index = self._shallow_copy() @@ -3881,8 +3881,8 @@ def _join_non_unique(self, other, how='left', return_indexers=False): how=how, sort=True) - left_idx = _ensure_platform_int(left_idx) - right_idx = _ensure_platform_int(right_idx) + left_idx = ensure_platform_int(left_idx) + right_idx = ensure_platform_int(right_idx) join_index = np.asarray(self._ndarray_values.take(left_idx)) mask = left_idx == -1 @@ -3915,7 +3915,7 @@ def _get_leaf_sorter(labels): return np.empty(0, dtype='int64') if len(labels) == 1: - lab = _ensure_int64(labels[0]) + lab = ensure_int64(labels[0]) sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max()) return sorter @@ -3926,8 +3926,8 @@ def _get_leaf_sorter(labels): tic |= lab[:-1] != lab[1:] starts = 
np.hstack(([True], tic, [True])).nonzero()[0] - lab = _ensure_int64(labels[-1]) - return lib.get_level_sorter(lab, _ensure_int64(starts)) + lab = ensure_int64(labels[-1]) + return lib.get_level_sorter(lab, ensure_int64(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError('Join on level between two MultiIndex objects ' @@ -3959,7 +3959,7 @@ def _get_leaf_sorter(labels): join_index = left[left_indexer] else: - left_lev_indexer = _ensure_int64(left_lev_indexer) + left_lev_indexer = ensure_int64(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) @@ -4018,9 +4018,9 @@ def _get_leaf_sorter(labels): if return_indexers: left_indexer = (None if left_indexer is None - else _ensure_platform_int(left_indexer)) + else ensure_platform_int(left_indexer)) right_indexer = (None if right_indexer is None - else _ensure_platform_int(right_indexer)) + else ensure_platform_int(right_indexer)) return join_index, left_indexer, right_indexer else: return join_index @@ -4064,8 +4064,8 @@ def _join_monotonic(self, other, how='left', return_indexers=False): join_index = self._wrap_joined_index(join_index, other) if return_indexers: - lidx = None if lidx is None else _ensure_platform_int(lidx) - ridx = None if ridx is None else _ensure_platform_int(ridx) + lidx = None if lidx is None else ensure_platform_int(lidx) + ridx = None if ridx is None else ensure_platform_int(ridx) return join_index, lidx, ridx else: return join_index @@ -4883,7 +4883,7 @@ def _add_logical_methods_disabled(cls): Index._add_comparison_methods() -def _ensure_index_from_sequences(sequences, names=None): +def ensure_index_from_sequences(sequences, names=None): """Construct an index from sequences of data. A single sequence returns an Index. 
Many sequences returns a @@ -4900,18 +4900,18 @@ def _ensure_index_from_sequences(sequences, names=None): Examples -------- - >>> _ensure_index_from_sequences([[1, 2, 3]], names=['name']) + >>> ensure_index_from_sequences([[1, 2, 3]], names=['name']) Int64Index([1, 2, 3], dtype='int64', name='name') - >>> _ensure_index_from_sequences([['a', 'a'], ['a', 'b']], - names=['L1', 'L2']) + >>> ensure_index_from_sequences([['a', 'a'], ['a', 'b']], + names=['L1', 'L2']) MultiIndex(levels=[['a'], ['a', 'b']], labels=[[0, 0], [0, 1]], names=['L1', 'L2']) See Also -------- - _ensure_index + ensure_index """ from .multi import MultiIndex @@ -4923,7 +4923,7 @@ def _ensure_index_from_sequences(sequences, names=None): return MultiIndex.from_arrays(sequences, names=names) -def _ensure_index(index_like, copy=False): +def ensure_index(index_like, copy=False): """ Ensure that we have an index from some index-like object @@ -4939,19 +4939,19 @@ def _ensure_index(index_like, copy=False): Examples -------- - >>> _ensure_index(['a', 'b']) + >>> ensure_index(['a', 'b']) Index(['a', 'b'], dtype='object') - >>> _ensure_index([('a', 'a'), ('b', 'c')]) + >>> ensure_index([('a', 'a'), ('b', 'c')]) Index([('a', 'a'), ('b', 'c')], dtype='object') - >>> _ensure_index([['a', 'a'], ['b', 'c']]) + >>> ensure_index([['a', 'a'], ['b', 'c']]) MultiIndex(levels=[['a'], ['b', 'c']], labels=[[0, 0], [0, 1]]) See Also -------- - _ensure_index_from_sequences + ensure_index_from_sequences """ if isinstance(index_like, Index): if copy: @@ -5009,3 +5009,8 @@ def _trim_front(strings): def _validate_join_method(method): if method not in ['left', 'right', 'inner', 'outer']: raise ValueError('do not recognize join method %s' % method) + + +def default_index(n): + from pandas.core.index import RangeIndex + return RangeIndex(0, n, name=None) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 7c63b3c667c01..a03e478f81caf 100644 --- a/pandas/core/indexes/category.py +++ 
b/pandas/core/indexes/category.py @@ -9,7 +9,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.common import ( is_categorical_dtype, - _ensure_platform_int, + ensure_platform_int, is_list_like, is_interval_dtype, is_scalar) @@ -489,7 +489,7 @@ def reindex(self, target, method=None, level=None, limit=None, raise NotImplementedError("argument limit is not implemented for " "CategoricalIndex.reindex") - target = ibase._ensure_index(target) + target = ibase.ensure_index(target) if not is_categorical_dtype(target) and not target.is_unique: raise ValueError("cannot reindex with a non-unique indexer") @@ -554,7 +554,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): from pandas.core.arrays.categorical import _recode_for_categories method = missing.clean_reindex_fill_method(method) - target = ibase._ensure_index(target) + target = ibase.ensure_index(target) if self.is_unique and self.equals(target): return np.arange(len(self), dtype='intp') @@ -583,23 +583,23 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): codes = self.categories.get_indexer(target) indexer, _ = self._engine.get_indexer_non_unique(codes) - return _ensure_platform_int(indexer) + return ensure_platform_int(indexer) @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): - target = ibase._ensure_index(target) + target = ibase.ensure_index(target) if isinstance(target, CategoricalIndex): # Indexing on codes is more efficient if categories are the same: if target.categories is self.categories: target = target.codes indexer, missing = self._engine.get_indexer_non_unique(target) - return _ensure_platform_int(indexer), missing + return ensure_platform_int(indexer), missing target = target.values codes = self.categories.get_indexer(target) indexer, missing = self._engine.get_indexer_non_unique(codes) - return _ensure_platform_int(indexer), missing + return 
ensure_platform_int(indexer), missing @Appender(_index_shared_docs['_convert_scalar_indexer']) def _convert_scalar_indexer(self, key, kind=None): @@ -644,7 +644,7 @@ def _convert_index_indexer(self, keyarr): def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) taken = self._assert_take_fillable(self.codes, indices, allow_fill=allow_fill, fill_value=fill_value, diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 3f0bdf18f7230..3ae5eb3a8dbf5 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -17,7 +17,7 @@ from pandas._libs.tslibs.timestamps import round_ns from pandas.core.dtypes.common import ( - _ensure_int64, + ensure_int64, is_dtype_equal, is_float, is_integer, @@ -391,7 +391,7 @@ def sort_values(self, return_indexer=False, ascending=True): def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) - indices = _ensure_int64(indices) + indices = ensure_int64(indices) maybe_slice = lib.maybe_indices_to_slice(indices, len(self)) if isinstance(maybe_slice, slice): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 4732178d552be..7257be421c3e1 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -25,7 +25,7 @@ is_list_like, is_scalar, pandas_dtype, - _ensure_int64) + ensure_int64) from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna @@ -504,7 +504,7 @@ def _generate_range(cls, start, end, periods, name, freq, tz=None, periods, freq) if tz is not None and getattr(index, 'tz', None) is None: - arr = conversion.tz_localize_to_utc(_ensure_int64(index), + arr = conversion.tz_localize_to_utc(ensure_int64(index), tz, ambiguous=ambiguous) @@ 
-563,7 +563,7 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, values = np.array(values, copy=False) if not is_datetime64_dtype(values): - values = _ensure_int64(values).view(_NS_DTYPE) + values = ensure_int64(values).view(_NS_DTYPE) values = getattr(values, 'values', values) @@ -1607,7 +1607,7 @@ def delete(self, loc): else: if is_list_like(loc): loc = lib.maybe_indices_to_slice( - _ensure_int64(np.array(loc)), len(self)) + ensure_int64(np.array(loc)), len(self)) if isinstance(loc, slice) and loc.step in (1, None): if (loc.start in (0, None) or loc.stop in (len(self), None)): freq = self.freq diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 9375a60d0964c..e92f980caf3dc 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -8,7 +8,7 @@ from pandas.core.dtypes.missing import isna from pandas.core.dtypes.cast import find_common_type, maybe_downcast_to_dtype from pandas.core.dtypes.common import ( - _ensure_platform_int, + ensure_platform_int, is_list_like, is_datetime_or_timedelta_dtype, is_datetime64tz_dtype, @@ -21,7 +21,7 @@ is_number, is_integer) from pandas.core.indexes.base import ( - Index, _ensure_index, + Index, ensure_index, default_pprint, _index_shared_docs) from pandas._libs import Timestamp, Timedelta @@ -700,7 +700,7 @@ def get_value(self, series, key): def get_indexer(self, target, method=None, limit=None, tolerance=None): self._check_method(method) - target = _ensure_index(target) + target = ensure_index(target) target = self._maybe_cast_indexed(target) if self.equals(target): @@ -724,7 +724,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): else: indexer = np.concatenate([self.get_loc(i) for i in target]) - return _ensure_platform_int(indexer) + return ensure_platform_int(indexer) def _get_reindexer(self, target): """ @@ -799,7 +799,7 @@ def _get_reindexer(self, target): @Appender(_index_shared_docs['get_indexer_non_unique'] % 
_index_doc_kwargs) def get_indexer_non_unique(self, target): - target = self._maybe_cast_indexed(_ensure_index(target)) + target = self._maybe_cast_indexed(ensure_index(target)) return super(IntervalIndex, self).get_indexer_non_unique(target) @Appender(_index_shared_docs['where']) @@ -855,7 +855,7 @@ def insert(self, loc, item): def _as_like_interval_index(self, other): self._assert_can_do_setop(other) - other = _ensure_index(other) + other = ensure_index(other) if not isinstance(other, IntervalIndex): msg = ('the other index needs to be an IntervalIndex too, but ' 'was type {}').format(other.__class__.__name__) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index a791ce1d87264..0d4ceb2783bad 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -14,8 +14,8 @@ from pandas.core.dtypes.dtypes import ( ExtensionDtype, PandasExtensionDtype) from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_platform_int, + ensure_int64, + ensure_platform_int, is_categorical_dtype, is_object_dtype, is_hashable, @@ -36,7 +36,7 @@ from pandas.core.config import get_option from pandas.core.indexes.base import ( - Index, _ensure_index, + Index, ensure_index, InvalidIndexError, _index_shared_docs) from pandas.core.indexes.frozen import ( @@ -302,13 +302,13 @@ def _set_levels(self, levels, level=None, copy=False, validate=True, if level is None: new_levels = FrozenList( - _ensure_index(lev, copy=copy)._shallow_copy() + ensure_index(lev, copy=copy)._shallow_copy() for lev in levels) else: level = [self._get_level_number(l) for l in level] new_levels = list(self._levels) for l, v in zip(level, levels): - new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy() + new_levels[l] = ensure_index(v, copy=copy)._shallow_copy() new_levels = FrozenList(new_levels) if verify_integrity: @@ -1227,7 +1227,7 @@ def lexsort_depth(self): else: return 0 - int64_labels = [_ensure_int64(lab) for lab in self.labels] + int64_labels = 
[ensure_int64(lab) for lab in self.labels] for k in range(self.nlevels, 0, -1): if libalgos.is_lexsorted(int64_labels[:k]): return k @@ -1431,7 +1431,7 @@ def _sort_levels_monotonic(self): lev = lev.take(indexer) # indexer to reorder the labels - indexer = _ensure_int64(indexer) + indexer = ensure_int64(indexer) ri = lib.get_reverse_indexer(indexer, len(indexer)) lab = algos.take_1d(ri, lab) @@ -1594,7 +1594,7 @@ def __getitem__(self, key): def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): nv.validate_take(tuple(), kwargs) - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) taken = self._assert_take_fillable(self.labels, indices, allow_fill=allow_fill, fill_value=fill_value, @@ -1895,7 +1895,7 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True): if not ascending: indexer = indexer[::-1] - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) new_labels = [lab.take(indexer) for lab in self.labels] new_index = MultiIndex(labels=new_labels, levels=self.levels, @@ -1940,11 +1940,11 @@ def _convert_listlike_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): method = missing.clean_reindex_fill_method(method) - target = _ensure_index(target) + target = ensure_index(target) # empty indexer if is_list_like(target) and not len(target): - return _ensure_platform_int(np.array([])) + return ensure_platform_int(np.array([])) if not isinstance(target, MultiIndex): try: @@ -1973,7 +1973,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): else: indexer = self._engine.get_indexer(target) - return _ensure_platform_int(indexer) + return ensure_platform_int(indexer) @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target): @@ -2010,12 +2010,12 @@ def reindex(self, target, method=None, 
level=None, limit=None, target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype), **attrs) else: - target = _ensure_index(target) + target = ensure_index(target) target, indexer, _ = self._join_level(target, level, how='right', return_indexers=True, keep_order=False) else: - target = _ensure_index(target) + target = ensure_index(target) if self.equals(target): indexer = None else: @@ -2399,7 +2399,7 @@ def convert_indexer(start, stop, step, indexer=indexer, labels=labels): # selected from pandas import Series mapper = Series(indexer) - indexer = labels.take(_ensure_platform_int(indexer)) + indexer = labels.take(ensure_platform_int(indexer)) result = Series(Index(indexer).isin(r).nonzero()[0]) m = result.map(mapper)._ndarray_values @@ -2628,7 +2628,7 @@ def equals(self, other): return False if not isinstance(other, MultiIndex): - other_vals = com._values_from_object(_ensure_index(other)) + other_vals = com._values_from_object(ensure_index(other)) return array_equivalent(self._ndarray_values, other_vals) if self.nlevels != other.nlevels: @@ -2826,7 +2826,7 @@ def insert(self, loc, item): lev_loc = level.get_loc(k) new_levels.append(level) - new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc)) + new_labels.append(np.insert(ensure_int64(labels), loc, lev_loc)) return MultiIndex(levels=new_levels, labels=new_labels, names=self.names, verify_integrity=False) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a8e0c7f1aaa6a..841d1e69485ca 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -15,7 +15,7 @@ is_period_dtype, is_bool_dtype, pandas_dtype, - _ensure_object) + ensure_object) import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc @@ -33,7 +33,7 @@ from pandas.core.arrays.period import PeriodArrayMixin from pandas.core.base import _shared_docs -from pandas.core.indexes.base import _index_shared_docs, _ensure_index +from 
pandas.core.indexes.base import _index_shared_docs, ensure_index from pandas import compat from pandas.util._decorators import (Appender, Substitution, cache_readonly, @@ -255,7 +255,7 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, "floating point in construction") # anything else, likely an array of strings or periods - data = _ensure_object(data) + data = ensure_object(data) freq = freq or period.extract_freq(data) data = period.extract_ordinals(data, freq) return cls._from_ordinals(data, name=name, freq=freq) @@ -567,7 +567,7 @@ def get_value(self, series, key): @Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs) def get_indexer(self, target, method=None, limit=None, tolerance=None): - target = _ensure_index(target) + target = ensure_index(target) if hasattr(target, 'freq') and target.freq != self.freq: msg = DIFFERENT_FREQ_INDEX.format(self.freqstr, target.freqstr) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 4e192548a1f2d..939ec0b79ac6b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -81,7 +81,7 @@ def __new__(cls, start=None, stop=None, step=None, **dict(start._get_data_as_items())) # validate the arguments - def _ensure_int(value, field): + def ensure_int(value, field): msg = ("RangeIndex(...) 
must be called with integers," " {value} was passed for {field}") if not is_scalar(value): @@ -102,18 +102,18 @@ def _ensure_int(value, field): elif start is None: start = 0 else: - start = _ensure_int(start, 'start') + start = ensure_int(start, 'start') if stop is None: stop = start start = 0 else: - stop = _ensure_int(stop, 'stop') + stop = ensure_int(stop, 'stop') if step is None: step = 1 elif step == 0: raise ValueError("Step must not be zero") else: - step = _ensure_int(step, 'step') + step = ensure_int(step, 'step') return cls._simple_new(start, stop, step, name) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 1ed6145f01a44..dc26c9cc0c248 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -11,7 +11,7 @@ is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype, - _ensure_int64) + ensure_int64) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries @@ -736,7 +736,7 @@ def delete(self, loc): else: if is_list_like(loc): loc = lib.maybe_indices_to_slice( - _ensure_int64(np.array(loc)), len(self)) + ensure_int64(np.array(loc)), len(self)) if isinstance(loc, slice) and loc.step in (1, None): if (loc.start in (0, None) or loc.stop in (len(self), None)): freq = self.freq diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index ec06099e3bbd2..8ffc7548059b7 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -13,7 +13,7 @@ is_iterator, is_scalar, is_sparse, - _ensure_platform_int) + ensure_platform_int) from pandas.core.dtypes.missing import isna, _infer_fill_value from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender @@ -1483,7 +1483,7 @@ def _convert_for_reindex(self, key, axis=None): keyarr = labels._convert_arr_indexer(keyarr) if not labels.is_integer(): - keyarr = _ensure_platform_int(keyarr) + keyarr = ensure_platform_int(keyarr) return labels.take(keyarr) return keyarr diff 
--git a/pandas/core/internals.py b/pandas/core/internals.py index 208d7b8bcf8a7..5a5418dcc1e7f 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -21,7 +21,7 @@ CategoricalDtype) from pandas.core.dtypes.common import ( _TD_DTYPE, _NS_DTYPE, - _ensure_int64, _ensure_platform_int, + ensure_int64, ensure_platform_int, is_integer, is_dtype_equal, is_timedelta64_dtype, @@ -65,7 +65,7 @@ import pandas.core.common as com import pandas.core.algorithms as algos -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import Index, MultiIndex, ensure_index from pandas.core.indexing import maybe_convert_indices, check_setitem_lengths from pandas.core.arrays import Categorical from pandas.core.indexes.datetimes import DatetimeIndex @@ -1297,7 +1297,7 @@ def shift(self, periods, axis=0, mgr=None): axis = new_values.ndim - axis - 1 if np.prod(new_values.shape): - new_values = np.roll(new_values, _ensure_platform_int(periods), + new_values = np.roll(new_values, ensure_platform_int(periods), axis=axis) axis_indexer = [slice(None)] * self.ndim @@ -3271,7 +3271,7 @@ class BlockManager(PandasObject): '_is_consolidated', '_blknos', '_blklocs'] def __init__(self, blocks, axes, do_integrity_check=True): - self.axes = [_ensure_index(ax) for ax in axes] + self.axes = [ensure_index(ax) for ax in axes] self.blocks = tuple(blocks) for block in blocks: @@ -3296,8 +3296,8 @@ def __init__(self, blocks, axes, do_integrity_check=True): def make_empty(self, axes=None): """ return an empty BlockManager with the items axis of len 0 """ if axes is None: - axes = [_ensure_index([])] + [_ensure_index(a) - for a in self.axes[1:]] + axes = [ensure_index([])] + [ensure_index(a) + for a in self.axes[1:]] # preserve dtype if possible if self.ndim == 1: @@ -3321,7 +3321,7 @@ def ndim(self): return len(self.axes) def set_axis(self, axis, new_labels): - new_labels = _ensure_index(new_labels) + new_labels = ensure_index(new_labels) old_len = 
len(self.axes[axis]) new_len = len(new_labels) @@ -3444,7 +3444,7 @@ def unpickle_block(values, mgr_locs): if (isinstance(state, tuple) and len(state) >= 4 and '0.14.1' in state[3]): state = state[3]['0.14.1'] - self.axes = [_ensure_index(ax) for ax in state['axes']] + self.axes = [ensure_index(ax) for ax in state['axes']] self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) for b in state['blocks']) else: @@ -3452,7 +3452,7 @@ def unpickle_block(values, mgr_locs): # little while longer ax_arrays, bvalues, bitems = state[:3] - self.axes = [_ensure_index(ax) for ax in ax_arrays] + self.axes = [ensure_index(ax) for ax in ax_arrays] if len(bitems) == 1 and self.axes[0].equals(bitems[0]): # This is a workaround for pre-0.14.1 pickles that didn't @@ -4386,7 +4386,7 @@ def reindex_axis(self, new_index, axis, method=None, limit=None, """ Conform block manager to new index. """ - new_index = _ensure_index(new_index) + new_index = ensure_index(new_index) new_index, indexer = self.axes[axis].reindex(new_index, method=method, limit=limit) @@ -4665,7 +4665,7 @@ def __init__(self, block, axis, do_integrity_check=False, fastpath=False): 'more than 1 block') block = block[0] else: - self.axes = [_ensure_index(axis)] + self.axes = [ensure_index(axis)] # create the block here if isinstance(block, list): @@ -4891,7 +4891,7 @@ def form_blocks(arrays, names, axes): items_dict = defaultdict(list) extra_locs = [] - names_idx = _ensure_index(names) + names_idx = ensure_index(names) if names_idx.equals(axes[0]): names_indexer = np.arange(len(names_idx)) else: @@ -5209,7 +5209,7 @@ def _factor_indexer(shape, labels): expanded label indexer """ mult = np.array(shape)[::-1].cumprod()[::-1] - return _ensure_platform_int( + return ensure_platform_int( np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) @@ -5229,7 +5229,7 @@ def _get_blkno_placements(blknos, blk_count, group=True): """ - blknos = _ensure_int64(blknos) + blknos = ensure_int64(blknos) # FIXME: blk_count is 
unused, but it may avoid the use of dicts in cython for blkno, indexer in libinternals.get_blkno_indexers(blknos, group): diff --git a/pandas/core/missing.py b/pandas/core/missing.py index e9b9a734ec5f5..16820dcbb55bc 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -18,7 +18,7 @@ is_scalar, is_integer, needs_i8_conversion, - _ensure_float64) + ensure_float64) from pandas.core.dtypes.cast import infer_dtype_from_array from pandas.core.dtypes.missing import isna @@ -480,7 +480,7 @@ def pad_1d(values, limit=None, mask=None, dtype=None): elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _pad_1d_datetime elif is_integer_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) _method = algos.pad_inplace_float64 elif values.dtype == np.object_: _method = algos.pad_inplace_object @@ -506,7 +506,7 @@ def backfill_1d(values, limit=None, mask=None, dtype=None): elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _backfill_1d_datetime elif is_integer_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) _method = algos.backfill_inplace_float64 elif values.dtype == np.object_: _method = algos.backfill_inplace_object @@ -533,7 +533,7 @@ def pad_2d(values, limit=None, mask=None, dtype=None): elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _pad_2d_datetime elif is_integer_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) _method = algos.pad_2d_inplace_float64 elif values.dtype == np.object_: _method = algos.pad_2d_inplace_object @@ -564,7 +564,7 @@ def backfill_2d(values, limit=None, mask=None, dtype=None): elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype): _method = _backfill_2d_datetime elif is_integer_dtype(values): - values = _ensure_float64(values) + values = ensure_float64(values) _method = algos.backfill_2d_inplace_float64 elif values.dtype == np.object_: _method = 
algos.backfill_2d_inplace_object diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 1ddf77cf71a11..bccc5a587bd83 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -34,7 +34,7 @@ is_list_like, is_scalar, is_extension_array_dtype, - _ensure_object) + ensure_object) from pandas.core.dtypes.cast import ( maybe_upcast_putmask, find_common_type, construct_1d_object_array_from_listlike) @@ -1387,8 +1387,8 @@ def na_op(x, y): if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)): result = op(x, y) # when would this be hit? else: - x = _ensure_object(x) - y = _ensure_object(y) + x = ensure_object(x) + y = ensure_object(y) result = libops.vec_binop(x, y, op) else: # let null fall thru diff --git a/pandas/core/panel.py b/pandas/core/panel.py index a1812cb5801b9..16ade3fae90a1 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -17,12 +17,13 @@ import pandas.core.ops as ops import pandas.core.common as com +import pandas.core.indexes.base as ibase from pandas import compat from pandas.compat import (map, zip, range, u, OrderedDict) from pandas.compat.numpy import function as nv from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs -from pandas.core.index import (Index, MultiIndex, _ensure_index, +from pandas.core.index import (Index, MultiIndex, ensure_index, _get_objs_combined_axis) from pandas.io.formats.printing import pprint_thing from pandas.core.indexing import maybe_droplevels @@ -198,7 +199,7 @@ def _init_dict(self, data, axes, dtype=None): # prefilter if haxis passed if haxis is not None: - haxis = _ensure_index(haxis) + haxis = ensure_index(haxis) data = OrderedDict((k, v) for k, v in compat.iteritems(data) if k in haxis) @@ -319,9 +320,9 @@ def _init_matrix(self, data, axes, dtype=None, copy=False): fixed_axes = [] for i, ax in enumerate(axes): if ax is None: - ax = com._default_index(shape[i]) + ax = ibase.default_index(shape[i]) else: - ax = _ensure_index(ax) + ax = ensure_index(ax) 
fixed_axes.append(ax) return create_block_manager_from_blocks([values], fixed_axes) @@ -1536,7 +1537,7 @@ def _extract_axis(self, data, axis=0, intersect=False): if index is None: index = Index([]) - return _ensure_index(index) + return ensure_index(index) Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0, diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index b36e9b8d900fd..1d6105cb68bf1 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -5,12 +5,13 @@ import numpy as np from pandas import compat, DataFrame, Series, Index, MultiIndex from pandas.core.index import (_get_objs_combined_axis, - _ensure_index, _get_consensus_names, + ensure_index, _get_consensus_names, _all_indexes_same) from pandas.core.arrays.categorical import (_factorize_from_iterable, _factorize_from_iterables) from pandas.core.internals import concatenate_block_managers from pandas.core import common as com +import pandas.core.indexes.base as ibase from pandas.core.generic import NDFrame import pandas.core.dtypes.concat as _concat @@ -477,7 +478,7 @@ def _get_concat_axis(self): if self.axis == 0: indexes = [x.index for x in self.objs] elif self.ignore_index: - idx = com._default_index(len(self.objs)) + idx = ibase.default_index(len(self.objs)) return idx elif self.keys is None: names = [None] * len(self.objs) @@ -497,14 +498,14 @@ def _get_concat_axis(self): if has_names: return Index(names) else: - return com._default_index(len(self.objs)) + return ibase.default_index(len(self.objs)) else: - return _ensure_index(self.keys) + return ensure_index(self.keys) else: indexes = [x._data.axes[self.axis] for x in self.objs] if self.ignore_index: - idx = com._default_index(sum(len(i) for i in indexes)) + idx = ibase.default_index(sum(len(i) for i in indexes)) return idx if self.keys is None: @@ -540,16 +541,16 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): if levels is None: _, levels = 
_factorize_from_iterables(zipped) else: - levels = [_ensure_index(x) for x in levels] + levels = [ensure_index(x) for x in levels] else: zipped = [keys] if names is None: names = [None] if levels is None: - levels = [_ensure_index(keys)] + levels = [ensure_index(keys)] else: - levels = [_ensure_index(x) for x in levels] + levels = [ensure_index(x) for x in levels] if not _all_indexes_same(indexes): label_list = [] @@ -608,7 +609,7 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None): # do something a bit more speedy for hlevel, level in zip(zipped, levels): - hlevel = _ensure_index(hlevel) + hlevel = ensure_index(hlevel) mapped = level.get_indexer(hlevel) mask = mapped == -1 diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e38c069b3c3fb..25d8cb4e804a2 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -31,9 +31,9 @@ is_bool_dtype, is_list_like, is_datetimelike, - _ensure_int64, - _ensure_float64, - _ensure_object, + ensure_int64, + ensure_float64, + ensure_object, _get_dtype) from pandas.core.dtypes.missing import na_value_for_dtype from pandas.core.internals import (items_overlap_with_suffix, @@ -1212,9 +1212,9 @@ def _asof_by_function(direction, on_type, by_type): _type_casters = { - 'int64_t': _ensure_int64, - 'double': _ensure_float64, - 'object': _ensure_object, + 'int64_t': ensure_int64, + 'double': ensure_float64, + 'object': ensure_object, } _cython_types = { @@ -1490,8 +1490,8 @@ def _get_single_indexer(join_key, index, sort=False): left_key, right_key, count = _factorize_keys(join_key, index, sort=sort) left_indexer, right_indexer = libjoin.left_outer_join( - _ensure_int64(left_key), - _ensure_int64(right_key), + ensure_int64(left_key), + ensure_int64(right_key), count, sort=sort) return left_indexer, right_indexer @@ -1553,16 +1553,16 @@ def _factorize_keys(lk, rk, sort=True): # Same categories in different orders -> recode rk = _recode_for_categories(rk.codes, rk.categories, 
lk.categories) - lk = _ensure_int64(lk.codes) - rk = _ensure_int64(rk) + lk = ensure_int64(lk.codes) + rk = ensure_int64(rk) elif is_int_or_datetime_dtype(lk) and is_int_or_datetime_dtype(rk): klass = libhashtable.Int64Factorizer - lk = _ensure_int64(com._values_from_object(lk)) - rk = _ensure_int64(com._values_from_object(rk)) + lk = ensure_int64(com._values_from_object(lk)) + rk = ensure_int64(com._values_from_object(rk)) else: klass = libhashtable.Factorizer - lk = _ensure_object(lk) - rk = _ensure_object(rk) + lk = ensure_object(lk) + rk = ensure_object(rk) rizer = klass(max(len(lk), len(rk))) @@ -1600,7 +1600,7 @@ def _sort_labels(uniques, left, right): labels = np.concatenate([left, right]) _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1) - new_labels = _ensure_int64(new_labels) + new_labels = ensure_int64(new_labels) new_left, new_right = new_labels[:llength], new_labels[llength:] return new_left, new_right diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index d5d2e594b8d6b..2f2dc1264e996 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -8,7 +8,7 @@ import numpy as np from pandas.core.dtypes.common import ( - _ensure_platform_int, + ensure_platform_int, is_list_like, is_bool_dtype, needs_i8_conversion, is_sparse, is_object_dtype) from pandas.core.dtypes.cast import maybe_promote @@ -141,7 +141,7 @@ def _make_sorted_values_labels(self): ngroups = len(obs_ids) indexer = _algos.groupsort_indexer(comp_index, ngroups)[0] - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) self.sorted_values = algos.take_nd(self.values, indexer, axis=0) self.sorted_labels = [l.take(indexer) for l in to_sort] @@ -156,7 +156,7 @@ def _make_selectors(self): comp_index, obs_ids = get_compressed_ids(remaining_labels, level_sizes) ngroups = len(obs_ids) - comp_index = _ensure_platform_int(comp_index) + comp_index = ensure_platform_int(comp_index) stride = 
self.index.levshape[self.level] + self.lift self.full_shape = ngroups, stride diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index bbdce762feee3..031c94c06d3c8 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -12,7 +12,7 @@ is_timedelta64_dtype, is_datetime64tz_dtype, is_datetime_or_timedelta_dtype, - _ensure_int64) + ensure_int64) import pandas.core.algorithms as algos import pandas.core.nanops as nanops @@ -335,7 +335,7 @@ def _bins_to_cuts(x, bins, right=True, labels=None, bins = unique_bins side = 'left' if right else 'right' - ids = _ensure_int64(bins.searchsorted(x, side=side)) + ids = ensure_int64(bins.searchsorted(x, side=side)) if include_lowest: # Numpy 1.9 support: ensure this mask is a Numpy array diff --git a/pandas/core/series.py b/pandas/core/series.py index 0bdb9d9cc23a6..77445159129f2 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -32,7 +32,7 @@ is_dict_like, is_scalar, _is_unorderable_exception, - _ensure_platform_int, + ensure_platform_int, pandas_dtype) from pandas.core.dtypes.generic import ( ABCSparseArray, ABCDataFrame, ABCIndexClass) @@ -51,7 +51,7 @@ na_value_for_dtype) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, - Float64Index, _ensure_index) + Float64Index, ensure_index) from pandas.core.indexing import check_bool_indexer, maybe_convert_indices from pandas.core import generic, base from pandas.core.internals import SingleBlockManager @@ -71,6 +71,8 @@ import pandas.core.common as com import pandas.core.nanops as nanops +import pandas.core.indexes.base as ibase + import pandas.io.formats.format as fmt from pandas.util._decorators import ( Appender, deprecate, deprecate_kwarg, Substitution) @@ -187,7 +189,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None, else: if index is not None: - index = _ensure_index(index) + index = ensure_index(index) if data is None: data = {} @@ -256,7 +258,7 @@ def __init__(self, data=None, 
index=None, dtype=None, name=None, if index is None: if not is_list_like(data): data = [data] - index = com._default_index(len(data)) + index = ibase.default_index(len(data)) elif is_list_like(data): # a scalar numpy array is list-like but doesn't @@ -373,7 +375,7 @@ def _set_axis(self, axis, labels, fastpath=False): """ override generic, we want to set the _typ here """ if not fastpath: - labels = _ensure_index(labels) + labels = ensure_index(labels) is_all_dates = labels.is_all_dates if is_all_dates: @@ -1202,7 +1204,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): """ inplace = validate_bool_kwarg(inplace, 'inplace') if drop: - new_index = com._default_index(len(self)) + new_index = ibase.default_index(len(self)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] @@ -2079,7 +2081,7 @@ def __rmatmul__(self, other): @deprecate_kwarg(old_arg_name='v', new_arg_name='value') def searchsorted(self, value, side='left', sorter=None): if sorter is not None: - sorter = _ensure_platform_int(sorter) + sorter = ensure_platform_int(sorter) return self._values.searchsorted(Series(value)._values, side=side, sorter=sorter) @@ -2500,7 +2502,7 @@ def _try_kind_sort(arr): bad = isna(arr) good = ~bad - idx = com._default_index(len(self)) + idx = ibase.default_index(len(self)) argsorted = _try_kind_sort(arr[good]) @@ -2676,7 +2678,7 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False, indexer = nargsort(index, kind=kind, ascending=ascending, na_position=na_position) - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) new_index = index.take(indexer) new_index = new_index._sort_levels_monotonic() @@ -3537,7 +3539,7 @@ def memory_usage(self, index=True, deep=False): @Appender(generic._shared_docs['_take']) def _take(self, indices, axis=0, is_copy=False): - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) new_index = self.index.take(indices) if 
is_categorical_dtype(self): diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 212f44e55c489..5aa9ea658482b 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -3,8 +3,8 @@ import numpy as np from pandas.compat import long, string_types, PY3 from pandas.core.dtypes.common import ( - _ensure_platform_int, - _ensure_int64, + ensure_platform_int, + ensure_int64, is_list_like, is_categorical_dtype) from pandas.core.dtypes.cast import infer_dtype_from_array @@ -57,7 +57,7 @@ def maybe_lift(lab, size): # so that all output values are non-negative return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) - labels = map(_ensure_int64, labels) + labels = map(ensure_int64, labels) if not xnull: labels, shape = map(list, zip(*map(maybe_lift, labels, shape))) @@ -338,9 +338,9 @@ def get_group_index_sorter(group_index, ngroups): do_groupsort = (count > 0 and ((alpha + beta * ngroups) < (count * np.log(count)))) if do_groupsort: - sorter, _ = algos.groupsort_indexer(_ensure_int64(group_index), + sorter, _ = algos.groupsort_indexer(ensure_int64(group_index), ngroups) - return _ensure_platform_int(sorter) + return ensure_platform_int(sorter) else: return group_index.argsort(kind='mergesort') @@ -355,7 +355,7 @@ def compress_group_index(group_index, sort=True): size_hint = min(len(group_index), hashtable._SIZE_HINT_LIMIT) table = hashtable.Int64HashTable(size_hint) - group_index = _ensure_int64(group_index) + group_index = ensure_int64(group_index) # note, group labels come out ascending (ie, 1,2,3 etc) comp_ids, obs_group_ids = table.get_labels_groupby(group_index) @@ -462,7 +462,7 @@ def sort_mixed(values): if not is_list_like(labels): raise TypeError("Only list-like objects or None are allowed to be" "passed to safe_sort as labels") - labels = _ensure_platform_int(np.asarray(labels)) + labels = ensure_platform_int(np.asarray(labels)) from pandas import Index if not assume_unique and not Index(values).is_unique: @@ -474,7 +474,7 @@ def 
sort_mixed(values): values, algorithms._hashtables) t = hash_klass(len(values)) t.map_locations(values) - sorter = _ensure_platform_int(t.lookup(ordered)) + sorter = ensure_platform_int(t.lookup(ordered)) reverse_indexer = np.empty(len(sorter), dtype=np.int_) reverse_indexer.put(sorter, np.arange(len(sorter))) @@ -487,4 +487,4 @@ def sort_mixed(values): new_labels = reverse_indexer.take(labels, mode='wrap') np.putmask(new_labels, mask, na_sentinel) - return ordered, _ensure_platform_int(new_labels) + return ordered, ensure_platform_int(new_labels) diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index ff58f7d104ff9..6f0ffbff22028 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -16,7 +16,7 @@ from pandas.core.dtypes.generic import ABCSparseSeries from pandas.core.dtypes.common import ( - _ensure_platform_int, + ensure_platform_int, is_float, is_integer, is_object_dtype, is_integer_dtype, @@ -468,7 +468,7 @@ def take(self, indices, axis=0, allow_fill=True, # return scalar return self[indices] - indices = _ensure_platform_int(indices) + indices = ensure_platform_int(indices) n = len(self) if allow_fill and fill_value is not None: # allow -1 to indicate self.fill_value, diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 1feddf004058a..f7071061d07ab 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -12,10 +12,10 @@ from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.cast import maybe_upcast, find_common_type -from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse +from pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse from pandas.compat.numpy import function as nv -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import Index, MultiIndex, ensure_index from pandas.core.series import Series from pandas.core.frame import DataFrame, extract_index, _prep_ndarray import 
pandas.core.algorithms as algos @@ -27,6 +27,7 @@ from pandas.util._decorators import Appender import pandas.core.ops as ops import pandas.core.common as com +import pandas.core.indexes.base as ibase _shared_doc_kwargs = dict(klass='SparseDataFrame') @@ -111,7 +112,7 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None, if index is None: index = Index([]) else: - index = _ensure_index(index) + index = ensure_index(index) if columns is None: columns = Index([]) @@ -139,7 +140,7 @@ def _constructor(self): def _init_dict(self, data, index, columns, dtype=None): # pre-filter out columns if we passed it if columns is not None: - columns = _ensure_index(columns) + columns = ensure_index(columns) data = {k: v for k, v in compat.iteritems(data) if k in columns} else: keys = com._dict_keys_to_ordered_list(data) @@ -219,9 +220,9 @@ def _init_spmatrix(self, data, index, columns, dtype=None, def _prep_index(self, data, index, columns): N, K = data.shape if index is None: - index = com._default_index(N) + index = ibase.default_index(N) if columns is None: - columns = com._default_index(K) + columns = ibase.default_index(K) if len(columns) != K: raise ValueError('Column length mismatch: {columns} vs. 
{K}' @@ -650,7 +651,7 @@ def _reindex_index(self, index, method, copy, level, fill_value=np.nan, index=index, columns=self.columns).__finalize__(self) indexer = self.index.get_indexer(index, method, limit=limit) - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) mask = indexer == -1 need_mask = mask.any() @@ -926,7 +927,7 @@ def to_manager(sdf, columns, index): """ # from BlockManager perspective - axes = [_ensure_index(columns), _ensure_index(index)] + axes = [ensure_index(columns), ensure_index(index)] return create_block_manager_from_arrays( [sdf[c] for c in columns], columns, axes) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index fb337d71fcf8d..96ee5b7954f45 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -11,11 +11,12 @@ from pandas.core.dtypes.missing import isna, notna from pandas.compat.numpy import function as nv -from pandas.core.index import Index, _ensure_index, InvalidIndexError +from pandas.core.index import Index, ensure_index, InvalidIndexError from pandas.core.series import Series from pandas.core.internals import SingleBlockManager from pandas.core import generic import pandas.core.common as com +import pandas.core.indexes.base as ibase import pandas.core.ops as ops import pandas._libs.index as libindex from pandas.util._decorators import Appender @@ -149,8 +150,8 @@ def __init__(self, data=None, index=None, sparse_index=None, kind='block', data.fill(v) if index is None: - index = com._default_index(sparse_index.length) - index = _ensure_index(index) + index = ibase.default_index(sparse_index.length) + index = ensure_index(index) # create/copy the manager if isinstance(data, SingleBlockManager): diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index c8204faa55cf8..83de83ab76a2c 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -14,7 +14,7 @@ _guess_datetime_format) from 
pandas.core.dtypes.common import ( - _ensure_object, + ensure_object, is_datetime64_ns_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -216,7 +216,7 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, raise TypeError('arg must be a string, datetime, list, tuple, ' '1-d array, or Series') - arg = _ensure_object(arg) + arg = ensure_object(arg) require_iso8601 = False if infer_datetime_format and format is None: @@ -787,7 +787,7 @@ def _convert_listlike(arg, format): raise TypeError('arg must be a string, datetime, list, tuple, ' '1-d array, or Series') - arg = _ensure_object(arg) + arg = ensure_object(arg) if infer_time_format and format is None: format = _guess_time_format_for_array(arg) diff --git a/pandas/core/tools/numeric.py b/pandas/core/tools/numeric.py index f1d13ccf36cf6..4bb5c223d1bcc 100644 --- a/pandas/core/tools/numeric.py +++ b/pandas/core/tools/numeric.py @@ -6,7 +6,7 @@ is_decimal, is_datetime_or_timedelta_dtype, is_number, - _ensure_object) + ensure_object) from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas._libs import lib @@ -130,7 +130,7 @@ def to_numeric(arg, errors='raise', downcast=None): elif is_datetime_or_timedelta_dtype(values): values = values.astype(np.int64) else: - values = _ensure_object(values) + values = ensure_object(values) coerce_numeric = False if errors in ('ignore', 'raise') else True values = lib.maybe_convert_numeric(values, set(), coerce_numeric=coerce_numeric) diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index ed2659973cc6a..63ab120833ba1 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -9,7 +9,7 @@ array_to_timedelta64) from pandas.core.dtypes.common import ( - _ensure_object, + ensure_object, is_integer_dtype, is_timedelta64_dtype, is_list_like) @@ -171,7 +171,7 @@ def _convert_listlike(arg, unit='ns', box=True, errors='raise', name=None): 
'timedelta64[ns]', copy=False) else: try: - value = array_to_timedelta64(_ensure_object(arg), + value = array_to_timedelta64(ensure_object(arg), unit=unit, errors=errors) value = value.astype('timedelta64[ns]', copy=False) except ValueError: diff --git a/pandas/core/window.py b/pandas/core/window.py index e20db4df2cb2a..6b6f27bcb3863 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -27,7 +27,7 @@ needs_i8_conversion, is_timedelta64_dtype, is_list_like, - _ensure_float64, + ensure_float64, is_scalar) from pandas.core.base import PandasObject, SelectionMixin @@ -208,9 +208,9 @@ def _prep_values(self, values=None, kill_inf=True): # GH #12373 : rolling functions error on float32 data # make sure the data is coerced to float64 if is_float_dtype(values.dtype): - values = _ensure_float64(values) + values = ensure_float64(values) elif is_integer_dtype(values.dtype): - values = _ensure_float64(values) + values = ensure_float64(values) elif needs_i8_conversion(values.dtype): raise NotImplementedError("ops for {action} for this " "dtype {dtype} are not " @@ -219,7 +219,7 @@ def _prep_values(self, values=None, kill_inf=True): dtype=values.dtype)) else: try: - values = _ensure_float64(values) + values = ensure_float64(values) except (ValueError, TypeError): raise TypeError("cannot handle this type -> {0}" "".format(values.dtype)) @@ -265,7 +265,7 @@ def _wrap_results(self, results, blocks, obj): """ from pandas import Series, concat - from pandas.core.index import _ensure_index + from pandas.core.index import ensure_index final = [] for result, block in zip(results, blocks): @@ -286,7 +286,7 @@ def _wrap_results(self, results, blocks, obj): if self._selection is not None: - selection = _ensure_index(self._selection) + selection = ensure_index(self._selection) # need to reorder to include original location of # the on column (if its not already there) @@ -857,7 +857,7 @@ def _apply(self, func, name=None, window=None, center=None, def func(arg, window, 
min_periods=None, closed=None): minp = check_minp(min_periods, window) # ensure we are only rolling on floats - arg = _ensure_float64(arg) + arg = ensure_float64(arg) return cfunc(arg, window, minp, indexi, closed, **kwargs) diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 5f97447d29cbc..f69e4a484d177 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -36,7 +36,7 @@ from pandas.core.dtypes.generic import ABCSparseArray, ABCMultiIndex from pandas.core.base import PandasObject import pandas.core.common as com -from pandas.core.index import Index, _ensure_index +from pandas.core.index import Index, ensure_index from pandas.core.config import get_option, set_option from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex @@ -426,7 +426,7 @@ def __init__(self, frame, buf=None, columns=None, col_space=None, self.kwds = kwds if columns is not None: - self.columns = _ensure_index(columns) + self.columns = ensure_index(columns) self.frame = self.frame[self.columns] else: self.columns = frame.columns diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 65df2bffb4abf..486040fa52f35 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -16,7 +16,7 @@ from pandas.compat import (range, lrange, PY3, StringIO, lzip, zip, string_types, map, u) from pandas.core.dtypes.common import ( - is_integer, _ensure_object, + is_integer, ensure_object, is_list_like, is_integer_dtype, is_float, is_dtype_equal, is_object_dtype, is_string_dtype, @@ -25,7 +25,7 @@ from pandas.core.dtypes.missing import isna from pandas.core.dtypes.cast import astype_nansafe from pandas.core.index import (Index, MultiIndex, RangeIndex, - _ensure_index_from_sequences) + ensure_index_from_sequences) from pandas.core.series import Series from pandas.core.frame import DataFrame from pandas.core.arrays import Categorical @@ -1521,7 +1521,7 @@ def _agg_index(self, index, try_parse_dates=True): 
arrays.append(arr) names = self.index_names - index = _ensure_index_from_sequences(arrays, names) + index = ensure_index_from_sequences(arrays, names) return index @@ -1889,7 +1889,7 @@ def read(self, nrows=None): try_parse_dates=True) arrays.append(values) - index = _ensure_index_from_sequences(arrays) + index = ensure_index_from_sequences(arrays) if self.usecols is not None: names = self._filter_usecols(names) @@ -3005,7 +3005,7 @@ def converter(*date_cols): try: return tools.to_datetime( - _ensure_object(strs), + ensure_object(strs), utc=None, box=False, dayfirst=dayfirst, @@ -3222,7 +3222,7 @@ def _get_empty_meta(columns, index_col, index_names, dtype=None): index = Index([]) else: data = [Series([], dtype=dtype[name]) for name in index_names] - index = _ensure_index_from_sequences(data, names=index_names) + index = ensure_index_from_sequences(data, names=index_names) index_col.sort() for i, n in enumerate(index_col): diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 35e244bf2f9eb..f2d6fe01e0573 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -28,9 +28,9 @@ is_timedelta64_dtype, is_datetime64tz_dtype, is_datetime64_dtype, - _ensure_object, - _ensure_int64, - _ensure_platform_int) + ensure_object, + ensure_int64, + ensure_platform_int) from pandas.core.dtypes.missing import array_equivalent from pandas.core import config @@ -44,7 +44,7 @@ from pandas.core.internals import (BlockManager, make_block, _block2d_to_blocknd, _factor_indexer, _block_shape) -from pandas.core.index import _ensure_index +from pandas.core.index import ensure_index from pandas.core.computation.pytables import Expr, maybe_expression from pandas.io.common import _stringify_path @@ -3725,8 +3725,8 @@ def process_filter(field, filt): elif field in axis_values: # we need to filter on this dimension - values = _ensure_index(getattr(obj, field).values) - filt = _ensure_index(filt) + values = ensure_index(getattr(obj, field).values) + filt = ensure_index(filt) # 
hack until we support reversed dim flags if isinstance(obj, DataFrame): @@ -3892,8 +3892,8 @@ def read(self, where=None, columns=None, **kwargs): if len(unique(key)) == len(key): sorter, _ = algos.groupsort_indexer( - _ensure_int64(key), np.prod(N)) - sorter = _ensure_platform_int(sorter) + ensure_int64(key), np.prod(N)) + sorter = ensure_platform_int(sorter) # create the objs for c in self.values_axes: @@ -3938,7 +3938,7 @@ def read(self, where=None, columns=None, **kwargs): unique_tuples = com._asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) - indexer = _ensure_platform_int(indexer) + indexer = ensure_platform_int(indexer) new_index = long_index.take(indexer) new_values = lp.values.take(indexer, axis=0) @@ -4236,7 +4236,7 @@ def read(self, where=None, columns=None, **kwargs): for a in self.values_axes: # we could have a multi-index constructor here - # _ensure_index doesn't recognized our list-of-tuples here + # ensure_index doesn't recognized our list-of-tuples here if info.get('type') == 'MultiIndex': cols = MultiIndex.from_tuples(a.values) else: @@ -4437,18 +4437,18 @@ def is_transposed(self): def _reindex_axis(obj, axis, labels, other=None): ax = obj._get_axis(axis) - labels = _ensure_index(labels) + labels = ensure_index(labels) # try not to reindex even if other is provided # if it equals our current index if other is not None: - other = _ensure_index(other) + other = ensure_index(other) if (other is None or labels.equals(other)) and labels.equals(ax): return obj - labels = _ensure_index(labels.unique()) + labels = ensure_index(labels.unique()) if other is not None: - labels = _ensure_index(other.unique()) & labels + labels = ensure_index(other.unique()) & labels if not labels.equals(ax): slicer = [slice(None, None)] * obj.ndim slicer[axis] = labels @@ -4656,7 +4656,7 @@ def _convert_string_array(data, encoding, errors, itemsize=None): # create the sized dtype if itemsize is None: - ensured = _ensure_object(data.ravel()) + 
ensured = ensure_object(data.ravel()) itemsize = libwriters.max_len_string_array(ensured) data = np.asarray(data, dtype="S%d" % itemsize) @@ -4688,7 +4688,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None, encoding = _ensure_encoding(encoding) if encoding is not None and len(data): - itemsize = libwriters.max_len_string_array(_ensure_object(data)) + itemsize = libwriters.max_len_string_array(ensure_object(data)) if compat.PY3: dtype = "U{0}".format(itemsize) else: diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 4ce2ed4e36139..efd5f337fdf69 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -28,7 +28,7 @@ zip, BytesIO) from pandas.core.arrays import Categorical from pandas.core.base import StringMixin -from pandas.core.dtypes.common import (is_categorical_dtype, _ensure_object, +from pandas.core.dtypes.common import (is_categorical_dtype, ensure_object, is_datetime64_dtype) from pandas.core.frame import DataFrame from pandas.core.series import Series @@ -1818,7 +1818,7 @@ def _dtype_to_stata_type(dtype, column): if dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? - itemsize = max_len_string_array(_ensure_object(column.values)) + itemsize = max_len_string_array(ensure_object(column.values)) return max(itemsize, 1) elif dtype == np.float64: return 255 @@ -1863,7 +1863,7 @@ def _dtype_to_default_stata_fmt(dtype, column, dta_version=114, if not (inferred_dtype in ('string', 'unicode') or len(column) == 0): raise ValueError('Writing general object arrays is not supported') - itemsize = max_len_string_array(_ensure_object(column.values)) + itemsize = max_len_string_array(ensure_object(column.values)) if itemsize > max_str_len: if dta_version >= 117: return '%9s' @@ -2418,7 +2418,7 @@ def _dtype_to_stata_type_117(dtype, column, force_strl): if dtype.type == np.object_: # try to coerce it to the biggest string # not memory efficient, what else could we # do? 
- itemsize = max_len_string_array(_ensure_object(column.values)) + itemsize = max_len_string_array(ensure_object(column.values)) itemsize = max(itemsize, 1) if itemsize <= 2045: return itemsize diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index f81767156b255..5f1f6dc5bca87 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -35,8 +35,8 @@ is_bool, is_scalar, is_scipy_sparse, - _ensure_int32, - _ensure_categorical) + ensure_int32, + ensure_categorical) from pandas.util import testing as tm import pandas.util._test_decorators as td @@ -1217,19 +1217,19 @@ def test_is_scipy_sparse(spmatrix): # noqa: F811 def test_ensure_int32(): values = np.arange(10, dtype=np.int32) - result = _ensure_int32(values) + result = ensure_int32(values) assert (result.dtype == np.int32) values = np.arange(10, dtype=np.int64) - result = _ensure_int32(values) + result = ensure_int32(values) assert (result.dtype == np.int32) def test_ensure_categorical(): values = np.arange(10, dtype=np.int32) - result = _ensure_categorical(values) + result = ensure_categorical(values) assert (result.dtype == 'category') values = Categorical(values) - result = _ensure_categorical(values) + result = ensure_categorical(values) tm.assert_categorical_equal(result, values) diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index 9a838341c7d8c..9dcc13c15736f 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -5,7 +5,7 @@ from numpy import nan import numpy as np -from pandas.core.dtypes.common import _ensure_int64 +from pandas.core.dtypes.common import ensure_int64 from pandas import Index, isna from pandas.core.groupby.ops import generate_bins_generic from pandas.util.testing import assert_almost_equal @@ -90,8 +90,8 @@ def _check(dtype): bins = np.array([6, 12, 20]) out = np.zeros((3, 4), dtype) counts = np.zeros(len(out), 
dtype=np.int64) - labels = _ensure_int64(np.repeat(np.arange(3), - np.diff(np.r_[0, bins]))) + labels = ensure_int64(np.repeat(np.arange(3), + np.diff(np.r_[0, bins]))) func = getattr(groupby, 'group_ohlc_%s' % dtype) func(out, counts, obj[:, None], labels) diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 7fccf1f57a886..57b04bfd82528 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -7,7 +7,7 @@ from pandas.util import testing as tm from pandas import Series, DataFrame, Timestamp, MultiIndex, concat, date_range from pandas.core.dtypes.common import ( - _ensure_platform_int, is_timedelta64_dtype) + ensure_platform_int, is_timedelta64_dtype) from pandas.compat import StringIO from pandas._libs import groupby @@ -76,7 +76,7 @@ def test_transform_fast(): grp = df.groupby('id')['val'] values = np.repeat(grp.mean().values, - _ensure_platform_int(grp.count().values)) + ensure_platform_int(grp.count().values)) expected = pd.Series(values, index=df.index, name='val') result = grp.transform(np.mean) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 639e51e9361ab..7b105390db40b 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -21,7 +21,7 @@ DataFrame, Float64Index, Int64Index, UInt64Index, CategoricalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, isna) -from pandas.core.index import _get_combined_index, _ensure_index_from_sequences +from pandas.core.index import _get_combined_index, ensure_index_from_sequences from pandas.util.testing import assert_almost_equal from pandas.compat.numpy import np_datetime64_compat @@ -2455,7 +2455,7 @@ class TestIndexUtils(object): names=['L1', 'L2'])), ]) def test_ensure_index_from_sequences(self, data, names, expected): - result = _ensure_index_from_sequences(data, names) + result = ensure_index_from_sequences(data, names) 
tm.assert_index_equal(result, expected)
Also updated numpy_helper to not use things from numpy's deprecated C API. This won't get rid of the warnings since cython still causes them, but it's still nice. Not sure how to lint for this (or if we really want to), will see if google knows.
https://api.github.com/repos/pandas-dev/pandas/pulls/21870
2018-07-12T01:46:58Z
2018-07-17T12:21:04Z
2018-07-17T12:21:04Z
2020-04-05T17:41:20Z
DEPR: Warn about Series.to_csv signature alignment
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index ed4022d422b4d..a0633a2be085a 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -245,7 +245,7 @@ Deprecations - :meth:`DataFrame.to_stata`, :meth:`read_stata`, :class:`StataReader` and :class:`StataWriter` have deprecated the ``encoding`` argument. The encoding of a Stata dta file is determined by the file type and cannot be changed (:issue:`21244`). - :meth:`MultiIndex.to_hierarchical` is deprecated and will be removed in a future version (:issue:`21613`) - :meth:`Series.ptp` is deprecated. Use ``numpy.ptp`` instead (:issue:`21614`) -- +- The signature in :meth:`Series.to_csv` has been deprecated. Please follow the signature in :meth:`DataFrame.to_csv` instead (:issue:`19745`) .. _whatsnew_0240.prior_deprecations: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6380944338010..f0aa00163c902 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1710,103 +1710,6 @@ def to_panel(self): return self._constructor_expanddim(new_mgr) - def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, - columns=None, header=True, index=True, index_label=None, - mode='w', encoding=None, compression=None, quoting=None, - quotechar='"', line_terminator='\n', chunksize=None, - tupleize_cols=None, date_format=None, doublequote=True, - escapechar=None, decimal='.'): - r"""Write DataFrame to a comma-separated values (csv) file - - Parameters - ---------- - path_or_buf : string or file handle, default None - File path or object, if None is provided the result is returned as - a string. - sep : character, default ',' - Field delimiter for the output file. - na_rep : string, default '' - Missing data representation - float_format : string, default None - Format string for floating point numbers - columns : sequence, optional - Columns to write - header : boolean or list of string, default True - Write out the column names. 
If a list of strings is given it is - assumed to be aliases for the column names - index : boolean, default True - Write row names (index) - index_label : string or sequence, or False, default None - Column label for index column(s) if desired. If None is given, and - `header` and `index` are True, then the index names are used. A - sequence should be given if the DataFrame uses MultiIndex. If - False do not print fields for index names. Use index_label=False - for easier importing in R - mode : str - Python write mode, default 'w' - encoding : string, optional - A string representing the encoding to use in the output file, - defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. - compression : {'infer', 'gzip', 'bz2', 'xz', None}, default None - If 'infer' and `path_or_buf` is path-like, then detect compression - from the following extensions: '.gz', '.bz2' or '.xz' - (otherwise no compression). - line_terminator : string, default ``'\n'`` - The newline character or character sequence to use in the output - file - quoting : optional constant from csv module - defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` - then floats are converted to strings and thus csv.QUOTE_NONNUMERIC - will treat them as non-numeric - quotechar : string (length 1), default '\"' - character used to quote fields - doublequote : boolean, default True - Control quoting of `quotechar` inside a field - escapechar : string (length 1), default None - character used to escape `sep` and `quotechar` when appropriate - chunksize : int or None - rows to write at a time - tupleize_cols : boolean, default False - .. deprecated:: 0.21.0 - This argument will be removed and will always write each row - of the multi-index as a separate row in the CSV file. - - Write MultiIndex columns as a list of tuples (if True) or in - the new, expanded format, where each MultiIndex column is a row - in the CSV (if False). 
- date_format : string, default None - Format string for datetime objects - decimal: string, default '.' - Character recognized as decimal separator. E.g. use ',' for - European data - - """ - - if tupleize_cols is not None: - warnings.warn("The 'tupleize_cols' parameter is deprecated and " - "will be removed in a future version", - FutureWarning, stacklevel=2) - else: - tupleize_cols = False - - from pandas.io.formats.csvs import CSVFormatter - formatter = CSVFormatter(self, path_or_buf, - line_terminator=line_terminator, sep=sep, - encoding=encoding, - compression=compression, quoting=quoting, - na_rep=na_rep, float_format=float_format, - cols=columns, header=header, index=index, - index_label=index_label, mode=mode, - chunksize=chunksize, quotechar=quotechar, - tupleize_cols=tupleize_cols, - date_format=date_format, - doublequote=doublequote, - escapechar=escapechar, decimal=decimal) - formatter.save() - - if path_or_buf is None: - return formatter.path_or_buf.getvalue() - @Appender(_shared_docs['to_excel'] % _shared_doc_kwargs) def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='', float_format=None, columns=None, header=True, index=True, diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8da678e0adec0..3dfce88e7e8ed 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9161,6 +9161,107 @@ def first_valid_index(self): def last_valid_index(self): return self._find_valid_index('last') + def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, + columns=None, header=True, index=True, index_label=None, + mode='w', encoding=None, compression=None, quoting=None, + quotechar='"', line_terminator='\n', chunksize=None, + tupleize_cols=None, date_format=None, doublequote=True, + escapechar=None, decimal='.'): + r"""Export to a comma-separated values (CSV) file + + Parameters + ---------- + path_or_buf : string or file handle, default None + File path or object, if None is provided the result is returned as + a 
string. + sep : character, default ',' + Field delimiter for the output file. + na_rep : string, default '' + Missing data representation + float_format : string, default None + Format string for floating point numbers + columns : sequence, optional + Columns to write + header : boolean or list of string, default True + Write out the column names. If a list of strings is given it is + assumed to be aliases for the column names + index : boolean, default True + Write row names (index) + index_label : string or sequence, or False, default None + Column label for index column(s) if desired. If None is given, and + `header` and `index` are True, then the index names are used. A + sequence should be given if the DataFrame uses MultiIndex. If + False do not print fields for index names. Use index_label=False + for easier importing in R + mode : str + Python write mode, default 'w' + encoding : string, optional + A string representing the encoding to use in the output file, + defaults to 'ascii' on Python 2 and 'utf-8' on Python 3. + compression : {'infer', 'gzip', 'bz2', 'xz', None}, default None + If 'infer' and `path_or_buf` is path-like, then detect compression + from the following extensions: '.gz', '.bz2' or '.xz' + (otherwise no compression). + line_terminator : string, default ``'\n'`` + The newline character or character sequence to use in the output + file + quoting : optional constant from csv module + defaults to csv.QUOTE_MINIMAL. If you have set a `float_format` + then floats are converted to strings and thus csv.QUOTE_NONNUMERIC + will treat them as non-numeric + quotechar : string (length 1), default '\"' + character used to quote fields + doublequote : boolean, default True + Control quoting of `quotechar` inside a field + escapechar : string (length 1), default None + character used to escape `sep` and `quotechar` when appropriate + chunksize : int or None + rows to write at a time + tupleize_cols : boolean, default False + .. 
deprecated:: 0.21.0 + This argument will be removed and will always write each row + of the multi-index as a separate row in the CSV file. + + Write MultiIndex columns as a list of tuples (if True) or in + the new, expanded format, where each MultiIndex column is a row + in the CSV (if False). + date_format : string, default None + Format string for datetime objects + decimal: string, default '.' + Character recognized as decimal separator. E.g. use ',' for + European data + + """ + + from pandas.core.frame import DataFrame + from pandas.io.formats.csvs import CSVFormatter + + df = self if isinstance(self, DataFrame) else DataFrame(self) + + if tupleize_cols is not None: + warnings.warn("The 'tupleize_cols' parameter is deprecated and " + "will be removed in a future version", + FutureWarning, stacklevel=2) + else: + tupleize_cols = False + + formatter = CSVFormatter(df, path_or_buf, + line_terminator=line_terminator, sep=sep, + encoding=encoding, + compression=compression, quoting=quoting, + na_rep=na_rep, float_format=float_format, + cols=columns, header=header, index=index, + index_label=index_label, mode=mode, + chunksize=chunksize, quotechar=quotechar, + tupleize_cols=tupleize_cols, + date_format=date_format, + doublequote=doublequote, + escapechar=escapechar, decimal=decimal) + formatter.save() + + if path_or_buf is None: + return formatter.path_or_buf.getvalue() + def _doc_parms(cls): """Return a tuple of the doc parms.""" diff --git a/pandas/core/series.py b/pandas/core/series.py index 0bdb9d9cc23a6..df2528b00df4a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -3760,24 +3760,30 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, return result - def to_csv(self, path=None, index=True, sep=",", na_rep='', - float_format=None, header=False, index_label=None, + def to_csv(self, path_or_buf=None, index=True, sep=",", na_rep='', + float_format=None, header=None, index_label=None, mode='w', encoding=None, compression=None, 
date_format=None, - decimal='.'): - """ - Write Series to a comma-separated values (csv) file + decimal='.', **kwargs): + """Export to a comma-separated values (CSV) file + + .. deprecated:: 0.24.0 + The signature will aligned to that of :func:`DataFrame.to_csv`. + + :func:`Series.to_csv` will align its signature with that of + `DataFrame.to_csv`. Please pass in keyword arguments in accordance + with that signature instead. Parameters ---------- - path : string or file handle, default None + path_or_buf : string or file handle, default None File path or object, if None is provided the result is returned as a string. na_rep : string, default '' Missing data representation float_format : string, default None Format string for floating point numbers - header : boolean, default False - Write out series name + header : boolean, default None + Write out Series name. By default, the name will be omitted. index : boolean, default True Write row names (index) index_label : string or sequence, default None @@ -3800,15 +3806,47 @@ def to_csv(self, path=None, index=True, sep=",", na_rep='', Character recognized as decimal separator. E.g. use ',' for European data """ + from pandas.core.frame import DataFrame df = DataFrame(self) - # result is only a string if no path provided, otherwise None - result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep, - float_format=float_format, header=header, - index_label=index_label, mode=mode, - encoding=encoding, compression=compression, - date_format=date_format, decimal=decimal) - if path is None: + + new_path_key = "path_or_buf" + old_path_key = "path" + emit_warning = False + + # For backwards compatibility, override the `path_of_buf` + # argument if a `path` keyword argument is provided. 
+ if kwargs.get(old_path_key, None) is not None: + kwargs[new_path_key] = kwargs.pop(old_path_key) + emit_warning = True + + if header is None: + emit_warning = True + header = False + + if emit_warning: + warnings.warn("The signature of `Series.to_csv` will be " + "aligned to that of `DataFrame.to_csv` in the " + "future. Note that some of the default arguments " + "and argument names are different, so please refer " + "to the documentation for `DataFrame.to_csv` when " + "changing your function calls.", + FutureWarning, stacklevel=2) + header = False + + to_csv_kwargs = dict(path_or_buf=path_or_buf, index=index, sep=sep, + na_rep=na_rep, float_format=float_format, + header=header, index_label=index_label, + mode=mode, encoding=encoding, + compression=compression, + date_format=date_format, + decimal=decimal) + to_csv_kwargs.update(**kwargs) + + # Result is only a string if no path provided, otherwise None. + result = df.to_csv(**to_csv_kwargs) + + if to_csv_kwargs[new_path_key] is None: return result @Appender(generic._shared_docs['to_excel'] % _shared_doc_kwargs) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 3ad25ae73109e..45ed424f006cd 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -893,14 +893,16 @@ def test_to_csv_line_terminators(self): def test_to_csv_from_csv_categorical(self): - # CSV with categoricals should result in the same output as when one - # would add a "normal" Series/DataFrame. + # CSV with Categoricals should result in the same output + # as when one would add a "normal" Series/DataFrame. 
s = Series(pd.Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])) s2 = Series(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']) res = StringIO() - s.to_csv(res) + + s.to_csv(res, header=False) exp = StringIO() - s2.to_csv(exp) + + s2.to_csv(exp, header=False) assert res.getvalue() == exp.getvalue() df = DataFrame({"s": s}) diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 814d794d45c18..6d653445b324b 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -37,7 +37,7 @@ def read_csv(self, path, **kwargs): def test_from_csv_deprecation(self): # see gh-17812 with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): @@ -45,10 +45,28 @@ def test_from_csv_deprecation(self): depr_ts = Series.from_csv(path) assert_series_equal(depr_ts, ts) + @pytest.mark.parametrize("arg", ["path", "header", "both"]) + def test_to_csv_deprecation(self, arg): + # see gh-19745 + with ensure_clean() as path: + if arg == "path": + kwargs = dict(path=path, header=False) + elif arg == "header": + kwargs = dict(path_or_buf=path) + else: # Both discrepancies match. + kwargs = dict(path=path) + + with tm.assert_produces_warning(FutureWarning): + self.ts.to_csv(**kwargs) + + # Make sure roundtrip still works. 
+ ts = self.read_csv(path) + assert_series_equal(self.ts, ts, check_names=False) + def test_from_csv(self): with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) ts = self.read_csv(path) assert_series_equal(self.ts, ts, check_names=False) @@ -65,7 +83,7 @@ def test_from_csv(self): ts_h = self.read_csv(path, header=0) assert ts_h.name == "ts" - self.series.to_csv(path) + self.series.to_csv(path, header=False) series = self.read_csv(path) assert_series_equal(self.series, series, check_names=False) @@ -92,13 +110,13 @@ def test_to_csv(self): import io with ensure_clean() as path: - self.ts.to_csv(path) + self.ts.to_csv(path, header=False) with io.open(path, newline=None) as f: lines = f.readlines() assert (lines[1] != '\n') - self.ts.to_csv(path, index=False) + self.ts.to_csv(path, index=False, header=False) arr = np.loadtxt(path) assert_almost_equal(arr, self.ts.values) @@ -106,7 +124,7 @@ def test_to_csv_unicode_index(self): buf = StringIO() s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")]) - s.to_csv(buf, encoding="UTF-8") + s.to_csv(buf, encoding="UTF-8", header=False) buf.seek(0) s2 = self.read_csv(buf, index_col=0, encoding="UTF-8") @@ -116,7 +134,7 @@ def test_to_csv_float_format(self): with ensure_clean() as filename: ser = Series([0.123456, 0.234567, 0.567567]) - ser.to_csv(filename, float_format="%.2f") + ser.to_csv(filename, float_format="%.2f", header=False) rs = self.read_csv(filename) xp = Series([0.12, 0.23, 0.57]) @@ -128,14 +146,15 @@ def test_to_csv_list_entries(self): split = s.str.split(r'\s+and\s+') buf = StringIO() - split.to_csv(buf) + split.to_csv(buf, header=False) def test_to_csv_path_is_none(self): - # GH 8215 + # see gh-8215 + # # Series.to_csv() was returning None, inconsistent with # DataFrame.to_csv() which returned string s = Series([1, 2, 3]) - csv_str = s.to_csv(path=None) + csv_str = s.to_csv(None, header=False) assert isinstance(csv_str, str) @pytest.mark.parametrize('s,encoding', [ 
@@ -150,8 +169,8 @@ def test_to_csv_compression(self, s, encoding, compression): with ensure_clean() as filename: - s.to_csv(filename, compression=compression, encoding=encoding, - header=True) + s.to_csv(filename, compression=compression, + encoding=encoding, header=True) # test the round trip - to_csv -> read_csv result = pd.read_csv(filename, compression=compression, encoding=encoding, index_col=0, squeeze=True) diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index 61f838eeeeb30..21f34aa3995d7 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -2,6 +2,7 @@ import pytest import os +import warnings import collections from functools import partial @@ -15,6 +16,14 @@ import pandas.util.testing as tm +def catch_to_csv_depr(): + # Catching warnings because Series.to_csv has + # been deprecated. Remove this context when + # Series.to_csv has been aligned. + + return warnings.catch_warnings(record=True) + + def test_mut_exclusive(): msg = "mutually exclusive arguments: '[ab]' and '[ab]'" with tm.assert_raises_regex(TypeError, msg): @@ -219,11 +228,12 @@ def test_standardize_mapping(): def test_compression_size(obj, method, compression_only): with tm.ensure_clean() as filename: - getattr(obj, method)(filename, compression=compression_only) - compressed = os.path.getsize(filename) - getattr(obj, method)(filename, compression=None) - uncompressed = os.path.getsize(filename) - assert uncompressed > compressed + with catch_to_csv_depr(): + getattr(obj, method)(filename, compression=compression_only) + compressed = os.path.getsize(filename) + getattr(obj, method)(filename, compression=None) + uncompressed = os.path.getsize(filename) + assert uncompressed > compressed @pytest.mark.parametrize('obj', [ @@ -236,16 +246,22 @@ def test_compression_size_fh(obj, method, compression_only): with tm.ensure_clean() as filename: f, _handles = _get_handle(filename, 'w', compression=compression_only) - with f: - getattr(obj, method)(f) - 
assert not f.closed + + with catch_to_csv_depr(): + with f: + getattr(obj, method)(f) + assert not f.closed assert f.closed compressed = os.path.getsize(filename) + with tm.ensure_clean() as filename: f, _handles = _get_handle(filename, 'w', compression=None) - with f: - getattr(obj, method)(f) - assert not f.closed + + with catch_to_csv_depr(): + with f: + getattr(obj, method)(f) + assert not f.closed + assert f.closed uncompressed = os.path.getsize(filename) assert uncompressed > compressed
Warns about aligning `Series.to_csv`'s signature with that of `DataFrame.to_csv`'s. In anticipation, we have moved `DataFrame.to_csv` to `generic.py` so that we can later delete the `Series.to_csv` implementation, and allow it to adopt `DataFrame`'s `to_csv` due to inheritance. Closes #19745. cc @dahlbaek
https://api.github.com/repos/pandas-dev/pandas/pulls/21868
2018-07-12T00:27:23Z
2018-07-25T16:48:39Z
null
2023-05-11T01:18:04Z
BUG: np array indexer modifed in iloc
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index ed4022d422b4d..e255f1208869e 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -363,8 +363,7 @@ Indexing - ``DataFrame.__getitem__`` now accepts dictionaries and dictionary keys as list-likes of labels, consistently with ``Series.__getitem__`` (:issue:`21294`) - Fixed ``DataFrame[np.nan]`` when columns are non-unique (:issue:`21428`) - Bug when indexing :class:`DatetimeIndex` with nanosecond resolution dates and timezones (:issue:`11679`) - -- +- Bug where indexing with a Numpy array containing negative values would mutate the indexer (:issue:`21867`) Missing ^^^^^^^ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 80396a9149d5a..ec06099e3bbd2 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2596,6 +2596,7 @@ def maybe_convert_indices(indices, n): mask = indices < 0 if mask.any(): + indices = indices.copy() indices[mask] += n mask = (indices >= n) | (indices < 0) diff --git a/pandas/tests/indexing/test_iloc.py b/pandas/tests/indexing/test_iloc.py index 49047e1da0996..81397002abd2b 100644 --- a/pandas/tests/indexing/test_iloc.py +++ b/pandas/tests/indexing/test_iloc.py @@ -126,6 +126,21 @@ def test_iloc_getitem_neg_int(self): typs=['labels', 'mixed', 'ts', 'floats', 'empty'], fails=IndexError) + def test_iloc_array_not_mutating_negative_indices(self): + + # GH 21867 + array_with_neg_numbers = np.array([1, 2, -1]) + array_copy = array_with_neg_numbers.copy() + df = pd.DataFrame({ + 'A': [100, 101, 102], + 'B': [103, 104, 105], + 'C': [106, 107, 108]}, + index=[1, 2, 3]) + df.iloc[array_with_neg_numbers] + tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) + df.iloc[:, array_with_neg_numbers] + tm.assert_numpy_array_equal(array_with_neg_numbers, array_copy) + def test_iloc_getitem_list_int(self): # list of ints
- [ x ] closes #20852 - [ x ] tests passed - [ x ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/21867
2018-07-11T21:55:39Z
2018-07-14T14:50:22Z
2018-07-14T14:50:22Z
2018-07-14T14:50:29Z
ENH: Change DatetimeIndex.time to also return timezone info
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index 9e01296d9c9c7..d5824ee2283ea 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -723,7 +723,7 @@ There are several time/date properties that one can access from ``Timestamp`` or microsecond,"The microseconds of the datetime" nanosecond,"The nanoseconds of the datetime" date,"Returns datetime.date (does not contain timezone information)" - time,"Returns datetime.time (does not contain timezone information)" + time,"Returns datetime.time (contains timezone information)" dayofyear,"The ordinal day of year" weekofyear,"The week ordinal of the year" week,"The week ordinal of the year" diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..a67eef11649c2 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -84,7 +84,7 @@ Other Enhancements - :meth:`Series.nlargest`, :meth:`Series.nsmallest`, :meth:`DataFrame.nlargest`, and :meth:`DataFrame.nsmallest` now accept the value ``"all"`` for the ``keep`` argument. This keeps all ties for the nth largest/smallest value (:issue:`16818`) - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) -- +- :attr:`DatetimeIndex.time` now also returns timezone information. (:issue:`21358`) .. 
_whatsnew_0240.api_breaking: diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 281e497945c5f..bdf7808d0bb5d 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -74,7 +74,7 @@ cdef inline object create_time_from_ts( int64_t value, pandas_datetimestruct dts, object tz, object freq): """ convenience routine to construct a datetime.time from its parts """ - return time(dts.hour, dts.min, dts.sec, dts.us) + return time(dts.hour, dts.min, dts.sec, dts.us, tz) def ints_to_pydatetime(ndarray[int64_t] arr, tz=None, freq=None, diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 0d1c5241c5a93..a6521b8e16ac8 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -576,16 +576,9 @@ def day_name(self, locale=None): def time(self): """ Returns numpy array of datetime.time. The time part of the Timestamps. + Time returned is in local time with associated timezone information. """ - # If the Timestamps have a timezone that is not UTC, - # convert them into their i8 representation while - # keeping their timezone and not using UTC - if self.tz is not None and self.tz is not utc: - timestamps = self._local_timestamps() - else: - timestamps = self.asi8 - - return tslib.ints_to_pydatetime(timestamps, box="time") + return tslib.ints_to_pydatetime(self.asi8, self.tz, box="time") @property def date(self): diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 3697d183d2fc6..6f5ed365c55af 100644 --- a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -718,15 +718,16 @@ def test_date_accessor(self, dtype): tm.assert_numpy_array_equal(result, expected) - @pytest.mark.parametrize("dtype", [ - None, 'datetime64[ns, CET]', - 'datetime64[ns, EST]', 'datetime64[ns, UTC]' + @pytest.mark.parametrize("tz", [ + None, pytz.timezone('CET'), pytz.timezone('EST'), + pytz.timezone('UTC') ]) - 
def test_time_accessor(self, dtype): + def test_time_accessor(self, tz): # Regression test for GH#21267 - expected = np.array([time(10, 20, 30), pd.NaT]) + # Changed test to account for GH#21358 + expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT]) - index = DatetimeIndex(['2018-06-04 10:20:30', pd.NaT], dtype=dtype) + index = DatetimeIndex(['2018-06-04 10:20:30', pd.NaT], tz=tz) result = index.time tm.assert_numpy_array_equal(result, expected)
- [ ] closes #21358 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Mainly just reverted changes from GH#21281 and updated docs. Also had to change one test added in GH#21281.
https://api.github.com/repos/pandas-dev/pandas/pulls/21865
2018-07-11T18:50:57Z
2018-07-19T23:13:34Z
null
2018-07-25T18:38:53Z
Replaced PANDAS_DATETIMEUNIT with NPY_DATETIMEUNIT
diff --git a/pandas/_libs/src/datetime/np_datetime.c b/pandas/_libs/src/datetime/np_datetime.c index 89753ccf7d773..11e1e6522ef3b 100644 --- a/pandas/_libs/src/datetime/np_datetime.c +++ b/pandas/_libs/src/datetime/np_datetime.c @@ -21,6 +21,7 @@ This file is derived from NumPy 1.7. See NUMPY_LICENSE.txt #include <numpy/arrayobject.h> #include <numpy/arrayscalars.h> +#include <numpy/ndarraytypes.h> #include "np_datetime.h" #if PY_MAJOR_VERSION >= 3 @@ -511,21 +512,21 @@ int convert_pydatetime_to_datetimestruct(PyObject *obj, return -1; } -npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, +npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, pandas_datetimestruct *d) { - npy_datetime result = PANDAS_DATETIME_NAT; + npy_datetime result = NPY_DATETIME_NAT; convert_datetimestruct_to_datetime(fr, d, &result); return result; } -void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, +void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, pandas_datetimestruct *result) { convert_datetime_to_datetimestruct(fr, val, result); } void pandas_timedelta_to_timedeltastruct(npy_timedelta val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result) { convert_timedelta_to_timedeltastruct(fr, val, result); } @@ -537,15 +538,15 @@ void pandas_timedelta_to_timedeltastruct(npy_timedelta val, * * Returns 0 on success, -1 on failure. 
*/ -int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, +int convert_datetimestruct_to_datetime(NPY_DATETIMEUNIT base, const pandas_datetimestruct *dts, npy_datetime *out) { npy_datetime ret; - if (base == PANDAS_FR_Y) { + if (base == NPY_FR_Y) { /* Truncate to the year */ ret = dts->year - 1970; - } else if (base == PANDAS_FR_M) { + } else if (base == NPY_FR_M) { /* Truncate to the month */ ret = 12 * (dts->year - 1970) + (dts->month - 1); } else { @@ -553,7 +554,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, npy_int64 days = get_datetimestruct_days(dts); switch (base) { - case PANDAS_FR_W: + case NPY_FR_W: /* Truncate to weeks */ if (days >= 0) { ret = days / 7; @@ -561,31 +562,31 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, ret = (days - 6) / 7; } break; - case PANDAS_FR_D: + case NPY_FR_D: ret = days; break; - case PANDAS_FR_h: + case NPY_FR_h: ret = days * 24 + dts->hour; break; - case PANDAS_FR_m: + case NPY_FR_m: ret = (days * 24 + dts->hour) * 60 + dts->min; break; - case PANDAS_FR_s: + case NPY_FR_s: ret = ((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec; break; - case PANDAS_FR_ms: + case NPY_FR_ms: ret = (((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * 1000 + dts->us / 1000; break; - case PANDAS_FR_us: + case NPY_FR_us: ret = (((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * 1000000 + dts->us; break; - case PANDAS_FR_ns: + case NPY_FR_ns: ret = ((((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * 1000000 + @@ -593,7 +594,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, 1000 + dts->ps / 1000; break; - case PANDAS_FR_ps: + case NPY_FR_ps: ret = ((((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * 1000000 + @@ -601,7 +602,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, 1000000 + dts->ps; break; - case PANDAS_FR_fs: + case NPY_FR_fs: /* only 2.6 hours */ ret = (((((days * 24 + dts->hour) * 60 + dts->min) 
* 60 + dts->sec) * @@ -612,7 +613,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, 1000 + dts->as / 1000; break; - case PANDAS_FR_as: + case NPY_FR_as: /* only 9.2 secs */ ret = (((((days * 24 + dts->hour) * 60 + dts->min) * 60 + dts->sec) * @@ -640,7 +641,7 @@ int convert_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT base, /* * Converts a datetime based on the given metadata into a datetimestruct */ -int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, +int convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, npy_datetime dt, pandas_datetimestruct *out) { npy_int64 perday; @@ -656,11 +657,11 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, * for negative values. */ switch (base) { - case PANDAS_FR_Y: + case NPY_FR_Y: out->year = 1970 + dt; break; - case PANDAS_FR_M: + case NPY_FR_M: if (dt >= 0) { out->year = 1970 + dt / 12; out->month = dt % 12 + 1; @@ -670,16 +671,16 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, } break; - case PANDAS_FR_W: + case NPY_FR_W: /* A week is 7 days */ set_datetimestruct_days(dt * 7, out); break; - case PANDAS_FR_D: + case NPY_FR_D: set_datetimestruct_days(dt, out); break; - case PANDAS_FR_h: + case NPY_FR_h: perday = 24LL; if (dt >= 0) { @@ -693,7 +694,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->hour = dt; break; - case PANDAS_FR_m: + case NPY_FR_m: perday = 24LL * 60; if (dt >= 0) { @@ -708,7 +709,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->min = dt % 60; break; - case PANDAS_FR_s: + case NPY_FR_s: perday = 24LL * 60 * 60; if (dt >= 0) { @@ -724,7 +725,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->sec = dt % 60; break; - case PANDAS_FR_ms: + case NPY_FR_ms: perday = 24LL * 60 * 60 * 1000; if (dt >= 0) { @@ -741,7 +742,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->us = (dt % 1000LL) * 1000; break; - case PANDAS_FR_us: + case NPY_FR_us: 
perday = 24LL * 60LL * 60LL * 1000LL * 1000LL; if (dt >= 0) { @@ -758,7 +759,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->us = dt % 1000000LL; break; - case PANDAS_FR_ns: + case NPY_FR_ns: perday = 24LL * 60LL * 60LL * 1000LL * 1000LL * 1000LL; if (dt >= 0) { @@ -776,7 +777,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->ps = (dt % 1000LL) * 1000; break; - case PANDAS_FR_ps: + case NPY_FR_ps: perday = 24LL * 60 * 60 * 1000 * 1000 * 1000 * 1000; if (dt >= 0) { @@ -794,7 +795,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, out->ps = dt % 1000000LL; break; - case PANDAS_FR_fs: + case NPY_FR_fs: /* entire range is only +- 2.6 hours */ if (dt >= 0) { out->hour = dt / (60 * 60 * 1000000000000000LL); @@ -821,7 +822,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, } break; - case PANDAS_FR_as: + case NPY_FR_as: /* entire range is only +- 9.2 seconds */ if (dt >= 0) { out->sec = (dt / 1000000000000000000LL) % 60; @@ -861,7 +862,7 @@ int convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, * * Returns 0 on success, -1 on failure. */ -int convert_timedelta_to_timedeltastruct(PANDAS_DATETIMEUNIT base, +int convert_timedelta_to_timedeltastruct(NPY_DATETIMEUNIT base, npy_timedelta td, pandas_timedeltastruct *out) { npy_int64 frac; @@ -874,7 +875,7 @@ int convert_timedelta_to_timedeltastruct(PANDAS_DATETIMEUNIT base, memset(out, 0, sizeof(pandas_timedeltastruct)); switch (base) { - case PANDAS_FR_ns: + case NPY_FR_ns: // put frac in seconds if (td < 0 && td % (1000LL * 1000LL * 1000LL) != 0) diff --git a/pandas/_libs/src/datetime/np_datetime.h b/pandas/_libs/src/datetime/np_datetime.h index b6c0852bfe764..5644ac036f198 100644 --- a/pandas/_libs/src/datetime/np_datetime.h +++ b/pandas/_libs/src/datetime/np_datetime.h @@ -19,29 +19,6 @@ This file is derived from NumPy 1.7. 
See NUMPY_LICENSE.txt #include <numpy/ndarraytypes.h> -typedef enum { - PANDAS_FR_Y = 0, // Years - PANDAS_FR_M = 1, // Months - PANDAS_FR_W = 2, // Weeks - // Gap where NPY_FR_B was - PANDAS_FR_D = 4, // Days - PANDAS_FR_h = 5, // hours - PANDAS_FR_m = 6, // minutes - PANDAS_FR_s = 7, // seconds - PANDAS_FR_ms = 8, // milliseconds - PANDAS_FR_us = 9, // microseconds - PANDAS_FR_ns = 10, // nanoseconds - PANDAS_FR_ps = 11, // picoseconds - PANDAS_FR_fs = 12, // femtoseconds - PANDAS_FR_as = 13, // attoseconds - PANDAS_FR_GENERIC = 14 // Generic, unbound units, can - // convert to anything -} PANDAS_DATETIMEUNIT; - -#define PANDAS_DATETIME_NUMUNITS 13 - -#define PANDAS_DATETIME_NAT NPY_MIN_INT64 - typedef struct { npy_int64 year; npy_int32 month, day, hour, min, sec, us, ps, as; @@ -61,14 +38,14 @@ extern const pandas_datetimestruct _NS_MAX_DTS; int convert_pydatetime_to_datetimestruct(PyObject *obj, pandas_datetimestruct *out); -npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, +npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, pandas_datetimestruct *d); -void pandas_datetime_to_datetimestruct(npy_datetime val, PANDAS_DATETIMEUNIT fr, +void pandas_datetime_to_datetimestruct(npy_datetime val, NPY_DATETIMEUNIT fr, pandas_datetimestruct *result); void pandas_timedelta_to_timedeltastruct(npy_timedelta val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result); int dayofweek(int y, int m, int d); @@ -103,7 +80,7 @@ add_minutes_to_datetimestruct(pandas_datetimestruct *dts, int minutes); int -convert_datetime_to_datetimestruct(PANDAS_DATETIMEUNIT base, +convert_datetime_to_datetimestruct(NPY_DATETIMEUNIT base, npy_datetime dt, pandas_datetimestruct *out); diff --git a/pandas/_libs/src/datetime/np_datetime_strings.c b/pandas/_libs/src/datetime/np_datetime_strings.c index 2ea69e2ac1636..b1852094c301e 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.c +++ 
b/pandas/_libs/src/datetime/np_datetime_strings.c @@ -27,7 +27,8 @@ This file implements string parsing and creation for NumPy datetime. #include <time.h> #include <numpy/arrayobject.h> -#include "numpy/arrayscalars.h" +#include <numpy/arrayscalars.h> +#include <numpy/ndarraytypes.h> #include "np_datetime.h" #include "np_datetime_strings.h" @@ -514,37 +515,36 @@ int parse_iso_8601_datetime(char *str, int len, * Provides a string length to use for converting datetime * objects with the given local and unit settings. */ -int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base) { +int get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base) { int len = 0; switch (base) { /* Generic units can only be used to represent NaT */ - /*case PANDAS_FR_GENERIC:*/ /* return 4;*/ - case PANDAS_FR_as: + case NPY_FR_as: len += 3; /* "###" */ - case PANDAS_FR_fs: + case NPY_FR_fs: len += 3; /* "###" */ - case PANDAS_FR_ps: + case NPY_FR_ps: len += 3; /* "###" */ - case PANDAS_FR_ns: + case NPY_FR_ns: len += 3; /* "###" */ - case PANDAS_FR_us: + case NPY_FR_us: len += 3; /* "###" */ - case PANDAS_FR_ms: + case NPY_FR_ms: len += 4; /* ".###" */ - case PANDAS_FR_s: + case NPY_FR_s: len += 3; /* ":##" */ - case PANDAS_FR_m: + case NPY_FR_m: len += 3; /* ":##" */ - case PANDAS_FR_h: + case NPY_FR_h: len += 3; /* "T##" */ - case PANDAS_FR_D: - case PANDAS_FR_W: + case NPY_FR_D: + case NPY_FR_W: len += 3; /* "-##" */ - case PANDAS_FR_M: + case NPY_FR_M: len += 3; /* "-##" */ - case PANDAS_FR_Y: + case NPY_FR_Y: len += 21; /* 64-bit year */ break; default: @@ -552,7 +552,7 @@ int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base) { break; } - if (base >= PANDAS_FR_h) { + if (base >= NPY_FR_h) { if (local) { len += 5; /* "+####" or "-####" */ } else { @@ -581,7 +581,7 @@ int get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base) { * string was too short). 
*/ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, - PANDAS_DATETIMEUNIT base) { + NPY_DATETIMEUNIT base) { char *substr = outstr, sublen = outlen; int tmplen; @@ -591,8 +591,8 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, * TODO: Could print weeks with YYYY-Www format if the week * epoch is a Monday. */ - if (base == PANDAS_FR_W) { - base = PANDAS_FR_D; + if (base == NPY_FR_W) { + base = NPY_FR_D; } /* YEAR */ @@ -614,7 +614,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= tmplen; /* Stop if the unit is years */ - if (base == PANDAS_FR_Y) { + if (base == NPY_FR_Y) { if (sublen > 0) { *substr = '\0'; } @@ -638,7 +638,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is months */ - if (base == PANDAS_FR_M) { + if (base == NPY_FR_M) { if (sublen > 0) { *substr = '\0'; } @@ -662,7 +662,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is days */ - if (base == PANDAS_FR_D) { + if (base == NPY_FR_D) { if (sublen > 0) { *substr = '\0'; } @@ -686,7 +686,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is hours */ - if (base == PANDAS_FR_h) { + if (base == NPY_FR_h) { goto add_time_zone; } @@ -707,7 +707,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is minutes */ - if (base == PANDAS_FR_m) { + if (base == NPY_FR_m) { goto add_time_zone; } @@ -728,7 +728,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is seconds */ - if (base == PANDAS_FR_s) { + if (base == NPY_FR_s) { goto add_time_zone; } @@ -753,7 +753,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 4; /* Stop if 
the unit is milliseconds */ - if (base == PANDAS_FR_ms) { + if (base == NPY_FR_ms) { goto add_time_zone; } @@ -774,7 +774,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is microseconds */ - if (base == PANDAS_FR_us) { + if (base == NPY_FR_us) { goto add_time_zone; } @@ -795,7 +795,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is nanoseconds */ - if (base == PANDAS_FR_ns) { + if (base == NPY_FR_ns) { goto add_time_zone; } @@ -816,7 +816,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is picoseconds */ - if (base == PANDAS_FR_ps) { + if (base == NPY_FR_ps) { goto add_time_zone; } @@ -837,7 +837,7 @@ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, sublen -= 3; /* Stop if the unit is femtoseconds */ - if (base == PANDAS_FR_fs) { + if (base == NPY_FR_fs) { goto add_time_zone; } diff --git a/pandas/_libs/src/datetime/np_datetime_strings.h b/pandas/_libs/src/datetime/np_datetime_strings.h index ef7fe200aa58e..ff1d26e5168b5 100644 --- a/pandas/_libs/src/datetime/np_datetime_strings.h +++ b/pandas/_libs/src/datetime/np_datetime_strings.h @@ -60,7 +60,7 @@ parse_iso_8601_datetime(char *str, int len, * objects with the given local and unit settings. 
*/ int -get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base); +get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base); /* * Converts an pandas_datetimestruct to an (almost) ISO 8601 @@ -74,6 +74,6 @@ get_datetime_iso_8601_strlen(int local, PANDAS_DATETIMEUNIT base); */ int make_iso_8601_datetime(pandas_datetimestruct *dts, char *outstr, int outlen, - PANDAS_DATETIMEUNIT base); + NPY_DATETIMEUNIT base); #endif // PANDAS__LIBS_SRC_DATETIME_NP_DATETIME_STRINGS_H_ diff --git a/pandas/_libs/src/period_helper.c b/pandas/_libs/src/period_helper.c index cb6f0a220fafe..5011d33d189c2 100644 --- a/pandas/_libs/src/period_helper.c +++ b/pandas/_libs/src/period_helper.c @@ -54,7 +54,7 @@ npy_int64 unix_date_from_ymd(int year, int month, int day) { dts.year = year; dts.month = month; dts.day = day; - unix_date = pandas_datetimestruct_to_datetime(PANDAS_FR_D, &dts); + unix_date = pandas_datetimestruct_to_datetime(NPY_FR_D, &dts); return unix_date; } @@ -151,7 +151,7 @@ static npy_int64 DtoB(pandas_datetimestruct *dts, static npy_int64 asfreq_DTtoA(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; ordinal = downsample_daytime(ordinal, af_info); - pandas_datetime_to_datetimestruct(ordinal, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts); if (dts.month > af_info->to_end) { return (npy_int64)(dts.year + 1 - 1970); } else { @@ -163,7 +163,7 @@ static int DtoQ_yq(npy_int64 ordinal, asfreq_info *af_info, int *year) { pandas_datetimestruct dts; int quarter; - pandas_datetime_to_datetimestruct(ordinal, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(ordinal, NPY_FR_D, &dts); if (af_info->to_end != 12) { dts.month -= af_info->to_end; if (dts.month <= 0) { @@ -192,7 +192,7 @@ static npy_int64 asfreq_DTtoM(npy_int64 ordinal, asfreq_info *af_info) { ordinal = downsample_daytime(ordinal, af_info); - pandas_datetime_to_datetimestruct(ordinal, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(ordinal, 
NPY_FR_D, &dts); return (npy_int64)((dts.year - 1970) * 12 + dts.month - 1); } @@ -205,7 +205,7 @@ static npy_int64 asfreq_DTtoB(npy_int64 ordinal, asfreq_info *af_info) { int roll_back; pandas_datetimestruct dts; npy_int64 unix_date = downsample_daytime(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); // This usage defines roll_back the opposite way from the others roll_back = 1 - af_info->is_end; @@ -265,7 +265,7 @@ static npy_int64 asfreq_WtoB(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; npy_int64 unix_date = asfreq_WtoDT(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); roll_back = af_info->is_end; return DtoB(&dts, roll_back, unix_date); } @@ -305,7 +305,7 @@ static npy_int64 asfreq_MtoB(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; npy_int64 unix_date = asfreq_MtoDT(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); roll_back = af_info->is_end; return DtoB(&dts, roll_back, unix_date); } @@ -360,7 +360,7 @@ static npy_int64 asfreq_QtoB(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; npy_int64 unix_date = asfreq_QtoDT(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); roll_back = af_info->is_end; return DtoB(&dts, roll_back, unix_date); } @@ -417,7 +417,7 @@ static npy_int64 asfreq_AtoB(npy_int64 ordinal, asfreq_info *af_info) { pandas_datetimestruct dts; npy_int64 unix_date = asfreq_AtoDT(ordinal, af_info); - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, &dts); + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, &dts); roll_back = af_info->is_end; return DtoB(&dts, roll_back, 
unix_date); } diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index e7f334b267461..eaa4eca44c15b 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -43,6 +43,7 @@ Numeric decoder derived from from TCL library #include <math.h> // NOLINT(build/include_order) #include <numpy/arrayobject.h> // NOLINT(build/include_order) #include <numpy/arrayscalars.h> // NOLINT(build/include_order) +#include <numpy/ndarraytypes.h> // NOLINT(build/include_order) #include <numpy/npy_math.h> // NOLINT(build/include_order) #include <stdio.h> // NOLINT(build/include_order) #include <ultrajson.h> // NOLINT(build/include_order) @@ -138,7 +139,7 @@ typedef struct __PyObjectEncoder { TypeContext basicTypeContext; int datetimeIso; - PANDAS_DATETIMEUNIT datetimeUnit; + NPY_DATETIMEUNIT datetimeUnit; // output format style for pandas data types int outputFormat; @@ -444,7 +445,7 @@ static void *PyUnicodeToUTF8(JSOBJ _obj, JSONTypeContext *tc, void *outValue, static void *PandasDateTimeStructToJSON(pandas_datetimestruct *dts, JSONTypeContext *tc, void *outValue, size_t *_outLen) { - PANDAS_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; + NPY_DATETIMEUNIT base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; if (((PyObjectEncoder *)tc->encoder)->datetimeIso) { PRINTMARK(); @@ -482,7 +483,7 @@ static void *NpyDateTimeScalarToJSON(JSOBJ _obj, JSONTypeContext *tc, PRINTMARK(); pandas_datetime_to_datetimestruct( - obj->obval, (PANDAS_DATETIMEUNIT)obj->obmeta.base, &dts); + obj->obval, (NPY_DATETIMEUNIT)obj->obmeta.base, &dts); return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); } @@ -512,7 +513,7 @@ static void *NpyDatetime64ToJSON(JSOBJ _obj, JSONTypeContext *tc, PRINTMARK(); pandas_datetime_to_datetimestruct((npy_datetime)GET_TC(tc)->longValue, - PANDAS_FR_ns, &dts); + NPY_FR_ns, &dts); return PandasDateTimeStructToJSON(&dts, tc, outValue, _outLen); } @@ 
-1864,15 +1865,15 @@ void Object_beginTypeContext(JSOBJ _obj, JSONTypeContext *tc) { base = ((PyObjectEncoder *)tc->encoder)->datetimeUnit; switch (base) { - case PANDAS_FR_ns: + case NPY_FR_ns: break; - case PANDAS_FR_us: + case NPY_FR_us: value /= 1000LL; break; - case PANDAS_FR_ms: + case NPY_FR_ms: value /= 1000000LL; break; - case PANDAS_FR_s: + case NPY_FR_s: value /= 1000000000LL; break; } @@ -2358,7 +2359,7 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { pyEncoder.npyType = -1; pyEncoder.npyValue = NULL; pyEncoder.datetimeIso = 0; - pyEncoder.datetimeUnit = PANDAS_FR_ms; + pyEncoder.datetimeUnit = NPY_FR_ms; pyEncoder.outputFormat = COLUMNS; pyEncoder.defaultHandler = 0; pyEncoder.basicTypeContext.newObj = NULL; @@ -2416,13 +2417,13 @@ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs) { if (sdateFormat != NULL) { if (strcmp(sdateFormat, "s") == 0) { - pyEncoder.datetimeUnit = PANDAS_FR_s; + pyEncoder.datetimeUnit = NPY_FR_s; } else if (strcmp(sdateFormat, "ms") == 0) { - pyEncoder.datetimeUnit = PANDAS_FR_ms; + pyEncoder.datetimeUnit = NPY_FR_ms; } else if (strcmp(sdateFormat, "us") == 0) { - pyEncoder.datetimeUnit = PANDAS_FR_us; + pyEncoder.datetimeUnit = NPY_FR_us; } else if (strcmp(sdateFormat, "ns") == 0) { - pyEncoder.datetimeUnit = PANDAS_FR_ns; + pyEncoder.datetimeUnit = NPY_FR_ns; } else { PyErr_Format(PyExc_ValueError, "Invalid value '%s' for option 'date_unit'", diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index a3b7d6c59200c..b948be606645d 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -21,11 +21,10 @@ PyDateTime_IMPORT from np_datetime cimport (check_dts_bounds, pandas_datetimestruct, pandas_datetime_to_datetimestruct, _string_to_dts, - PANDAS_DATETIMEUNIT, PANDAS_FR_ns, npy_datetime, dt64_to_dtstruct, dtstruct_to_dt64, get_datetime64_unit, get_datetime64_value, - pydatetime_to_dt64) + pydatetime_to_dt64, 
NPY_DATETIMEUNIT, NPY_FR_ns) from np_datetime import OutOfBoundsDatetime from util cimport (is_string_object, @@ -62,13 +61,13 @@ cdef inline int64_t get_datetime64_nanos(object val) except? -1: """ cdef: pandas_datetimestruct dts - PANDAS_DATETIMEUNIT unit + NPY_DATETIMEUNIT unit npy_datetime ival unit = get_datetime64_unit(val) ival = get_datetime64_value(val) - if unit != PANDAS_FR_ns: + if unit != NPY_FR_ns: pandas_datetime_to_datetimestruct(ival, unit, &dts) check_dts_bounds(&dts) ival = dtstruct_to_dt64(&dts) @@ -93,7 +92,7 @@ def ensure_datetime64ns(ndarray arr, copy=True): cdef: Py_ssize_t i, n = arr.size ndarray[int64_t] ivalues, iresult - PANDAS_DATETIMEUNIT unit + NPY_DATETIMEUNIT unit pandas_datetimestruct dts shape = (<object> arr).shape @@ -107,7 +106,7 @@ def ensure_datetime64ns(ndarray arr, copy=True): return result unit = get_datetime64_unit(arr.flat[0]) - if unit == PANDAS_FR_ns: + if unit == NPY_FR_ns: if copy: arr = arr.copy() result = arr diff --git a/pandas/_libs/tslibs/np_datetime.pxd b/pandas/_libs/tslibs/np_datetime.pxd index 1a0baa8271643..7c91c5551dc47 100644 --- a/pandas/_libs/tslibs/np_datetime.pxd +++ b/pandas/_libs/tslibs/np_datetime.pxd @@ -11,7 +11,7 @@ cdef extern from "numpy/ndarrayobject.h": cdef extern from "numpy/ndarraytypes.h": ctypedef struct PyArray_DatetimeMetaData: - PANDAS_DATETIMEUNIT base + NPY_DATETIMEUNIT base int64_t num cdef extern from "numpy/arrayscalars.h": @@ -34,24 +34,24 @@ cdef extern from "../src/datetime/np_datetime.h": int64_t days int32_t hrs, min, sec, ms, us, ns, seconds, microseconds, nanoseconds - ctypedef enum PANDAS_DATETIMEUNIT: - PANDAS_FR_Y - PANDAS_FR_M - PANDAS_FR_W - PANDAS_FR_D - PANDAS_FR_B - PANDAS_FR_h - PANDAS_FR_m - PANDAS_FR_s - PANDAS_FR_ms - PANDAS_FR_us - PANDAS_FR_ns - PANDAS_FR_ps - PANDAS_FR_fs - PANDAS_FR_as + ctypedef enum NPY_DATETIMEUNIT: + NPY_FR_Y + NPY_FR_M + NPY_FR_W + NPY_FR_D + NPY_FR_B + NPY_FR_h + NPY_FR_m + NPY_FR_s + NPY_FR_ms + NPY_FR_us + NPY_FR_ns + NPY_FR_ps + 
NPY_FR_fs + NPY_FR_as void pandas_datetime_to_datetimestruct(npy_datetime val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_datetimestruct *result) nogil @@ -70,7 +70,7 @@ cdef int64_t pydate_to_dt64(date val, pandas_datetimestruct *dts) cdef npy_datetime get_datetime64_value(object obj) nogil cdef npy_timedelta get_timedelta64_value(object obj) nogil -cdef PANDAS_DATETIMEUNIT get_datetime64_unit(object obj) nogil +cdef NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil cdef int _string_to_dts(object val, pandas_datetimestruct* dts, int* out_local, int* out_tzoffset) except? -1 diff --git a/pandas/_libs/tslibs/np_datetime.pyx b/pandas/_libs/tslibs/np_datetime.pyx index 7f861a50f03b8..e58ec0702adcc 100644 --- a/pandas/_libs/tslibs/np_datetime.pyx +++ b/pandas/_libs/tslibs/np_datetime.pyx @@ -19,16 +19,16 @@ cdef extern from "../src/datetime/np_datetime.h": int cmp_pandas_datetimestruct(pandas_datetimestruct *a, pandas_datetimestruct *b) - npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, + npy_datetime pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, pandas_datetimestruct *d ) nogil void pandas_datetime_to_datetimestruct(npy_datetime val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_datetimestruct *result) nogil void pandas_timedelta_to_timedeltastruct(npy_timedelta val, - PANDAS_DATETIMEUNIT fr, + NPY_DATETIMEUNIT fr, pandas_timedeltastruct *result ) nogil @@ -59,11 +59,11 @@ cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: return (<PyTimedeltaScalarObject*>obj).obval -cdef inline PANDAS_DATETIMEUNIT get_datetime64_unit(object obj) nogil: +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: """ returns the unit part of the dtype for a numpy datetime64 object. 
""" - return <PANDAS_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base + return <NPY_DATETIMEUNIT>(<PyDatetimeScalarObject*>obj).obmeta.base # ---------------------------------------------------------------------- # Comparison @@ -127,22 +127,22 @@ cdef inline check_dts_bounds(pandas_datetimestruct *dts): cdef inline int64_t dtstruct_to_dt64(pandas_datetimestruct* dts) nogil: """Convenience function to call pandas_datetimestruct_to_datetime - with the by-far-most-common frequency PANDAS_FR_ns""" - return pandas_datetimestruct_to_datetime(PANDAS_FR_ns, dts) + with the by-far-most-common frequency NPY_FR_ns""" + return pandas_datetimestruct_to_datetime(NPY_FR_ns, dts) cdef inline void dt64_to_dtstruct(int64_t dt64, pandas_datetimestruct* out) nogil: """Convenience function to call pandas_datetime_to_datetimestruct - with the by-far-most-common frequency PANDAS_FR_ns""" - pandas_datetime_to_datetimestruct(dt64, PANDAS_FR_ns, out) + with the by-far-most-common frequency NPY_FR_ns""" + pandas_datetime_to_datetimestruct(dt64, NPY_FR_ns, out) return cdef inline void td64_to_tdstruct(int64_t td64, pandas_timedeltastruct* out) nogil: """Convenience function to call pandas_timedelta_to_timedeltastruct - with the by-far-most-common frequency PANDAS_FR_ns""" - pandas_timedelta_to_timedeltastruct(td64, PANDAS_FR_ns, out) + with the by-far-most-common frequency NPY_FR_ns""" + pandas_timedelta_to_timedeltastruct(td64, NPY_FR_ns, out) return diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index 1796a764ae326..0ec5d25beeeb9 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -24,12 +24,11 @@ PyDateTime_IMPORT from np_datetime cimport (pandas_datetimestruct, dtstruct_to_dt64, dt64_to_dtstruct, - PANDAS_FR_D, pandas_datetime_to_datetimestruct, - PANDAS_DATETIMEUNIT) + NPY_DATETIMEUNIT, NPY_FR_D) cdef extern from "../src/datetime/np_datetime.h": - int64_t pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr, + 
int64_t pandas_datetimestruct_to_datetime(NPY_DATETIMEUNIT fr, pandas_datetimestruct *d ) nogil @@ -188,7 +187,7 @@ cdef int64_t get_period_ordinal(pandas_datetimestruct *dts, int freq) nogil: elif freq == FR_MTH: return (dts.year - 1970) * 12 + dts.month - 1 - unix_date = pandas_datetimestruct_to_datetime(PANDAS_FR_D, dts) + unix_date = pandas_datetimestruct_to_datetime(NPY_FR_D, dts) if freq >= FR_SEC: seconds = unix_date * 86400 + dts.hour * 3600 + dts.min * 60 + dts.sec @@ -315,7 +314,7 @@ cdef void date_info_from_days_and_time(pandas_datetimestruct *dts, # abstime >= 0.0 and abstime <= 86400 # Calculate the date - pandas_datetime_to_datetimestruct(unix_date, PANDAS_FR_D, dts) + pandas_datetime_to_datetimestruct(unix_date, NPY_FR_D, dts) # Calculate the time inttime = <int>abstime
progress towards #21852 - [X] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Working on my C-fu as well. There may be a better way to do this (ex: even adding NPY_DATETIMEUNIT to Cython/includes) but figured I'd offer this up for review @jbrockmendel
https://api.github.com/repos/pandas-dev/pandas/pulls/21863
2018-07-11T17:47:47Z
2018-07-12T10:02:51Z
2018-07-12T10:02:51Z
2018-07-12T21:18:55Z
CLN: miscellaneous cleanups / fixes
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..00379c7e9d511 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -301,6 +301,7 @@ Datetimelike ^^^^^^^^^^^^ - Fixed bug where two :class:`DateOffset` objects with different ``normalize`` attributes could evaluate as equal (:issue:`21404`) +- Fixed bug where :meth:`Timestamp.resolution` incorrectly returned 1-microsecond ``timedelta`` instead of 1-nanosecond :class:`Timedelta` (:issue:`21336`,:issue:`21365`) Timedelta ^^^^^^^^^ @@ -369,6 +370,7 @@ Missing ^^^^^^^ - Bug in :func:`DataFrame.fillna` where a ``ValueError`` would raise when one column contained a ``datetime64[ns, tz]`` dtype (:issue:`15522`) +- Bug in :func:`Series.hasnans` that could be incorrectly cached and return incorrect answers if null elements are introduced after an initial call (:issue:`19700`) MultiIndex ^^^^^^^^^^ diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 711db7cc8fbe2..864950ff03eae 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -407,6 +407,15 @@ cdef class _Timestamp(datetime): def asm8(self): return np.datetime64(self.value, 'ns') + @property + def resolution(self): + """ + Return resolution describing the smallest difference between two + times that can be represented by Timestamp object_state + """ + # GH#21336, GH#21365 + return Timedelta(nanoseconds=1) + def timestamp(self): """Return POSIX timestamp as float.""" # py27 compat, see GH#17329 diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index d747e69d1ff39..a0456630c9a0f 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -278,10 +278,15 @@ def is_list_like(obj): False >>> is_list_like(1) False + >>> is_list_like(np.array([2])) + True + >>> is_list_like(np.array(2))) + False """ return (isinstance(obj, Iterable) and - not isinstance(obj, 
string_and_binary_types)) + not isinstance(obj, string_and_binary_types) and + not (isinstance(obj, np.ndarray) and obj.ndim == 0)) def is_array_like(obj): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4e6ddf64145a8..6380944338010 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -7679,6 +7679,9 @@ def convert(v): try: if is_list_like(values[0]) or hasattr(values[0], 'len'): values = np.array([convert(v) for v in values]) + elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: + # GH#21861 + values = np.array([convert(v) for v in values]) else: values = convert(values) except: diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b8a89ac26c9d9..217bb3e7d1734 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -93,7 +93,7 @@ def _dt_index_cmp(opname, cls): def wrapper(self, other): func = getattr(super(DatetimeIndex, self), opname) - if isinstance(other, (datetime, compat.string_types)): + if isinstance(other, (datetime, np.datetime64, compat.string_types)): if isinstance(other, datetime): # GH#18435 strings get a pass from tzawareness compat self._assert_tzawareness_compat(other) @@ -105,8 +105,7 @@ def wrapper(self, other): else: if isinstance(other, list): other = DatetimeIndex(other) - elif not isinstance(other, (np.datetime64, np.ndarray, - Index, ABCSeries)): + elif not isinstance(other, (np.ndarray, Index, ABCSeries)): # Following Timestamp convention, __eq__ is all-False # and __ne__ is all True, others raise TypeError. 
if opname == '__eq__': diff --git a/pandas/core/series.py b/pandas/core/series.py index a63c4be98f738..0bdb9d9cc23a6 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -166,6 +166,10 @@ class Series(base.IndexOpsMixin, generic.NDFrame): ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value', 'from_csv', 'valid']) + # Override cache_readonly bc Series is mutable + hasnans = property(base.IndexOpsMixin.hasnans.func, + doc=base.IndexOpsMixin.hasnans.__doc__) + def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py index 29618fb4dec52..ed2659973cc6a 100644 --- a/pandas/core/tools/timedeltas.py +++ b/pandas/core/tools/timedeltas.py @@ -85,7 +85,7 @@ def to_timedelta(arg, unit='ns', box=True, errors='raise'): elif isinstance(arg, ABCIndexClass): return _convert_listlike(arg, unit=unit, box=box, errors=errors, name=arg.name) - elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 0: + elif isinstance(arg, np.ndarray) and arg.ndim == 0: # extract array scalar and process below arg = arg.item() elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1: diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 65527ac1b278f..f81767156b255 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -67,13 +67,14 @@ def __getitem__(self): [ [], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]), - Series([]), Series(['a']).str]) + Series([]), Series(['a']).str, + np.array([2])]) def test_is_list_like_passes(ll): assert inference.is_list_like(ll) @pytest.mark.parametrize( - "ll", [1, '2', object(), str]) + "ll", [1, '2', object(), str, np.array(2)]) def test_is_list_like_fails(ll): assert not inference.is_list_like(ll) diff --git a/pandas/tests/scalar/timestamp/test_timestamp.py b/pandas/tests/scalar/timestamp/test_timestamp.py index 
5272059163a07..4172bfd41b9db 100644 --- a/pandas/tests/scalar/timestamp/test_timestamp.py +++ b/pandas/tests/scalar/timestamp/test_timestamp.py @@ -172,6 +172,11 @@ def test_woy_boundary(self): 2005, 1, 1), (2005, 1, 2)]]) assert (result == [52, 52, 53, 53]).all() + def test_resolution(self): + # GH#21336, GH#21365 + dt = Timestamp('2100-01-01 00:00:00') + assert dt.resolution == Timedelta(nanoseconds=1) + class TestTimestampConstructors(object): diff --git a/pandas/tests/series/test_internals.py b/pandas/tests/series/test_internals.py index 79e23459ac992..506e7e14ffc4f 100644 --- a/pandas/tests/series/test_internals.py +++ b/pandas/tests/series/test_internals.py @@ -11,6 +11,7 @@ from pandas import Series from pandas.core.indexes.datetimes import Timestamp import pandas._libs.lib as lib +import pandas as pd from pandas.util.testing import assert_series_equal import pandas.util.testing as tm @@ -309,3 +310,16 @@ def test_convert_preserve_all_bool(self): r = s._convert(datetime=True, numeric=True) e = Series([False, True, False, False], dtype=bool) tm.assert_series_equal(r, e) + + +def test_hasnans_unchached_for_series(): + # GH#19700 + idx = pd.Index([0, 1]) + assert not idx.hasnans + assert 'hasnans' in idx._cache + ser = idx.to_series() + assert not ser.hasnans + assert not hasattr(ser, '_cache') + ser.iloc[-1] = np.nan + assert ser.hasnans + assert pd.Series.hasnans.__doc__ == pd.Index.hasnans.__doc__
#21365 made good progress on #21336 so I copied the test from there and referenced it in the whatsnew. closes #19700 closes #19011 closes #21336 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21861
2018-07-11T15:46:10Z
2018-07-12T00:13:56Z
2018-07-12T00:13:56Z
2018-07-12T00:44:21Z
[CLN] resolve circular Period dependency, prepare setup.py
diff --git a/pandas/_libs/__init__.py b/pandas/_libs/__init__.py index ad72980105c4f..b02c423b79f43 100644 --- a/pandas/_libs/__init__.py +++ b/pandas/_libs/__init__.py @@ -1,9 +1,5 @@ # -*- coding: utf-8 -*- # flake8: noqa -from .tslibs import iNaT, NaT, Timestamp, Timedelta, OutOfBoundsDatetime - -# TODO -# period is directly dependent on tslib and imports python -# modules, so exposing Period as an alias is currently not possible -# from period import Period +from .tslibs import ( + iNaT, NaT, Timestamp, Timedelta, OutOfBoundsDatetime, Period) diff --git a/pandas/_libs/tslibs/__init__.py b/pandas/_libs/tslibs/__init__.py index 22307f70ebe52..c7765a2c2b89c 100644 --- a/pandas/_libs/tslibs/__init__.py +++ b/pandas/_libs/tslibs/__init__.py @@ -4,5 +4,6 @@ from .conversion import normalize_date, localize_pydatetime, tz_convert_single from .nattype import NaT, iNaT from .np_datetime import OutOfBoundsDatetime +from .period import Period, IncompatibleFrequency from .timestamps import Timestamp from .timedeltas import delta_to_nanoseconds, ints_to_pytimedelta, Timedelta diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index a0be630aade9d..a53d794b48cfa 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -28,8 +28,6 @@ from timestamps import Timestamp from pandas._libs.properties import cache_readonly -from pandas.core.algorithms import unique # TODO: Avoid this non-cython import - # ---------------------------------------------------------------------- # Constants @@ -574,6 +572,10 @@ cdef class _FrequencyInferer(object): if len(self.ydiffs) > 1: return None + # lazy import to prevent circularity + # TODO: Avoid non-cython dependency + from pandas.core.algorithms import unique + if len(unique(self.fields['M'])) > 1: return None @@ -618,6 +620,10 @@ cdef class _FrequencyInferer(object): # if not lib.ismember(wdiffs, set([4, 5, -47, -49, -48])).all(): # return None + # lazy import to prevent 
circularity + # TODO: Avoid non-cython dependency + from pandas.core.algorithms import unique + weekdays = unique(self.index.weekday) if len(weekdays) > 1: return None diff --git a/pandas/tests/tslibs/test_api.py b/pandas/tests/tslibs/test_api.py index 387a63f61179d..fb9355dfed645 100644 --- a/pandas/tests/tslibs/test_api.py +++ b/pandas/tests/tslibs/test_api.py @@ -24,6 +24,8 @@ def test_namespace(): api = ['NaT', 'iNaT', 'OutOfBoundsDatetime', + 'Period', + 'IncompatibleFrequency', 'Timedelta', 'Timestamp', 'delta_to_nanoseconds', diff --git a/setup.py b/setup.py index 8018d71b74655..4910fcf292ca6 100755 --- a/setup.py +++ b/setup.py @@ -24,23 +24,6 @@ def is_platform_windows(): return sys.platform == 'win32' or sys.platform == 'cygwin' -def is_platform_linux(): - return sys.platform == 'linux2' - - -def is_platform_mac(): - return sys.platform == 'darwin' - - -min_cython_ver = '0.28.2' -try: - import Cython - ver = Cython.__version__ - _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver) -except ImportError: - _CYTHON_INSTALLED = False - - min_numpy_ver = '1.9.0' setuptools_kwargs = { 'install_requires': [ @@ -53,24 +36,29 @@ def is_platform_mac(): } +min_cython_ver = '0.28.2' +try: + import Cython + ver = Cython.__version__ + _CYTHON_INSTALLED = ver >= LooseVersion(min_cython_ver) +except ImportError: + _CYTHON_INSTALLED = False + +# The import of Extension must be after the import of Cython, otherwise +# we do not get the appropriately patched class. 
+# See https://cython.readthedocs.io/en/latest/src/reference/compilation.html from distutils.extension import Extension # noqa:E402 from distutils.command.build import build # noqa:E402 -from distutils.command.build_ext import build_ext as _build_ext # noqa:E402 try: if not _CYTHON_INSTALLED: raise ImportError('No supported version of Cython installed.') - try: - from Cython.Distutils.old_build_ext import old_build_ext as _build_ext # noqa:F811,E501 - except ImportError: - # Pre 0.25 - from Cython.Distutils import build_ext as _build_ext + from Cython.Distutils.old_build_ext import old_build_ext as _build_ext cython = True except ImportError: + from distutils.command.build_ext import build_ext as _build_ext cython = False - - -if cython: +else: try: try: from Cython import Tempita as tempita @@ -103,27 +91,30 @@ def is_platform_mac(): class build_ext(_build_ext): - def build_extensions(self): + @classmethod + def render_templates(cls, pxifiles): + for pxifile in pxifiles: + # build pxifiles first, template extension must be .pxi.in + assert pxifile.endswith('.pxi.in') + outfile = pxifile[:-3] - # if builing from c files, don't need to - # generate template output - if cython: - for pxifile in _pxifiles: - # build pxifiles first, template extension must be .pxi.in - assert pxifile.endswith('.pxi.in') - outfile = pxifile[:-3] - - if (os.path.exists(outfile) and - os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime): - # if .pxi.in is not updated, no need to output .pxi - continue + if (os.path.exists(outfile) and + os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime): + # if .pxi.in is not updated, no need to output .pxi + continue - with open(pxifile, "r") as f: - tmpl = f.read() - pyxcontent = tempita.sub(tmpl) + with open(pxifile, "r") as f: + tmpl = f.read() + pyxcontent = tempita.sub(tmpl) - with open(outfile, "w") as f: - f.write(pyxcontent) + with open(outfile, "w") as f: + f.write(pyxcontent) + + def build_extensions(self): + # if building from c files, 
don't need to + # generate template output + if cython: + self.render_templates(_pxifiles) numpy_incl = pkg_resources.resource_filename('numpy', 'core/include') @@ -360,7 +351,6 @@ def run(self): class CheckingBuildExt(build_ext): """ Subclass build_ext to get clearer report if Cython is necessary. - """ def check_cython_extensions(self, extensions): @@ -379,9 +369,11 @@ def build_extensions(self): class CythonCommand(build_ext): - """Custom distutils command subclassed from Cython.Distutils.build_ext + """ + Custom distutils command subclassed from Cython.Distutils.build_ext to compile pyx->c, and stop there. All this does is override the - C-compile method build_extension() with a no-op.""" + C-compile method build_extension() with a no-op. + """ def build_extension(self, ext): pass @@ -445,7 +437,6 @@ def srcpath(name=None, suffix='.pyx', subdir='src'): lib_depends.append('pandas/_libs/src/util.pxd') else: lib_depends = [] - plib_depends = [] common_include = ['pandas/_libs/src/klib', 'pandas/_libs/src'] @@ -471,8 +462,6 @@ def pxd(name): tseries_depends = np_datetime_headers + ['pandas/_libs/tslibs/np_datetime.pxd'] -# some linux distros require it -libraries = ['m'] if not is_platform_windows() else [] ext_data = { '_libs.algos': {
For a long time there has been a comment in `_libs.__init__` saying it would be nice to import `Period` directly but that is not possible due to circular imports. This resolves that issue. Also does some cleanup in setup.py, motivated by the goals of a) using `cythonize` and b) implementing test coverage for cython files. I've gotten those working locally, but they involve big diffs, so this gets some of the easy stuff out of the way.
https://api.github.com/repos/pandas-dev/pandas/pulls/21854
2018-07-11T03:17:59Z
2018-07-12T10:10:14Z
2018-07-12T10:10:14Z
2020-04-05T17:41:56Z
BUG: datetime rolling min/max segfaults when closed=left (#21704)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 034a56b2ac0cb..798e414b3e60c 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -398,6 +398,9 @@ Groupby/Resample/Rolling - - +- Multiple bugs in :func:`pandas.core.Rolling.min` with ``closed='left'` and a + datetime-like index leading to incorrect results and also segfault. (:issue:`21704`) + Sparse ^^^^^^ diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index 9e704a9bd8d3f..bd6cd476595f3 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1218,141 +1218,188 @@ cdef _roll_min_max(ndarray[numeric] input, int64_t win, int64_t minp, Moving min/max of 1d array of any numeric type along axis=0 ignoring NaNs. """ - cdef: - numeric ai - bint is_variable, should_replace - int64_t N, i, removed, window_i - Py_ssize_t nobs = 0 - deque Q[int64_t] ndarray[int64_t] starti, endi - ndarray[numeric, ndim=1] output - cdef: - int64_t* death - numeric* ring - numeric* minvalue - numeric* end - numeric* last - - cdef: - cdef numeric r + int64_t N + bint is_variable starti, endi, N, win, minp, is_variable = get_window_indexer( input, win, minp, index, closed) - output = np.empty(N, dtype=input.dtype) + if is_variable: + return _roll_min_max_variable(input, starti, endi, N, win, minp, + is_max) + else: + return _roll_min_max_fixed(input, starti, endi, N, win, minp, is_max) + +cdef _roll_min_max_variable(ndarray[numeric] input, + ndarray[int64_t] starti, + ndarray[int64_t] endi, + int64_t N, + int64_t win, + int64_t minp, + bint is_max): + cdef: + numeric ai + int64_t i, close_offset, curr_win_size + Py_ssize_t nobs = 0 + deque Q[int64_t] # min/max always the front + deque W[int64_t] # track the whole window for nobs compute + ndarray[double_t, ndim=1] output + + output = np.empty(N, dtype=float) Q = deque[int64_t]() + W = deque[int64_t]() - if is_variable: + with nogil: - with nogil: + # This is using a modified version of the C++ code 
in this + # SO post: http://bit.ly/2nOoHlY + # The original impl didn't deal with variable window sizes + # So the code was optimized for that - # This is using a modified version of the C++ code in this - # SO post: http://bit.ly/2nOoHlY - # The original impl didn't deal with variable window sizes - # So the code was optimized for that + for i from starti[0] <= i < endi[0]: + ai = init_mm(input[i], &nobs, is_max) - for i from starti[0] <= i < endi[0]: - ai = init_mm(input[i], &nobs, is_max) + # Discard previous entries if we find new min or max + if is_max: + while not Q.empty() and ((ai >= input[Q.back()]) or + (input[Q.back()] != input[Q.back()])): + Q.pop_back() + else: + while not Q.empty() and ((ai <= input[Q.back()]) or + (input[Q.back()] != input[Q.back()])): + Q.pop_back() + Q.push_back(i) + W.push_back(i) + + # if right is open then the first window is empty + close_offset = 0 if endi[0] > starti[0] else 1 + + for i in range(endi[0], endi[N-1]): + if not Q.empty(): + output[i-1+close_offset] = calc_mm( + minp, nobs, input[Q.front()]) + else: + output[i-1+close_offset] = NaN - if is_max: - while not Q.empty() and ai >= input[Q.back()]: - Q.pop_back() - else: - while not Q.empty() and ai <= input[Q.back()]: - Q.pop_back() - Q.push_back(i) + ai = init_mm(input[i], &nobs, is_max) + + # Discard previous entries if we find new min or max + if is_max: + while not Q.empty() and ((ai >= input[Q.back()]) or + (input[Q.back()] != input[Q.back()])): + Q.pop_back() + else: + while not Q.empty() and ((ai <= input[Q.back()]) or + (input[Q.back()] != input[Q.back()])): + Q.pop_back() - for i from endi[0] <= i < N: - output[i-1] = calc_mm(minp, nobs, input[Q.front()]) + # Maintain window/nobs retention + curr_win_size = endi[i + close_offset] - starti[i + close_offset] + while not Q.empty() and Q.front() <= i - curr_win_size: + Q.pop_front() + while not W.empty() and W.front() <= i - curr_win_size: + remove_mm(input[W.front()], &nobs) + W.pop_front() - ai = 
init_mm(input[i], &nobs, is_max) + Q.push_back(i) + W.push_back(i) - if is_max: - while not Q.empty() and ai >= input[Q.back()]: - Q.pop_back() - else: - while not Q.empty() and ai <= input[Q.back()]: - Q.pop_back() + output[N-1] = calc_mm(minp, nobs, input[Q.front()]) - while not Q.empty() and Q.front() <= i - (endi[i] - starti[i]): - Q.pop_front() + return output - Q.push_back(i) - output[N-1] = calc_mm(minp, nobs, input[Q.front()]) +cdef _roll_min_max_fixed(ndarray[numeric] input, + ndarray[int64_t] starti, + ndarray[int64_t] endi, + int64_t N, + int64_t win, + int64_t minp, + bint is_max): + cdef: + numeric ai + bint should_replace + int64_t i, removed, window_i, + Py_ssize_t nobs = 0 + int64_t* death + numeric* ring + numeric* minvalue + numeric* end + numeric* last + ndarray[double_t, ndim=1] output - else: - # setup the rings of death! - ring = <numeric *>malloc(win * sizeof(numeric)) - death = <int64_t *>malloc(win * sizeof(int64_t)) - - end = ring + win - last = ring - minvalue = ring - ai = input[0] - minvalue[0] = init_mm(input[0], &nobs, is_max) - death[0] = win - nobs = 0 + output = np.empty(N, dtype=float) + # setup the rings of death! 
+ ring = <numeric *>malloc(win * sizeof(numeric)) + death = <int64_t *>malloc(win * sizeof(int64_t)) + + end = ring + win + last = ring + minvalue = ring + ai = input[0] + minvalue[0] = init_mm(input[0], &nobs, is_max) + death[0] = win + nobs = 0 - with nogil: + with nogil: - for i in range(N): - ai = init_mm(input[i], &nobs, is_max) + for i in range(N): + ai = init_mm(input[i], &nobs, is_max) - if i >= win: - remove_mm(input[i - win], &nobs) + if i >= win: + remove_mm(input[i - win], &nobs) - if death[minvalue - ring] == i: - minvalue = minvalue + 1 - if minvalue >= end: - minvalue = ring + if death[minvalue - ring] == i: + minvalue = minvalue + 1 + if minvalue >= end: + minvalue = ring - if is_max: - should_replace = ai >= minvalue[0] - else: - should_replace = ai <= minvalue[0] - if should_replace: + if is_max: + should_replace = ai >= minvalue[0] + else: + should_replace = ai <= minvalue[0] + if should_replace: - minvalue[0] = ai - death[minvalue - ring] = i + win - last = minvalue + minvalue[0] = ai + death[minvalue - ring] = i + win + last = minvalue - else: + else: + if is_max: + should_replace = last[0] <= ai + else: + should_replace = last[0] >= ai + while should_replace: + if last == ring: + last = end + last -= 1 if is_max: should_replace = last[0] <= ai else: should_replace = last[0] >= ai - while should_replace: - if last == ring: - last = end - last -= 1 - if is_max: - should_replace = last[0] <= ai - else: - should_replace = last[0] >= ai - last += 1 - if last == end: - last = ring - last[0] = ai - death[last - ring] = i + win + last += 1 + if last == end: + last = ring + last[0] = ai + death[last - ring] = i + win - output[i] = calc_mm(minp, nobs, minvalue[0]) + output[i] = calc_mm(minp, nobs, minvalue[0]) - for i in range(minp - 1): - if numeric in cython.floating: - output[i] = NaN - else: - output[i] = 0 + for i in range(minp - 1): + if numeric in cython.floating: + output[i] = NaN + else: + output[i] = 0 - free(ring) - free(death) + free(ring) + 
free(death) - # print("output: {0}".format(output)) return output diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 78d1fa84cc5db..14966177978f4 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -464,6 +464,60 @@ def test_closed(self): with pytest.raises(ValueError): df.rolling(window=3, closed='neither') + @pytest.mark.parametrize("input_dtype", ['int', 'float']) + @pytest.mark.parametrize("func,closed,expected", [ + ('min', 'right', [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]), + ('min', 'both', [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]), + ('min', 'neither', [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]), + ('min', 'left', [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]), + ('max', 'right', [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + ('max', 'both', [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), + ('max', 'neither', [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]), + ('max', 'left', [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]) + ]) + def test_closed_min_max_datetime(self, input_dtype, + func, closed, + expected): + # see gh-21704 + ser = pd.Series(data=np.arange(10).astype(input_dtype), + index=pd.date_range('2000', periods=10)) + + result = getattr(ser.rolling('3D', closed=closed), func)() + expected = pd.Series(expected, index=ser.index) + tm.assert_series_equal(result, expected) + + def test_closed_uneven(self): + # see gh-21704 + ser = pd.Series(data=np.arange(10), + index=pd.date_range('2000', periods=10)) + + # uneven + ser = ser.drop(index=ser.index[[1, 5]]) + result = ser.rolling('3D', closed='left').min() + expected = pd.Series([np.nan, 0, 0, 2, 3, 4, 6, 6], + index=ser.index) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("func,closed,expected", [ + ('min', 'right', [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), + ('min', 'both', [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]), + ('min', 'neither', [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]), + ('min', 'left', [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]), + ('max', 'right', [np.nan, 1, 2, 3, 4, 5, 
6, 6, np.nan, np.nan]), + ('max', 'both', [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]), + ('max', 'neither', [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]), + ('max', 'left', [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]) + ]) + def test_closed_min_max_minp(self, func, closed, expected): + # see gh-21704 + ser = pd.Series(data=np.arange(10), + index=pd.date_range('2000', periods=10)) + ser[ser.index[-3:]] = np.nan + result = getattr(ser.rolling('3D', min_periods=2, closed=closed), + func)() + expected = pd.Series(expected, index=ser.index) + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('roller', ['1s', 1]) def tests_empty_df_rolling(self, roller): # GH 15819 Verifies that datetime and integer rolling windows can be
User reported that `df.rolling(to_offset('3D'), closed='left').max()` segfaults when df has a datetime index. The bug was in PR #19549. In that PR, in https://github.com/pandas-dev/pandas/blame/master/pandas/_libs/window.pyx#L1268 `i` is initialized to `endi[0]`, which is 0 when `closed=left`. So in the next line when it tries to set `output[i-1]` it goes out of bounds. In addition, there are 2 more bugs in the `roll_min_max` code. The second bug is that for variable size windows, the `nobs` is never updated when elements leave the window. The third bug is at the end of the fixed window where all output elements up to `minp` are initialized to 0 if the input is not float. This PR fixes all three of the aforementioned bugs, at the cost of casting the output array to floating point even if the input is integer. This is less than ideal if the output has no NaNs, but is still consistent with roll_sum behavior. - [x] closes #21704 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21853
2018-07-11T03:16:01Z
2018-07-20T15:58:35Z
2018-07-20T15:58:35Z
2018-09-04T23:49:30Z
ENH: support NaT values into datetime series for interpolation (#11701)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index d7feb6e547b22..df31c609d50bb 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -182,6 +182,8 @@ Other Enhancements - :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) - :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`) - :class:`DatetimeIndex` gained :attr:`DatetimeIndex.timetz` attribute. Returns local time with timezone information. (:issue:`21358`) +- Implement interpolating ``NaT`` values in ``datetime`` series (:issue:`11701`) +- .. _whatsnew_0240.api_breaking: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 9bdf34113ccf0..e0be913199a92 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -6316,6 +6316,14 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, raise NotImplementedError("Interpolation with NaNs in the index " "has not been implemented. 
Try filling " "those NaNs before interpolating.") + is_datetime = False + datetime_timezone = None + if is_datetime64_any_dtype(_maybe_transposed_self): + _datetime_nat_values = _maybe_transposed_self.isnull() + datetime_timezone = _maybe_transposed_self.dt.tz + _maybe_transposed_self = _maybe_transposed_self.astype('int') + _maybe_transposed_self[_datetime_nat_values] = np.nan + is_datetime = True data = _maybe_transposed_self._data new_data = data.interpolate(method=method, axis=ax, index=index, values=_maybe_transposed_self, limit=limit, @@ -6324,6 +6332,11 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, inplace=inplace, downcast=downcast, **kwargs) + if is_datetime: + new_data = self._constructor(new_data) + new_data = pd.to_datetime(new_data, utc=True).dt.tz_convert( + datetime_timezone) + if inplace: if axis == 1: new_data = self._constructor(new_data).T._data diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index ab3fdd8cbf84f..21a2a850549d1 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -1317,3 +1317,16 @@ def test_series_interpolate_intraday(self): result = ts.reindex(new_index).interpolate(method='time') tm.assert_numpy_array_equal(result.values, exp.values) + + @pytest.mark.parametrize("inplace", [True, False]) + def test_series_interpolate_nat(self, tz_naive_fixture, inplace): + # GH 11701 + expected = pd.Series(pd.date_range('2015-01-01', + '2015-01-30', tz=tz_naive_fixture)) + result = expected.copy() + result[[3, 4, 5, 13, 14, 15]] = pd.NaT + if inplace: + result.interpolate(inplace=inplace) + else: + result = result.interpolate() + tm.assert_series_equal(result, expected)
- [ ] closes #11701 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21851
2018-07-11T01:24:29Z
2018-11-23T03:30:48Z
null
2018-11-23T03:30:48Z
BUG: groupby with no non-empty groups, #21624
diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 65b9144c0ddc9..f54539a4945b8 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -235,7 +235,7 @@ def size(self): if ngroup: out = np.bincount(ids[ids != -1], minlength=ngroup) else: - out = ids + out = [] return Series(out, index=self.result_index, dtype='int64') diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 7fccf1f57a886..758f215c00fa5 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -782,3 +782,16 @@ def test_any_all_np_func(func): res = df.groupby('key')['val'].transform(func) tm.assert_series_equal(res, exp) + + +@pytest.mark.parametrize("input_df, expected", [ + (DataFrame({'groups': [np.nan, np.nan, np.nan], 'values': [1, 2, 3]}), + Series([np.nan, np.nan, np.nan], name='values')), + (DataFrame({'groups': [np.nan, 'A', 'A', 'B', 'B'], 'values': range(5)}), + Series([np.nan, 3, 3, 7, 7], name='values')) +]) +def test_transform_with_all_nan(input_df, expected): + # GH 21624 + grouped = input_df.groupby('groups') + result = grouped['values'].transform('sum') + tm.assert_series_equal(result, expected)
Should close #21624. Didn't see any issues running test_fast.sh on my laptop so I don't think it broke anything in the process. The problem here is that when there is only a null group, then `self.result_index` is empty, but `ids` is just a list of -1s, so a ValueError is raised when trying to create the series.
https://api.github.com/repos/pandas-dev/pandas/pulls/21849
2018-07-11T00:42:53Z
2018-11-23T03:41:48Z
null
2018-11-23T03:41:49Z
Propagate name in more cases when using `to_datetime`
diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index c8204faa55cf8..f89c894853ca6 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -98,13 +98,13 @@ def _convert_and_box_cache(arg, cache_array, box, errors, name=None): result = Series(arg).map(cache_array) if box: if errors == 'ignore': - return Index(result) + return Index(result, name=name) else: return DatetimeIndex(result, name=name) return result.values -def _return_parsed_timezone_results(result, timezones, box, tz): +def _return_parsed_timezone_results(result, timezones, tz): """ Return results from array_strptime if a %z or %Z directive was passed. @@ -114,28 +114,21 @@ def _return_parsed_timezone_results(result, timezones, box, tz): int64 date representations of the dates timezones : ndarray pytz timezone objects - box : boolean - True boxes result as an Index-like, False returns an ndarray tz : object None or pytz timezone object + Returns ------- tz_result : ndarray of parsed dates with timezone - Returns: - - - Index-like if box=True - - ndarray of Timestamps if box=False - """ if tz is not None: raise ValueError("Cannot pass a tz argument when " "parsing strings with timezone " "information.") - tz_results = np.array([Timestamp(res).tz_localize(zone) for res, zone - in zip(result, timezones)]) - if box: - from pandas import Index - return Index(tz_results) + tz_results = np.array([ + Timestamp(res).tz_localize(zone) + for res, zone in zip(result, timezones) + ]) return tz_results @@ -208,9 +201,9 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, if box: if errors == 'ignore': from pandas import Index - return Index(result) - - return DatetimeIndex(result, tz=tz, name=name) + return Index(result, name=name) + else: + return DatetimeIndex(result, tz=tz, name=name) return result elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, datetime, list, tuple, ' @@ -250,8 +243,13 @@ def 
_convert_listlike_datetimes(arg, box, format, name=None, tz=None, result, timezones = array_strptime( arg, format, exact=exact, errors=errors) if '%Z' in format or '%z' in format: - return _return_parsed_timezone_results( - result, timezones, box, tz) + tz_results = _return_parsed_timezone_results( + result, timezones, tz) + if box: + from pandas import Index + return Index(tz_results, name=name) + else: + return tz_results except tslibs.OutOfBoundsDatetime: if errors == 'raise': raise
Closes #21697 Haven't ran tests yet
https://api.github.com/repos/pandas-dev/pandas/pulls/21848
2018-07-10T23:45:03Z
2018-09-25T16:46:55Z
null
2018-09-25T16:46:55Z
PR: Allow diff & shift to take lists of periods? #19298
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 1c8b1ccd0fe8d..a1d48454b720a 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -5823,7 +5823,7 @@ def melt(self, id_vars=None, value_vars=None, var_name=None, # ---------------------------------------------------------------------- # Time series-related - def diff(self, periods=1, axis=0): + def diff(self, periods=1, axis=0, suffix=None): """ First discrete difference of element. @@ -5911,8 +5911,27 @@ def diff(self, periods=1, axis=0): 5 NaN NaN NaN """ bm_axis = self._get_block_manager_axis(axis) - new_data = self._data.diff(n=periods, axis=bm_axis) - return self._constructor(new_data) + + def _diff(x): + new_data = self._data.diff(n=x, axis=bm_axis) + return self._constructor(new_data) + + if isinstance(periods, int): + return _diff(periods) + elif isinstance(periods, list): + if axis == 1: + raise ValueError('cannot do multi period diffs with axis == 1') + if len(periods) == 0: + raise ValueError('Must provide non empty list to periods') + else: + result = _diff(periods[0]).add_suffix('{}{}'.format(suffix, periods[0])) # this needs to be smarter + for period in periods[1:]: + next_addition = _diff(period).add_suffix('{}{}'.format(suffix, period)) + result = result.join(next_addition) + return result + else: + raise TypeError('`periods` must be an integer or a list') + # ---------------------------------------------------------------------- # Function application diff --git a/pandas/core/series.py b/pandas/core/series.py index 96f9f6c87f969..555d75d65da1f 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1942,7 +1942,7 @@ def cov(self, other, min_periods=None): return nanops.nancov(this.values, other.values, min_periods=min_periods) - def diff(self, periods=1): + def diff(self, periods=1, suffix=None): """ First discrete difference of element. 
@@ -2002,8 +2002,25 @@ def diff(self, periods=1): 5 NaN dtype: float64 """ - result = algorithms.diff(com._values_from_object(self), periods) - return self._constructor(result, index=self.index).__finalize__(self) + def _diff(x): + result = algorithms.diff(com._values_from_object(self), x) + return self._constructor(result, index=self.index).__finalize__(self) + + if isinstance(periods, int): + return _diff(periods) + elif isinstance(periods, list): + if len(periods) == 0: + raise ValueError('Must provide non empty list to periods') + else: + result = _diff(periods[0]).to_frame().add_suffix('{}{}'.format(suffix, periods[0])) + for period in periods[1:]: + next_addition = _diff(period).to_frame().add_suffix('{}{}'.format(suffix, period)) + result = result.join(next_addition) + return result + else: + raise TypeError('`periods` must be an integer or a list') + + def autocorr(self, lag=1): """
- [x] implement for Series.diff and DataFrame.diff - [ ] implement for Series.shift and DataFrame.shift - [ ] closes #19298 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21847
2018-07-10T23:42:35Z
2018-10-11T01:56:10Z
null
2018-10-11T01:56:10Z
Move constructor helpers to EAMixins
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 0d1c5241c5a93..d7dfa73c53d8d 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from datetime import timedelta +from datetime import datetime, timedelta import warnings import numpy as np @@ -22,6 +22,8 @@ _ensure_int64) from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.algorithms import checked_add_with_arr + from pandas.tseries.frequencies import to_offset, DateOffset from pandas.tseries.offsets import Tick @@ -281,6 +283,39 @@ def _add_offset(self, offset): return type(self)(result, freq='infer') + def _sub_datelike(self, other): + # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]] + if isinstance(other, (DatetimeArrayMixin, np.ndarray)): + if isinstance(other, np.ndarray): + # if other is an ndarray, we assume it is datetime64-dtype + other = type(self)(other) + if not self._has_same_tz(other): + # require tz compat + raise TypeError("{cls} subtraction must have the same " + "timezones or no timezones" + .format(cls=type(self).__name__)) + result = self._sub_datelike_dti(other) + elif isinstance(other, (datetime, np.datetime64)): + assert other is not NaT + other = Timestamp(other) + if other is NaT: + return self - NaT + # require tz compat + elif not self._has_same_tz(other): + raise TypeError("Timestamp subtraction must have the same " + "timezones or no timezones") + else: + i8 = self.asi8 + result = checked_add_with_arr(i8, -other.value, + arr_mask=self._isnan) + result = self._maybe_mask_results(result, + fill_value=iNaT) + else: + raise TypeError("cannot subtract {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) + return result.view('timedelta64[ns]') + def _add_delta(self, delta): """ Add a timedelta-like, DateOffset, or TimedeltaIndex-like object @@ -517,6 +552,47 @@ def to_pydatetime(self): """ return 
tslib.ints_to_pydatetime(self.asi8, tz=self.tz) + def normalize(self): + """ + Convert times to midnight. + + The time component of the date-time is converted to midnight i.e. + 00:00:00. This is useful in cases, when the time does not matter. + Length is unaltered. The timezones are unaffected. + + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on Datetime Array/Index. + + Returns + ------- + DatetimeArray, DatetimeIndex or Series + The same type as the original data. Series will have the same + name and index. DatetimeIndex will have the same name. + + See Also + -------- + floor : Floor the datetimes to the specified freq. + ceil : Ceil the datetimes to the specified freq. + round : Round the datetimes to the specified freq. + + Examples + -------- + >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', + ... periods=3, tz='Asia/Calcutta') + >>> idx + DatetimeIndex(['2014-08-01 10:00:00+05:30', + '2014-08-01 11:00:00+05:30', + '2014-08-01 12:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq='H') + >>> idx.normalize() + DatetimeIndex(['2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq=None) + """ + new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) + return type(self)(new_values, freq='infer').tz_localize(self.tz) + # ----------------------------------------------------------------- # Properties - Vectorized Timestamp Properties/Methods diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 35baa3262d3dd..000775361061e 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -8,7 +8,7 @@ from pandas._libs.tslib import NaT, iNaT from pandas._libs.tslibs.period import ( Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, - get_period_field_arr, period_asfreq_arr) + get_period_field_arr, period_asfreq_arr, _quarter_to_myear) from 
pandas._libs.tslibs import period as libperiod from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds from pandas._libs.tslibs.fields import isleapyear_arr @@ -19,6 +19,9 @@ from pandas.core.dtypes.common import ( is_integer_dtype, is_float_dtype, is_period_dtype) from pandas.core.dtypes.dtypes import PeriodDtype +from pandas.core.dtypes.generic import ABCSeries + +import pandas.core.common as com from pandas.tseries import frequencies from pandas.tseries.offsets import Tick, DateOffset @@ -157,6 +160,25 @@ def _from_ordinals(cls, values, freq=None): result._freq = Period._maybe_convert_freq(freq) return result + @classmethod + def _generate_range(cls, start, end, periods, freq, fields): + if freq is not None: + freq = Period._maybe_convert_freq(freq) + + field_count = len(fields) + if com._count_not_none(start, end) > 0: + if field_count > 0: + raise ValueError('Can either instantiate from fields ' + 'or endpoints, but not both') + subarr, freq = _get_ordinal_range(start, end, periods, freq) + elif field_count > 0: + subarr, freq = _range_from_fields(freq=freq, **fields) + else: + raise ValueError('Not enough parameters to construct ' + 'Period range') + + return subarr, freq + # -------------------------------------------------------------------- # Vectorized analogues of Period properties @@ -371,3 +393,102 @@ def _add_comparison_methods(cls): PeriodArrayMixin._add_comparison_methods() + + +# ------------------------------------------------------------------- +# Constructor Helpers + +def _get_ordinal_range(start, end, periods, freq, mult=1): + if com._count_not_none(start, end, periods) != 2: + raise ValueError('Of the three parameters: start, end, and periods, ' + 'exactly two must be specified') + + if freq is not None: + _, mult = frequencies.get_freq_code(freq) + + if start is not None: + start = Period(start, freq) + if end is not None: + end = Period(end, freq) + + is_start_per = isinstance(start, Period) + is_end_per = isinstance(end, Period) + 
+ if is_start_per and is_end_per and start.freq != end.freq: + raise ValueError('start and end must have same freq') + if (start is NaT or end is NaT): + raise ValueError('start and end must not be NaT') + + if freq is None: + if is_start_per: + freq = start.freq + elif is_end_per: + freq = end.freq + else: # pragma: no cover + raise ValueError('Could not infer freq from start/end') + + if periods is not None: + periods = periods * mult + if start is None: + data = np.arange(end.ordinal - periods + mult, + end.ordinal + 1, mult, + dtype=np.int64) + else: + data = np.arange(start.ordinal, start.ordinal + periods, mult, + dtype=np.int64) + else: + data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) + + return data, freq + + +def _range_from_fields(year=None, month=None, quarter=None, day=None, + hour=None, minute=None, second=None, freq=None): + if hour is None: + hour = 0 + if minute is None: + minute = 0 + if second is None: + second = 0 + if day is None: + day = 1 + + ordinals = [] + + if quarter is not None: + if freq is None: + freq = 'Q' + base = frequencies.FreqGroup.FR_QTR + else: + base, mult = frequencies.get_freq_code(freq) + if base != frequencies.FreqGroup.FR_QTR: + raise AssertionError("base must equal FR_QTR") + + year, quarter = _make_field_arrays(year, quarter) + for y, q in compat.zip(year, quarter): + y, m = _quarter_to_myear(y, q, freq) + val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) + ordinals.append(val) + else: + base, mult = frequencies.get_freq_code(freq) + arrays = _make_field_arrays(year, month, day, hour, minute, second) + for y, mth, d, h, mn, s in compat.zip(*arrays): + ordinals.append(libperiod.period_ordinal( + y, mth, d, h, mn, s, 0, 0, base)) + + return np.array(ordinals, dtype=np.int64), freq + + +def _make_field_arrays(*fields): + length = None + for x in fields: + if isinstance(x, (list, np.ndarray, ABCSeries)): + if length is not None and len(x) != length: + raise ValueError('Mismatched Period 
array lengths') + elif length is None: + length = len(x) + + arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) + else np.repeat(x, length) for x in fields] + + return arrays diff --git a/pandas/core/arrays/timedelta.py b/pandas/core/arrays/timedelta.py index f093cadec5a38..dbd481aae4f37 100644 --- a/pandas/core/arrays/timedelta.py +++ b/pandas/core/arrays/timedelta.py @@ -3,7 +3,7 @@ import numpy as np -from pandas._libs import tslibs +from pandas._libs import tslibs, lib from pandas._libs.tslibs import Timedelta, NaT from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64 @@ -15,6 +15,8 @@ from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna +import pandas.core.common as com + from pandas.tseries.offsets import Tick, DateOffset from pandas.tseries.frequencies import to_offset @@ -70,11 +72,27 @@ def _simple_new(cls, values, freq=None, **kwargs): result._freq = freq return result - def __new__(cls, values, freq=None): + def __new__(cls, values, freq=None, start=None, end=None, periods=None, + closed=None): if (freq is not None and not isinstance(freq, DateOffset) and freq != 'infer'): freq = to_offset(freq) + if periods is not None: + if lib.is_float(periods): + periods = int(periods) + elif not lib.is_integer(periods): + raise TypeError('`periods` must be a number, got {periods}' + .format(periods=periods)) + + if values is None: + if freq is None and com._any_none(periods, start, end): + raise ValueError('Must provide freq argument if no data is ' + 'supplied') + else: + return cls._generate(start, end, periods, freq, + closed=closed) + result = cls._simple_new(values, freq=freq) if freq == 'infer': inferred = result.inferred_freq @@ -83,6 +101,52 @@ def __new__(cls, values, freq=None): return result + @classmethod + def _generate(cls, start, end, periods, freq, closed=None, **kwargs): + # **kwargs are for compat with TimedeltaIndex, 
which includes `name` + if com._count_not_none(start, end, periods, freq) != 3: + raise ValueError('Of the four parameters: start, end, periods, ' + 'and freq, exactly three must be specified') + + if start is not None: + start = Timedelta(start) + + if end is not None: + end = Timedelta(end) + + left_closed = False + right_closed = False + + if start is None and end is None: + if closed is not None: + raise ValueError("Closed has to be None if not both of start" + "and end are defined") + + if closed is None: + left_closed = True + right_closed = True + elif closed == "left": + left_closed = True + elif closed == "right": + right_closed = True + else: + raise ValueError("Closed has to be either 'left', 'right' or None") + + if freq is not None: + index = _generate_regular_range(start, end, periods, freq) + index = cls._simple_new(index, freq=freq, **kwargs) + else: + index = np.linspace(start.value, end.value, periods).astype('i8') + # TODO: shouldn't we pass `name` here? (via **kwargs) + index = cls._simple_new(index, freq=freq) + + if not left_closed: + index = index[1:] + if not right_closed: + index = index[:-1] + + return index + # ---------------------------------------------------------------- # Arithmetic Methods @@ -173,6 +237,45 @@ def total_seconds(self): the return type is a Float64Index. When the calling object is a Series, the return type is Series of type `float64` whose index is the same as the original. + + See Also + -------- + datetime.timedelta.total_seconds : Standard library version + of this method. + TimedeltaIndex.components : Return a DataFrame with components of + each Timedelta. 
+ + Examples + -------- + **Series** + + >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) + >>> s + 0 0 days + 1 1 days + 2 2 days + 3 3 days + 4 4 days + dtype: timedelta64[ns] + + >>> s.dt.total_seconds() + 0 0.0 + 1 86400.0 + 2 172800.0 + 3 259200.0 + 4 345600.0 + dtype: float64 + + **TimedeltaIndex** + + >>> idx = pd.to_timedelta(np.arange(5), unit='d') + >>> idx + TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], + dtype='timedelta64[ns]', freq=None) + + >>> idx.total_seconds() + Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0], + dtype='float64') """ return self._maybe_mask_results(1e-9 * self.asi8) @@ -198,3 +301,55 @@ def to_pytimedelta(self): nanoseconds = _field_accessor("nanoseconds", "nanoseconds", "\nNumber of nanoseconds (>= 0 and less " "than 1 microsecond) for each\nelement.\n") + + @property + def components(self): + """ + Return a dataframe of the components (days, hours, minutes, + seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. 
+ + Returns + ------- + a DataFrame + """ + from pandas import DataFrame + + columns = ['days', 'hours', 'minutes', 'seconds', + 'milliseconds', 'microseconds', 'nanoseconds'] + hasnans = self.hasnans + if hasnans: + def f(x): + if isna(x): + return [np.nan] * len(columns) + return x.components + else: + def f(x): + return x.components + + result = DataFrame([f(x) for x in self], columns=columns) + if not hasnans: + result = result.astype('int64') + return result + + +# --------------------------------------------------------------------- +# Constructor Helpers + +def _generate_regular_range(start, end, periods, offset): + stride = offset.nanos + if periods is None: + b = Timedelta(start).value + e = Timedelta(end).value + e += stride - e % stride + elif start is not None: + b = Timedelta(start).value + e = b + periods * stride + elif end is not None: + e = Timedelta(end).value + stride + b = e - periods * stride + else: + raise ValueError("at least 'start' or 'end' should be specified " + "if a 'period' is given.") + + data = np.arange(b, e, stride, dtype=np.int64) + return data diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b8a89ac26c9d9..bc0185bfaaafe 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -31,7 +31,6 @@ from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat -from pandas.core.algorithms import checked_add_with_arr from pandas.core.arrays.datetimes import DatetimeArrayMixin from pandas.core.indexes.base import Index, _index_shared_docs @@ -786,38 +785,6 @@ def __setstate__(self, state): raise Exception("invalid pickle state") _unpickle_compat = __setstate__ - def _sub_datelike(self, other): - # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]] - if isinstance(other, (DatetimeIndex, np.ndarray)): - # if other is an ndarray, we assume it is datetime64-dtype - other = DatetimeIndex(other) - # require tz compat - if not 
self._has_same_tz(other): - raise TypeError("{cls} subtraction must have the same " - "timezones or no timezones" - .format(cls=type(self).__name__)) - result = self._sub_datelike_dti(other) - elif isinstance(other, (datetime, np.datetime64)): - assert other is not tslibs.NaT - other = Timestamp(other) - if other is tslibs.NaT: - return self - tslibs.NaT - # require tz compat - elif not self._has_same_tz(other): - raise TypeError("Timestamp subtraction must have the same " - "timezones or no timezones") - else: - i8 = self.asi8 - result = checked_add_with_arr(i8, -other.value, - arr_mask=self._isnan) - result = self._maybe_mask_results(result, - fill_value=tslibs.iNaT) - else: - raise TypeError("cannot subtract {cls} and {typ}" - .format(cls=type(self).__name__, - typ=type(other).__name__)) - return result.view('timedelta64[ns]') - def _maybe_update_attributes(self, attrs): """ Update Index attributes (e.g. freq) depending on op """ freq = attrs.get('freq', None) @@ -1585,48 +1552,11 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): is_year_end = _wrap_field_accessor('is_year_end') is_leap_year = _wrap_field_accessor('is_leap_year') + @Appender(DatetimeArrayMixin.normalize.__doc__) def normalize(self): - """ - Convert times to midnight. - - The time component of the date-time is converted to midnight i.e. - 00:00:00. This is useful in cases, when the time does not matter. - Length is unaltered. The timezones are unaffected. - - This method is available on Series with datetime values under - the ``.dt`` accessor, and directly on DatetimeIndex. - - Returns - ------- - DatetimeIndex or Series - The same type as the original data. Series will have the same - name and index. DatetimeIndex will have the same name. - - See Also - -------- - floor : Floor the datetimes to the specified freq. - ceil : Ceil the datetimes to the specified freq. - round : Round the datetimes to the specified freq. 
- - Examples - -------- - >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', - ... periods=3, tz='Asia/Calcutta') - >>> idx - DatetimeIndex(['2014-08-01 10:00:00+05:30', - '2014-08-01 11:00:00+05:30', - '2014-08-01 12:00:00+05:30'], - dtype='datetime64[ns, Asia/Calcutta]', freq='H') - >>> idx.normalize() - DatetimeIndex(['2014-08-01 00:00:00+05:30', - '2014-08-01 00:00:00+05:30', - '2014-08-01 00:00:00+05:30'], - dtype='datetime64[ns, Asia/Calcutta]', freq=None) - """ - new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) - return DatetimeIndex(new_values, - freq='infer', - name=self.name).tz_localize(self.tz) + result = DatetimeArrayMixin.normalize(self) + result.name = self.name + return result @Substitution(klass='DatetimeIndex') @Appender(_shared_docs['searchsorted']) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a531a57eb031f..a8e0c7f1aaa6a 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -16,7 +16,6 @@ is_bool_dtype, pandas_dtype, _ensure_object) -from pandas.core.dtypes.generic import ABCSeries import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc @@ -29,7 +28,7 @@ from pandas._libs import tslib, index as libindex from pandas._libs.tslibs.period import (Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, - _validate_end_alias, _quarter_to_myear) + _validate_end_alias) from pandas._libs.tslibs import resolution, period from pandas.core.arrays.period import PeriodArrayMixin @@ -39,7 +38,6 @@ from pandas import compat from pandas.util._decorators import (Appender, Substitution, cache_readonly, deprecate_kwarg) -from pandas.compat import zip import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -266,25 +264,6 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, def _engine(self): return self._engine_type(lambda: self, len(self)) - @classmethod - def 
_generate_range(cls, start, end, periods, freq, fields): - if freq is not None: - freq = Period._maybe_convert_freq(freq) - - field_count = len(fields) - if com._count_not_none(start, end) > 0: - if field_count > 0: - raise ValueError('Can either instantiate from fields ' - 'or endpoints, but not both') - subarr, freq = _get_ordinal_range(start, end, periods, freq) - elif field_count > 0: - subarr, freq = _range_from_fields(freq=freq, **fields) - else: - raise ValueError('Not enough parameters to construct ' - 'Period range') - - return subarr, freq - @classmethod def _simple_new(cls, values, name=None, freq=None, **kwargs): """ @@ -877,102 +856,6 @@ def tz_localize(self, tz, ambiguous='raise'): PeriodIndex._add_datetimelike_methods() -def _get_ordinal_range(start, end, periods, freq, mult=1): - if com._count_not_none(start, end, periods) != 2: - raise ValueError('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') - - if freq is not None: - _, mult = _gfc(freq) - - if start is not None: - start = Period(start, freq) - if end is not None: - end = Period(end, freq) - - is_start_per = isinstance(start, Period) - is_end_per = isinstance(end, Period) - - if is_start_per and is_end_per and start.freq != end.freq: - raise ValueError('start and end must have same freq') - if (start is tslib.NaT or end is tslib.NaT): - raise ValueError('start and end must not be NaT') - - if freq is None: - if is_start_per: - freq = start.freq - elif is_end_per: - freq = end.freq - else: # pragma: no cover - raise ValueError('Could not infer freq from start/end') - - if periods is not None: - periods = periods * mult - if start is None: - data = np.arange(end.ordinal - periods + mult, - end.ordinal + 1, mult, - dtype=np.int64) - else: - data = np.arange(start.ordinal, start.ordinal + periods, mult, - dtype=np.int64) - else: - data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) - - return data, freq - - -def _range_from_fields(year=None, 
month=None, quarter=None, day=None, - hour=None, minute=None, second=None, freq=None): - if hour is None: - hour = 0 - if minute is None: - minute = 0 - if second is None: - second = 0 - if day is None: - day = 1 - - ordinals = [] - - if quarter is not None: - if freq is None: - freq = 'Q' - base = frequencies.FreqGroup.FR_QTR - else: - base, mult = _gfc(freq) - if base != frequencies.FreqGroup.FR_QTR: - raise AssertionError("base must equal FR_QTR") - - year, quarter = _make_field_arrays(year, quarter) - for y, q in zip(year, quarter): - y, m = _quarter_to_myear(y, q, freq) - val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) - ordinals.append(val) - else: - base, mult = _gfc(freq) - arrays = _make_field_arrays(year, month, day, hour, minute, second) - for y, mth, d, h, mn, s in zip(*arrays): - ordinals.append(period.period_ordinal( - y, mth, d, h, mn, s, 0, 0, base)) - - return np.array(ordinals, dtype=np.int64), freq - - -def _make_field_arrays(*fields): - length = None - for x in fields: - if isinstance(x, (list, np.ndarray, ABCSeries)): - if length is not None and len(x) != length: - raise ValueError('Mismatched Period array lengths') - elif length is None: - length = len(x) - - arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) - else np.repeat(x, length) for x in fields] - - return arrays - - def pnow(freq=None): # deprecation, xref #13790 warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() " diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 3af825455caac..eb1171c45b1e5 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,7 +1,5 @@ """ implement the TimedeltaIndex """ -from datetime import timedelta - import numpy as np from pandas.core.dtypes.common import ( _TD_DTYPE, @@ -17,7 +15,8 @@ from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries -from pandas.core.arrays.timedelta import TimedeltaArrayMixin 
+from pandas.core.arrays.timedelta import ( + TimedeltaArrayMixin, _is_convertible_to_td) from pandas.core.indexes.base import Index from pandas.core.indexes.numeric import Int64Index import pandas.compat as compat @@ -33,7 +32,7 @@ TimelikeOps, DatetimeIndexOpsMixin) from pandas.core.tools.timedeltas import ( to_timedelta, _coerce_scalar_to_timedelta_type) -from pandas.tseries.offsets import Tick, DateOffset +from pandas.tseries.offsets import DateOffset from pandas._libs import (lib, index as libindex, join as libjoin, Timedelta, NaT, iNaT) @@ -268,46 +267,11 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, @classmethod def _generate(cls, start, end, periods, name, freq, closed=None): - if com._count_not_none(start, end, periods, freq) != 3: - raise ValueError('Of the four parameters: start, end, periods, ' - 'and freq, exactly three must be specified') - - if start is not None: - start = Timedelta(start) - - if end is not None: - end = Timedelta(end) - - left_closed = False - right_closed = False - - if start is None and end is None: - if closed is not None: - raise ValueError("Closed has to be None if not both of start" - "and end are defined") - - if closed is None: - left_closed = True - right_closed = True - elif closed == "left": - left_closed = True - elif closed == "right": - right_closed = True - else: - raise ValueError("Closed has to be either 'left', 'right' or None") - - if freq is not None: - index = _generate_regular_range(start, end, periods, freq) - index = cls._simple_new(index, name=name, freq=freq) - else: - index = to_timedelta(np.linspace(start.value, end.value, periods)) - - if not left_closed: - index = index[1:] - if not right_closed: - index = index[:-1] - - return index + # TimedeltaArray gets `name` via **kwargs, so we need to explicitly + # override it if name is passed as a positional argument + return super(TimedeltaIndex, cls)._generate(start, end, + periods, freq, + name=name, closed=closed) @classmethod 
def _simple_new(cls, values, name=None, freq=None, **kwargs): @@ -383,90 +347,8 @@ def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): microseconds = _wrap_field_accessor("microseconds") nanoseconds = _wrap_field_accessor("nanoseconds") - @property - def components(self): - """ - Return a dataframe of the components (days, hours, minutes, - seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. - - Returns - ------- - a DataFrame - """ - from pandas import DataFrame - - columns = ['days', 'hours', 'minutes', 'seconds', - 'milliseconds', 'microseconds', 'nanoseconds'] - hasnans = self.hasnans - if hasnans: - def f(x): - if isna(x): - return [np.nan] * len(columns) - return x.components - else: - def f(x): - return x.components - - result = DataFrame([f(x) for x in self]) - result.columns = columns - if not hasnans: - result = result.astype('int64') - return result - + @Appender(TimedeltaArrayMixin.total_seconds.__doc__) def total_seconds(self): - """ - Return total duration of each element expressed in seconds. - - This method is available directly on TimedeltaIndex and on Series - containing timedelta values under the ``.dt`` namespace. - - Returns - ------- - seconds : Float64Index or Series - When the calling object is a TimedeltaIndex, the return type is a - Float64Index. When the calling object is a Series, the return type - is Series of type `float64` whose index is the same as the - original. - - See Also - -------- - datetime.timedelta.total_seconds : Standard library version - of this method. - TimedeltaIndex.components : Return a DataFrame with components of - each Timedelta. 
- - Examples - -------- - **Series** - - >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit='d')) - >>> s - 0 0 days - 1 1 days - 2 2 days - 3 3 days - 4 4 days - dtype: timedelta64[ns] - - >>> s.dt.total_seconds() - 0 0.0 - 1 86400.0 - 2 172800.0 - 3 259200.0 - 4 345600.0 - dtype: float64 - - **TimedeltaIndex** - - >>> idx = pd.to_timedelta(np.arange(5), unit='d') - >>> idx - TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], - dtype='timedelta64[ns]', freq=None) - - >>> idx.total_seconds() - Float64Index([0.0, 86400.0, 172800.0, 259200.00000000003, 345600.0], - dtype='float64') - """ result = TimedeltaArrayMixin.total_seconds(self) return Index(result, name=self.name) @@ -915,11 +797,6 @@ def _is_convertible_to_index(other): return False -def _is_convertible_to_td(key): - return isinstance(key, (Tick, timedelta, - np.timedelta64, compat.string_types)) - - def _to_m8(key): """ Timedelta-like => dt64 @@ -932,28 +809,6 @@ def _to_m8(key): return np.int64(key.value).view(_TD_DTYPE) -def _generate_regular_range(start, end, periods, offset): - stride = offset.nanos - if periods is None: - b = Timedelta(start).value - e = Timedelta(end).value - e += stride - e % stride - elif start is not None: - b = Timedelta(start).value - e = b + periods * stride - elif end is not None: - e = Timedelta(end).value + stride - b = e - periods * stride - else: - raise ValueError("at least 'start' or 'end' should be specified " - "if a 'period' is given.") - - data = np.arange(b, e, stride, dtype=np.int64) - data = TimedeltaIndex._simple_new(data, None) - - return data - - def timedelta_range(start=None, end=None, periods=None, freq=None, name=None, closed=None): """
Takes the place of #21843, porting constructor helpers. While this is in review I'm going to start porting tests in earnest.
https://api.github.com/repos/pandas-dev/pandas/pulls/21845
2018-07-10T20:19:09Z
2018-07-12T00:29:28Z
2018-07-12T00:29:28Z
2018-07-12T00:44:42Z
[CLN] cleanup import reverse-dependencies
diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index b3f40b3a2429c..141a5d2389db5 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -1,8 +1,11 @@ """ io on the clipboard """ -from pandas import compat, get_option, option_context, DataFrame -from pandas.compat import StringIO, PY2, PY3 import warnings +from pandas.compat import StringIO, PY2, PY3 + +from pandas.core.dtypes.generic import ABCDataFrame +from pandas import compat, get_option, option_context + def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover r""" @@ -131,7 +134,7 @@ def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover elif sep is not None: warnings.warn('to_clipboard with excel=False ignores the sep argument') - if isinstance(obj, DataFrame): + if isinstance(obj, ABCDataFrame): # str(df) has various unhelpful defaults, like truncation with option_context('display.max_colwidth', 999999): objstr = obj.to_string(**kwargs) diff --git a/pandas/io/excel.py b/pandas/io/excel.py index fa3a1bd74eda5..39131d390c69f 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -9,29 +9,32 @@ import os import abc import warnings -import numpy as np +from textwrap import fill from io import UnsupportedOperation +from distutils.version import LooseVersion + +import numpy as np + +import pandas._libs.json as json +from pandas.util._decorators import Appender, deprecate_kwarg +from pandas.errors import EmptyDataError + +import pandas.compat as compat +from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass, + string_types, OrderedDict) from pandas.core.dtypes.common import ( is_integer, is_float, is_bool, is_list_like) +from pandas.core import config from pandas.core.frame import DataFrame + from pandas.io.parsers import TextParser -from pandas.errors import EmptyDataError from pandas.io.common import (_is_url, _urlopen, _validate_header_arg, get_filepath_or_buffer, _NA_VALUES, _stringify_path) -import pandas._libs.json as json 
-from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass, - string_types, OrderedDict) -from pandas.core import config from pandas.io.formats.printing import pprint_thing -import pandas.compat as compat -from warnings import warn -from distutils.version import LooseVersion -from pandas.util._decorators import Appender, deprecate_kwarg -from textwrap import fill __all__ = ["read_excel", "ExcelWriter", "ExcelFile"] @@ -527,8 +530,8 @@ def _parse_excel(self, "is not implemented") if parse_dates is True and index_col is None: - warn("The 'parse_dates=True' keyword of read_excel was provided" - " without an 'index_col' keyword value.") + warnings.warn("The 'parse_dates=True' keyword of read_excel was " + "provided without an 'index_col' keyword value.") import xlrd from xlrd import (xldate, XL_CELL_DATE, diff --git a/pandas/io/formats/csvs.py b/pandas/io/formats/csvs.py index 60518f596e9af..0796888554a46 100644 --- a/pandas/io/formats/csvs.py +++ b/pandas/io/formats/csvs.py @@ -9,18 +9,20 @@ import csv as csvlib from zipfile import ZipFile + import numpy as np -from pandas.core.dtypes.missing import notna -from pandas.core.index import Index, MultiIndex +from pandas._libs import writers as libwriters + from pandas import compat -from pandas.compat import (StringIO, range, zip) +from pandas.compat import StringIO, range, zip + +from pandas.core.dtypes.missing import notna +from pandas.core.dtypes.generic import ( + ABCMultiIndex, ABCPeriodIndex, ABCDatetimeIndex, ABCIndexClass) from pandas.io.common import (_get_handle, UnicodeWriter, _expand_user, _stringify_path) -from pandas._libs import writers as libwriters -from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.period import PeriodIndex class CSVFormatter(object): @@ -68,7 +70,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.date_format = date_format self.tupleize_cols = tupleize_cols - self.has_mi_columns = (isinstance(obj.columns, MultiIndex) 
and + self.has_mi_columns = (isinstance(obj.columns, ABCMultiIndex) and not self.tupleize_cols) # validate mi options @@ -78,7 +80,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', "columns") if cols is not None: - if isinstance(cols, Index): + if isinstance(cols, ABCIndexClass): cols = cols.to_native_types(na_rep=na_rep, float_format=float_format, date_format=date_format, @@ -90,7 +92,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', # update columns to include possible multiplicity of dupes # and make sure sure cols is just a list of labels cols = self.obj.columns - if isinstance(cols, Index): + if isinstance(cols, ABCIndexClass): cols = cols.to_native_types(na_rep=na_rep, float_format=float_format, date_format=date_format, @@ -111,8 +113,9 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', self.chunksize = int(chunksize) self.data_index = obj.index - if (isinstance(self.data_index, (DatetimeIndex, PeriodIndex)) and + if (isinstance(self.data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and date_format is not None): + from pandas import Index self.data_index = Index([x.strftime(date_format) if notna(x) else '' for x in self.data_index]) @@ -197,7 +200,8 @@ def _save_header(self): header = self.header encoded_labels = [] - has_aliases = isinstance(header, (tuple, list, np.ndarray, Index)) + has_aliases = isinstance(header, (tuple, list, np.ndarray, + ABCIndexClass)) if not (has_aliases or self.header): return if has_aliases: @@ -214,7 +218,7 @@ def _save_header(self): # should write something for index label if index_label is not False: if index_label is None: - if isinstance(obj.index, MultiIndex): + if isinstance(obj.index, ABCMultiIndex): index_label = [] for i, name in enumerate(obj.index.names): if name is None: @@ -227,7 +231,7 @@ def _save_header(self): else: index_label = [index_label] elif not isinstance(index_label, - (list, tuple, np.ndarray, Index)): + (list, tuple, np.ndarray, ABCIndexClass)): # given a 
string for a DF with Index index_label = [index_label] diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index 76ffd41f93090..ec95ce7a970ad 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -8,12 +8,15 @@ import numpy as np from pandas.compat import reduce -from pandas.io.formats.css import CSSResolver, CSSWarning -from pandas.io.formats.printing import pprint_thing import pandas.core.common as com + from pandas.core.dtypes.common import is_float, is_scalar from pandas.core.dtypes import missing -from pandas import Index, MultiIndex, PeriodIndex +from pandas.core.dtypes.generic import ABCMultiIndex, ABCPeriodIndex +from pandas import Index + +from pandas.io.formats.css import CSSResolver, CSSWarning +from pandas.io.formats.printing import pprint_thing from pandas.io.formats.format import get_level_lengths @@ -414,7 +417,7 @@ def _format_header_mi(self): coloffset = 0 lnum = 0 - if self.index and isinstance(self.df.index, MultiIndex): + if self.index and isinstance(self.df.index, ABCMultiIndex): coloffset = len(self.df.index[0]) - 1 if self.merge_cells: @@ -449,7 +452,7 @@ def _format_header_regular(self): if self.index: coloffset = 1 - if isinstance(self.df.index, MultiIndex): + if isinstance(self.df.index, ABCMultiIndex): coloffset = len(self.df.index[0]) colnames = self.columns @@ -466,7 +469,7 @@ def _format_header_regular(self): header_style) def _format_header(self): - if isinstance(self.columns, MultiIndex): + if isinstance(self.columns, ABCMultiIndex): gen = self._format_header_mi() else: gen = self._format_header_regular() @@ -483,7 +486,7 @@ def _format_header(self): def _format_body(self): - if isinstance(self.df.index, MultiIndex): + if isinstance(self.df.index, ABCMultiIndex): return self._format_hierarchical_rows() else: return self._format_regular_rows() @@ -507,7 +510,7 @@ def _format_regular_rows(self): else: index_label = self.df.index.names[0] - if isinstance(self.columns, MultiIndex): + if 
isinstance(self.columns, ABCMultiIndex): self.rowcounter += 1 if index_label and self.header is not False: @@ -516,7 +519,7 @@ def _format_regular_rows(self): # write index_values index_values = self.df.index - if isinstance(self.df.index, PeriodIndex): + if isinstance(self.df.index, ABCPeriodIndex): index_values = self.df.index.to_timestamp() for idx, idxval in enumerate(index_values): @@ -548,7 +551,7 @@ def _format_hierarchical_rows(self): # with index names (blank if None) for # unambigous round-trip, unless not merging, # in which case the names all go on one row Issue #11328 - if isinstance(self.columns, MultiIndex) and self.merge_cells: + if isinstance(self.columns, ABCMultiIndex) and self.merge_cells: self.rowcounter += 1 # if index labels are not empty go ahead and dump diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 86a686783eaf3..5f97447d29cbc 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -12,7 +12,7 @@ import numpy as np from pandas._libs import lib -from pandas._libs.tslibs import iNaT, Timestamp, Timedelta +from pandas._libs.tslibs import NaT, iNaT, Timestamp, Timedelta from pandas._libs.tslib import format_array_from_datetime from pandas import compat @@ -33,20 +33,18 @@ is_datetime64_dtype, is_timedelta64_dtype, is_list_like) -from pandas.core.dtypes.generic import ABCSparseArray +from pandas.core.dtypes.generic import ABCSparseArray, ABCMultiIndex from pandas.core.base import PandasObject import pandas.core.common as com -from pandas.core.index import Index, MultiIndex, _ensure_index +from pandas.core.index import Index, _ensure_index from pandas.core.config import get_option, set_option from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex from pandas.io.formats.terminal import get_terminal_size -from pandas.io.common import (_expand_user, _stringify_path) +from pandas.io.common import _expand_user, _stringify_path from 
pandas.io.formats.printing import adjoin, justify, pprint_thing -import pandas as pd - common_docstring = """ Parameters @@ -248,7 +246,7 @@ def _get_footer(self): def _get_formatted_index(self): index = self.tr_series.index - is_multi = isinstance(index, MultiIndex) + is_multi = isinstance(index, ABCMultiIndex) if is_multi: have_header = any(name for name in index.names) @@ -768,7 +766,7 @@ def _get_formatted_column_labels(self, frame): columns = frame.columns - if isinstance(columns, MultiIndex): + if isinstance(columns, ABCMultiIndex): fmt_columns = columns.format(sparsify=False, adjoin=False) fmt_columns = lzip(*fmt_columns) dtypes = self.frame.dtypes._values @@ -824,7 +822,7 @@ def _get_formatted_index(self, frame): fmt = self._get_formatter('__index__') - if isinstance(index, MultiIndex): + if isinstance(index, ABCMultiIndex): fmt_index = index.format(sparsify=self.sparsify, adjoin=False, names=show_index_names, formatter=fmt) else: @@ -850,7 +848,7 @@ def _get_formatted_index(self, frame): def _get_column_name_list(self): names = [] columns = self.frame.columns - if isinstance(columns, MultiIndex): + if isinstance(columns, ABCMultiIndex): names.extend('' if name is None else name for name in columns.names) else: @@ -937,7 +935,7 @@ def _format(x): if self.na_rep is not None and is_scalar(x) and isna(x): if x is None: return 'None' - elif x is pd.NaT: + elif x is NaT: return 'NaT' return self.na_rep elif isinstance(x, PandasObject): @@ -1415,7 +1413,7 @@ def _cond(values): def _has_names(index): - if isinstance(index, MultiIndex): + if isinstance(index, ABCMultiIndex): return com._any_not_none(*index.names) else: return index.name is not None diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index a43c55a220292..20be903f54967 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -8,12 +8,14 @@ from textwrap import dedent -import pandas.core.common as com -from pandas.core.index import MultiIndex from pandas import compat 
from pandas.compat import (lzip, range, map, zip, u, OrderedDict, unichr) + +import pandas.core.common as com +from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.config import get_option + from pandas.io.formats.printing import pprint_thing from pandas.io.formats.format import (get_level_lengths, buffer_put_lines) @@ -117,7 +119,7 @@ def write_style(self): ('tbody tr th', 'vertical-align', 'top')] - if isinstance(self.columns, MultiIndex): + if isinstance(self.columns, ABCMultiIndex): element_props.append(('thead tr th', 'text-align', 'left')) @@ -205,7 +207,7 @@ def _column_header(): else: row = [] - if isinstance(self.columns, MultiIndex): + if isinstance(self.columns, ABCMultiIndex): if self.fmt.has_column_names and self.fmt.index: row.append(single_column_table(self.columns.names)) else: @@ -224,7 +226,7 @@ def _column_header(): indent += self.indent_delta - if isinstance(self.columns, MultiIndex): + if isinstance(self.columns, ABCMultiIndex): template = 'colspan="{span:d}" halign="left"' if self.fmt.sparsify: @@ -337,7 +339,7 @@ def _write_body(self, indent): # write values if self.fmt.index: - if isinstance(self.frame.index, MultiIndex): + if isinstance(self.frame.index, ABCMultiIndex): self._write_hierarchical_rows(fmt_values, indent) else: self._write_regular_rows(fmt_values, indent) diff --git a/pandas/io/formats/latex.py b/pandas/io/formats/latex.py index 666f124e7d544..fbbad763dd97b 100644 --- a/pandas/io/formats/latex.py +++ b/pandas/io/formats/latex.py @@ -2,14 +2,16 @@ """ Module for formatting output data in Latex. 
""" - from __future__ import print_function -from pandas.core.index import MultiIndex +import numpy as np + from pandas import compat from pandas.compat import range, map, zip, u + +from pandas.core.dtypes.generic import ABCMultiIndex + from pandas.io.formats.format import TableFormatter -import numpy as np class LatexFormatter(TableFormatter): @@ -63,7 +65,7 @@ def get_col_type(dtype): return 'l' # reestablish the MultiIndex that has been joined by _to_str_column - if self.fmt.index and isinstance(self.frame.index, MultiIndex): + if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex): out = self.frame.index.format( adjoin=False, sparsify=self.fmt.sparsify, names=self.fmt.has_index_names, na_rep=self.fmt.na_rep diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index bb31e8927cba3..35e244bf2f9eb 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -11,6 +11,16 @@ import itertools import warnings import os +from distutils.version import LooseVersion + +import numpy as np + +from pandas._libs import algos, lib, writers as libwriters +from pandas._libs.tslibs import timezones + +from pandas.errors import PerformanceWarning +from pandas import compat +from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter from pandas.core.dtypes.common import ( is_list_like, @@ -23,17 +33,10 @@ _ensure_platform_int) from pandas.core.dtypes.missing import array_equivalent -import numpy as np -from pandas import (Series, DataFrame, Panel, Index, - MultiIndex, Int64Index, isna, concat, to_datetime, - SparseSeries, SparseDataFrame, PeriodIndex, - DatetimeIndex, TimedeltaIndex) from pandas.core import config -from pandas.io.common import _stringify_path +from pandas.core.config import get_option from pandas.core.sparse.array import BlockIndex, IntIndex from pandas.core.base import StringMixin -from pandas.io.formats.printing import adjoin, pprint_thing -from pandas.errors import PerformanceWarning import pandas.core.common as com from 
pandas.core.algorithms import match, unique from pandas.core.arrays.categorical import (Categorical, @@ -42,15 +45,15 @@ _block2d_to_blocknd, _factor_indexer, _block_shape) from pandas.core.index import _ensure_index -from pandas import compat -from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter -from pandas.core.config import get_option from pandas.core.computation.pytables import Expr, maybe_expression -from pandas._libs import algos, lib, writers as libwriters -from pandas._libs.tslibs import timezones +from pandas.io.common import _stringify_path +from pandas.io.formats.printing import adjoin, pprint_thing -from distutils.version import LooseVersion +from pandas import (Series, DataFrame, Panel, Index, + MultiIndex, Int64Index, isna, concat, to_datetime, + SparseSeries, SparseDataFrame, PeriodIndex, + DatetimeIndex, TimedeltaIndex) # versioning attribute _version = '0.15.2' diff --git a/pandas/io/sas/sas7bdat.py b/pandas/io/sas/sas7bdat.py index 4d187a8282859..b2d930c1be5e7 100644 --- a/pandas/io/sas/sas7bdat.py +++ b/pandas/io/sas/sas7bdat.py @@ -13,16 +13,20 @@ Reference for binary data compression: http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm """ +from datetime import datetime +import struct + +import numpy as np -import pandas as pd from pandas import compat -from pandas.io.common import get_filepath_or_buffer, BaseIterator from pandas.errors import EmptyDataError -import numpy as np -import struct + +from pandas.io.common import get_filepath_or_buffer, BaseIterator import pandas.io.sas.sas_constants as const from pandas.io.sas._sas import Parser +import pandas as pd + class _subheader_pointer(object): pass @@ -169,7 +173,7 @@ def _get_properties(self): self.encoding or self.default_encoding) # Timestamp is epoch 01/01/1960 - epoch = pd.datetime(1960, 1, 1) + epoch = datetime(1960, 1, 1) x = self._read_float(const.date_created_offset + align1, const.date_created_length) 
self.date_created = epoch + pd.to_timedelta(x, unit='s') diff --git a/pandas/io/sas/sas_xport.py b/pandas/io/sas/sas_xport.py index cb01b7a652157..52b25898fc67e 100644 --- a/pandas/io/sas/sas_xport.py +++ b/pandas/io/sas/sas_xport.py @@ -9,13 +9,16 @@ """ from datetime import datetime -import pandas as pd -from pandas.io.common import get_filepath_or_buffer, BaseIterator -from pandas import compat import struct +import warnings + import numpy as np + from pandas.util._decorators import Appender -import warnings +from pandas import compat + +from pandas.io.common import get_filepath_or_buffer, BaseIterator +import pandas as pd _correct_line1 = ("HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!" "000000000000000000000000000000 ") diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 297a24fa3a149..4ce2ed4e36139 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -14,14 +14,15 @@ import struct import sys from collections import OrderedDict +import warnings import numpy as np from dateutil.relativedelta import relativedelta + from pandas._libs.lib import infer_dtype from pandas._libs.tslibs import NaT, Timestamp from pandas._libs.writers import max_len_string_array -import pandas as pd from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex from pandas.compat import (lrange, lmap, lzip, text_type, string_types, range, zip, BytesIO) @@ -317,12 +318,12 @@ def convert_delta_safe(base, deltas, unit): ms = dates conv_dates = convert_delta_safe(base, ms, 'ms') elif fmt.startswith(("%tC", "tC")): - from warnings import warn - warn("Encountered %tC format. Leaving in Stata Internal Format.") + warnings.warn("Encountered %tC format. 
Leaving in Stata " + "Internal Format.") conv_dates = Series(dates, dtype=np.object) if has_bad_values: - conv_dates[bad_locs] = pd.NaT + conv_dates[bad_locs] = NaT return conv_dates # Delta days relative to base elif fmt.startswith(("%td", "td", "%d", "d")): @@ -425,8 +426,7 @@ def parse_dates_safe(dates, delta=False, year=False, days=False): d = parse_dates_safe(dates, delta=True) conv_dates = d.delta / 1000 elif fmt in ["%tC", "tC"]: - from warnings import warn - warn("Stata Internal Format tC not supported.") + warnings.warn("Stata Internal Format tC not supported.") conv_dates = dates elif fmt in ["%td", "td"]: d = parse_dates_safe(dates, delta=True) @@ -580,8 +580,6 @@ def _cast_to_stata_types(data): raise ValueError(msg.format(col, value, float64_max)) if ws: - import warnings - warnings.warn(ws, PossiblePrecisionLoss) return data @@ -627,7 +625,6 @@ def __init__(self, catarray): category = vl[1] if not isinstance(category, string_types): category = str(category) - import warnings warnings.warn(value_label_mismatch_doc.format(catarray.name), ValueLabelTypeMismatch) @@ -1425,7 +1422,6 @@ def _read_strls(self): @Appender(_data_method_doc) def data(self, **kwargs): - import warnings warnings.warn("'data' is deprecated, use 'read' instead") if self._data_read: @@ -2105,7 +2101,6 @@ def _check_column_names(self, data): del self._convert_dates[o] if converted_names: - import warnings conversion_warning = [] for orig_name, name in converted_names.items(): # need to possibly encode the orig name if its unicode diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 97e0d0b4608ae..beebf84b8a033 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -11,6 +11,9 @@ from matplotlib.ticker import Formatter, AutoLocator, Locator from matplotlib.transforms import nonsingular +from pandas._libs import tslibs +from pandas._libs.tslibs import resolution + from pandas.core.dtypes.common import ( is_float, is_integer, 
is_integer_dtype, @@ -23,13 +26,11 @@ from pandas.compat import lrange import pandas.compat as compat -from pandas._libs import tslibs import pandas.core.common as com from pandas.core.index import Index from pandas.core.indexes.datetimes import date_range import pandas.core.tools.datetimes as tools -from pandas._libs.tslibs import resolution import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import FreqGroup from pandas.core.indexes.period import Period, PeriodIndex diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 842da838b4b83..06020bdfd5d1d 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -9,10 +9,15 @@ import numpy as np -from pandas.util._decorators import cache_readonly +from pandas.util._decorators import cache_readonly, Appender +from pandas.compat import range, lrange, map, zip, string_types +import pandas.compat as compat + import pandas.core.common as com from pandas.core.base import PandasObject from pandas.core.config import get_option +from pandas.core.generic import _shared_docs, _shared_doc_kwargs + from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike from pandas.core.dtypes.common import ( is_list_like, @@ -20,16 +25,10 @@ is_number, is_hashable, is_iterator) -from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame +from pandas.core.dtypes.generic import ( + ABCSeries, ABCDataFrame, ABCPeriodIndex, ABCMultiIndex, ABCIndexClass) -from pandas.core.generic import _shared_docs, _shared_doc_kwargs -from pandas.core.index import Index, MultiIndex - -from pandas.core.indexes.period import PeriodIndex -from pandas.compat import range, lrange, map, zip, string_types -import pandas.compat as compat from pandas.io.formats.printing import pprint_thing -from pandas.util._decorators import Appender from pandas.plotting._compat import (_mpl_ge_1_3_1, _mpl_ge_1_5_0, @@ -170,7 +169,8 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, for 
kw, err in zip(['xerr', 'yerr'], [xerr, yerr]): self.errors[kw] = self._parse_errorbars(kw, err) - if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)): + if not isinstance(secondary_y, (bool, tuple, list, + np.ndarray, ABCIndexClass)): secondary_y = [secondary_y] self.secondary_y = secondary_y @@ -484,7 +484,7 @@ def _apply_axis_properties(self, axis, rot=None, fontsize=None): @property def legend_title(self): - if not isinstance(self.data.columns, MultiIndex): + if not isinstance(self.data.columns, ABCMultiIndex): name = self.data.columns.name if name is not None: name = pprint_thing(name) @@ -566,7 +566,7 @@ def _get_xticks(self, convert_period=False): 'datetime64', 'time') if self.use_index: - if convert_period and isinstance(index, PeriodIndex): + if convert_period and isinstance(index, ABCPeriodIndex): self.data = self.data.reindex(index=index.sort_values()) x = self.data.index.to_timestamp()._mpl_repr() elif index.is_numeric(): @@ -596,7 +596,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): y = np.ma.array(y) y = np.ma.masked_where(mask, y) - if isinstance(x, Index): + if isinstance(x, ABCIndexClass): x = x._mpl_repr() if is_errorbar: @@ -615,7 +615,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): return ax.plot(*args, **kwds) def _get_index_name(self): - if isinstance(self.data.index, MultiIndex): + if isinstance(self.data.index, ABCMultiIndex): name = self.data.index.names if com._any_not_none(*name): name = ','.join(pprint_thing(x) for x in name) @@ -653,7 +653,8 @@ def on_right(self, i): if isinstance(self.secondary_y, bool): return self.secondary_y - if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)): + if isinstance(self.secondary_y, (tuple, list, + np.ndarray, ABCIndexClass)): return self.data.columns[i] in self.secondary_y def _apply_style_colors(self, colors, kwds, col_num, label): @@ -704,14 +705,12 @@ def _parse_errorbars(self, label, err): if err is None: return None - 
from pandas import DataFrame, Series - def match_labels(data, e): e = e.reindex(data.index) return e # key-matched DataFrame - if isinstance(err, DataFrame): + if isinstance(err, ABCDataFrame): err = match_labels(self.data, err) # key-matched dict @@ -719,7 +718,7 @@ def match_labels(data, e): pass # Series of error values - elif isinstance(err, Series): + elif isinstance(err, ABCSeries): # broadcast error series across data err = match_labels(self.data, err) err = np.atleast_2d(err) @@ -765,14 +764,13 @@ def match_labels(data, e): return err def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True): - from pandas import DataFrame errors = {} for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]): if flag: err = self.errors[kw] # user provided label-matched dataframe of errors - if isinstance(err, (DataFrame, dict)): + if isinstance(err, (ABCDataFrame, dict)): if label is not None and label in err.keys(): err = err[label] else: @@ -2196,9 +2194,8 @@ def boxplot(data, column=None, by=None, ax=None, fontsize=None, if return_type not in BoxPlot._valid_return_types: raise ValueError("return_type must be {'axes', 'dict', 'both'}") - from pandas import Series, DataFrame - if isinstance(data, Series): - data = DataFrame({'x': data}) + if isinstance(data, ABCSeries): + data = data.to_frame('x') column = 'x' def _get_colors(): @@ -2420,7 +2417,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, return axes if column is not None: - if not isinstance(column, (list, np.ndarray, Index)): + if not isinstance(column, (list, np.ndarray, ABCIndexClass)): column = [column] data = data[column] data = data._get_numeric_data() @@ -2658,7 +2655,6 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, figsize=None, sharex=True, sharey=True, layout=None, rot=0, ax=None, **kwargs): - from pandas import DataFrame if figsize == 'default': # allowed to specify 
mpl default with 'default' @@ -2679,7 +2675,7 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, for i, (key, group) in enumerate(grouped): ax = _axes[i] - if numeric_only and isinstance(group, DataFrame): + if numeric_only and isinstance(group, ABCDataFrame): group = group._get_numeric_data() plotf(group, ax, **kwargs) ax.set_title(pprint_thing(key)) diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 21a03ea388566..0522d7e721b65 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -3,14 +3,16 @@ import functools import numpy as np - from matplotlib import pylab -from pandas.core.indexes.period import Period + +from pandas._libs.tslibs.period import Period + +from pandas.core.dtypes.generic import ( + ABCPeriodIndex, ABCDatetimeIndex, ABCTimedeltaIndex) + from pandas.tseries.offsets import DateOffset import pandas.tseries.frequencies as frequencies -from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.period import PeriodIndex -from pandas.core.indexes.timedeltas import TimedeltaIndex + from pandas.io.formats.printing import pprint_thing import pandas.compat as compat @@ -69,7 +71,7 @@ def _maybe_resample(series, ax, kwargs): raise ValueError('Cannot use dynamic axis without frequency info') # Convert DatetimeIndex to PeriodIndex - if isinstance(series.index, DatetimeIndex): + if isinstance(series.index, ABCDatetimeIndex): series = series.to_period(freq=freq) if ax_freq is not None and freq != ax_freq: @@ -239,7 +241,7 @@ def _use_dynamic_x(ax, data): return False # hack this for 0.10.1, creating more technical debt...sigh - if isinstance(data.index, DatetimeIndex): + if isinstance(data.index, ABCDatetimeIndex): base = frequencies.get_freq(freq) x = data.index if (base <= frequencies.FreqGroup.FR_DAY): @@ -262,7 +264,7 @@ def _get_index_freq(data): def _maybe_convert_index(ax, data): # tsplot converts automatically, but don't want to convert index # 
over and over for DataFrames - if isinstance(data.index, DatetimeIndex): + if isinstance(data.index, ABCDatetimeIndex): freq = getattr(data.index, 'freq', None) if freq is None: @@ -320,7 +322,7 @@ def format_dateaxis(subplot, freq, index): # handle index specific formatting # Note: DatetimeIndex does not use this # interface. DatetimeIndex uses matplotlib.date directly - if isinstance(index, PeriodIndex): + if isinstance(index, ABCPeriodIndex): majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, @@ -343,7 +345,7 @@ def format_dateaxis(subplot, freq, index): # x and y coord info subplot.format_coord = functools.partial(_format_coord, freq) - elif isinstance(index, TimedeltaIndex): + elif isinstance(index, ABCTimedeltaIndex): subplot.xaxis.set_major_formatter( TimeSeries_TimedeltaFormatter()) else: diff --git a/pandas/plotting/_tools.py b/pandas/plotting/_tools.py index 816586fbb82f5..7618afd42010f 100644 --- a/pandas/plotting/_tools.py +++ b/pandas/plotting/_tools.py @@ -8,8 +8,7 @@ import numpy as np from pandas.core.dtypes.common import is_list_like -from pandas.core.dtypes.generic import ABCSeries -from pandas.core.index import Index +from pandas.core.dtypes.generic import ABCSeries, ABCIndexClass, ABCDataFrame from pandas.compat import range @@ -43,10 +42,9 @@ def table(ax, data, rowLabels=None, colLabels=None, **kwargs): ------- matplotlib table object """ - from pandas import DataFrame if isinstance(data, ABCSeries): - data = DataFrame(data, columns=[data.name]) - elif isinstance(data, DataFrame): + data = data.to_frame() + elif isinstance(data, ABCDataFrame): pass else: raise ValueError('Input data must be DataFrame or Series') @@ -341,7 +339,7 @@ def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey): def _flatten(axes): if not is_list_like(axes): return np.array([axes]) - elif isinstance(axes, (np.ndarray, Index)): + elif isinstance(axes, (np.ndarray, ABCIndexClass)): return axes.ravel() return 
np.array(axes)
Parts of the code are made more difficult to reason about by depending on `pandas.io.formats`, which has dependencies all over the code. This PR starts in on this by replacing `import Foo` with `import ABCFoo` where possible. Along the way it cleans up import order.
https://api.github.com/repos/pandas-dev/pandas/pulls/21844
2018-07-10T15:24:47Z
2018-07-11T00:09:43Z
2018-07-11T00:09:43Z
2018-07-11T01:19:27Z
[REF] Move constructor helpers to EA Mixin classes
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 65f34b847f8d0..00964fce49358 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -403,7 +403,7 @@ def _addsub_int_array(self, other, op): td = Timedelta(self.freq) return op(self, td * other) - # We should only get here with DatetimeIndex; dispatch + # We should only get here with Datetime Array/Index; dispatch # to _addsub_offset_array assert not is_timedelta64_dtype(self) return op(self, np.array(other) * self.freq) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 05bc3d23cfb8e..402c635a5a046 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -from datetime import timedelta +from datetime import datetime, timedelta import warnings import numpy as np @@ -22,6 +22,8 @@ _ensure_int64) from pandas.core.dtypes.dtypes import DatetimeTZDtype +from pandas.core.algorithms import checked_add_with_arr + from pandas.tseries.frequencies import to_offset, DateOffset from pandas.tseries.offsets import Tick @@ -250,8 +252,41 @@ def _assert_tzawareness_compat(self, other): # ----------------------------------------------------------------- # Arithmetic Methods + def _sub_datelike(self, other): + # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]] + if isinstance(other, (DatetimeArrayMixin, np.ndarray)): + if isinstance(other, np.ndarray): + # if other is an ndarray, we assume it is datetime64-dtype + other = type(self)(other) + # require tz compat + if not self._has_same_tz(other): + raise TypeError("{cls} subtraction must have the same " + "timezones or no timezones" + .format(cls=type(self).__name__)) + result = self._sub_datelike_dti(other) + elif isinstance(other, (datetime, np.datetime64)): + assert other is not NaT + other = Timestamp(other) + if other is NaT: + return self - NaT + elif not self._has_same_tz(other): + # 
require tz compat + raise TypeError("Timestamp subtraction must have the same " + "timezones or no timezones") + else: + i8 = self.asi8 + result = checked_add_with_arr(i8, -other.value, + arr_mask=self._isnan) + result = self._maybe_mask_results(result, + fill_value=iNaT) + else: + raise TypeError("cannot subtract {cls} and {typ}" + .format(cls=type(self).__name__, + typ=type(other).__name__)) + return result.view('timedelta64[ns]') + def _sub_datelike_dti(self, other): - """subtraction of two DatetimeIndexes""" + """subtraction of two Datetime Arrays/Indexes""" if not len(self) == len(other): raise ValueError("cannot add indices of unequal length") @@ -517,6 +552,48 @@ def to_pydatetime(self): """ return tslib.ints_to_pydatetime(self.asi8, tz=self.tz) + def normalize(self): + """ + Convert times to midnight. + + The time component of the date-time is converted to midnight i.e. + 00:00:00. This is useful in cases, when the time does not matter. + Length is unaltered. The timezones are unaffected. + + This method is available on Series with datetime values under + the ``.dt`` accessor, and directly on DatetimeIndex. + + Returns + ------- + DatetimeArray, DatetimeIndex or Series + The same type as the original data. Series will have the same + name and index. DatetimeIndex will have the same name. + + See Also + -------- + floor : Floor the datetimes to the specified freq. + ceil : Ceil the datetimes to the specified freq. + round : Round the datetimes to the specified freq. + + Examples + -------- + >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', + ... 
periods=3, tz='Asia/Calcutta') + >>> idx + DatetimeIndex(['2014-08-01 10:00:00+05:30', + '2014-08-01 11:00:00+05:30', + '2014-08-01 12:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq='H') + >>> idx.normalize() + DatetimeIndex(['2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30', + '2014-08-01 00:00:00+05:30'], + dtype='datetime64[ns, Asia/Calcutta]', freq=None) + """ + new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) + return type(self)(new_values, + freq='infer').tz_localize(self.tz) + # ----------------------------------------------------------------- # Properties - Vectorized Timestamp Properties/Methods diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 35baa3262d3dd..2bab49913d2db 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -5,20 +5,25 @@ import numpy as np from pandas._libs import lib -from pandas._libs.tslib import NaT, iNaT from pandas._libs.tslibs.period import ( Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, get_period_field_arr, period_asfreq_arr) -from pandas._libs.tslibs import period as libperiod -from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds +from pandas._libs.tslibs import ( + NaT, iNaT, + delta_to_nanoseconds, + period as libperiod) from pandas._libs.tslibs.fields import isleapyear_arr from pandas import compat +from pandas.compat import zip from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import ( is_integer_dtype, is_float_dtype, is_period_dtype) from pandas.core.dtypes.dtypes import PeriodDtype +from pandas.core.dtypes.generic import ABCSeries + +import pandas.core.common as com from pandas.tseries import frequencies from pandas.tseries.offsets import Tick, DateOffset @@ -157,6 +162,25 @@ def _from_ordinals(cls, values, freq=None): result._freq = Period._maybe_convert_freq(freq) return result + @classmethod + def _generate_range(cls, start, end, periods, freq, fields): + if freq is not 
None: + freq = Period._maybe_convert_freq(freq) + + field_count = len(fields) + if com._count_not_none(start, end) > 0: + if field_count > 0: + raise ValueError('Can either instantiate from fields ' + 'or endpoints, but not both') + subarr, freq = _get_ordinal_range(start, end, periods, freq) + elif field_count > 0: + subarr, freq = _range_from_fields(freq=freq, **fields) + else: + raise ValueError('Not enough parameters to construct ' + 'Period range') + + return subarr, freq + # -------------------------------------------------------------------- # Vectorized analogues of Period properties @@ -371,3 +395,102 @@ def _add_comparison_methods(cls): PeriodArrayMixin._add_comparison_methods() + + +# ----------------------------------------------------------------- +# Constructor Helpers + +def _get_ordinal_range(start, end, periods, freq, mult=1): + if com._count_not_none(start, end, periods) != 2: + raise ValueError('Of the three parameters: start, end, and periods, ' + 'exactly two must be specified') + + if freq is not None: + _, mult = frequencies.get_freq_code(freq) + + if start is not None: + start = Period(start, freq) + if end is not None: + end = Period(end, freq) + + is_start_per = isinstance(start, Period) + is_end_per = isinstance(end, Period) + + if is_start_per and is_end_per and start.freq != end.freq: + raise ValueError('start and end must have same freq') + if (start is NaT or end is NaT): + raise ValueError('start and end must not be NaT') + + if freq is None: + if is_start_per: + freq = start.freq + elif is_end_per: + freq = end.freq + else: # pragma: no cover + raise ValueError('Could not infer freq from start/end') + + if periods is not None: + periods = periods * mult + if start is None: + data = np.arange(end.ordinal - periods + mult, + end.ordinal + 1, mult, + dtype=np.int64) + else: + data = np.arange(start.ordinal, start.ordinal + periods, mult, + dtype=np.int64) + else: + data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) 
+ + return data, freq + + +def _range_from_fields(year=None, month=None, quarter=None, day=None, + hour=None, minute=None, second=None, freq=None): + if hour is None: + hour = 0 + if minute is None: + minute = 0 + if second is None: + second = 0 + if day is None: + day = 1 + + ordinals = [] + + if quarter is not None: + if freq is None: + freq = 'Q' + base = frequencies.FreqGroup.FR_QTR + else: + base, mult = frequencies.get_freq_code(freq) + if base != frequencies.FreqGroup.FR_QTR: + raise AssertionError("base must equal FR_QTR") + + year, quarter = _make_field_arrays(year, quarter) + for y, q in zip(year, quarter): + y, m = libperiod._quarter_to_myear(y, q, freq) + val = libperiod.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) + ordinals.append(val) + else: + base, mult = frequencies.get_freq_code(freq) + arrays = _make_field_arrays(year, month, day, hour, minute, second) + for y, mth, d, h, mn, s in zip(*arrays): + ordinals.append(libperiod.period_ordinal( + y, mth, d, h, mn, s, 0, 0, base)) + + return np.array(ordinals, dtype=np.int64), freq + + +def _make_field_arrays(*fields): + length = None + for x in fields: + if isinstance(x, (list, np.ndarray, ABCSeries)): + if length is not None and len(x) != length: + raise ValueError('Mismatched Period array lengths') + elif length is None: + length = len(x) + + arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) + else np.repeat(x, length) for x in fields] + + return arrays diff --git a/pandas/core/arrays/timedelta.py b/pandas/core/arrays/timedelta.py index f093cadec5a38..58830253526aa 100644 --- a/pandas/core/arrays/timedelta.py +++ b/pandas/core/arrays/timedelta.py @@ -4,7 +4,7 @@ import numpy as np from pandas._libs import tslibs -from pandas._libs.tslibs import Timedelta, NaT +from pandas._libs.tslibs import Timestamp, Timedelta, NaT, iNaT from pandas._libs.tslibs.fields import get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64 @@ -15,6 +15,8 @@ from 
pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna +from pandas.core.algorithms import checked_add_with_arr + from pandas.tseries.offsets import Tick, DateOffset from pandas.tseries.frequencies import to_offset @@ -92,6 +94,24 @@ def _add_offset(self, other): .format(typ=type(other).__name__, cls=type(self).__name__)) + def _add_datelike(self, other): + # adding a timedeltaindex to a datetimelike + from .datetimes import DatetimeArrayMixin + if isinstance(other, (DatetimeArrayMixin, np.ndarray)): + # if other is an ndarray, we assume it is datetime64-dtype + # defer to implementation in DatetimeIndex + if isinstance(other, np.ndarray): + other = DatetimeArrayMixin(other) + return other + self + else: + assert other is not NaT + other = Timestamp(other) + i8 = self.asi8 + result = checked_add_with_arr(i8, other.value, + arr_mask=self._isnan) + result = self._maybe_mask_results(result, fill_value=iNaT) + return DatetimeArrayMixin(result) + def _sub_datelike(self, other): assert other is not NaT raise TypeError("cannot subtract a datelike from a {cls}" @@ -198,3 +218,33 @@ def to_pytimedelta(self): nanoseconds = _field_accessor("nanoseconds", "nanoseconds", "\nNumber of nanoseconds (>= 0 and less " "than 1 microsecond) for each\nelement.\n") + + @property + def components(self): + """ + Return a dataframe of the components (days, hours, minutes, + seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. 
+ + Returns + ------- + a DataFrame + """ + from pandas import DataFrame + + columns = ['days', 'hours', 'minutes', 'seconds', + 'milliseconds', 'microseconds', 'nanoseconds'] + hasnans = self.hasnans + if hasnans: + def f(x): + if isna(x): + return [np.nan] * len(columns) + return x.components + else: + def f(x): + return x.components + + result = DataFrame([f(x) for x in self]) + result.columns = columns + if not hasnans: + result = result.astype('int64') + return result diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 96c30eeb92628..62a1a3ae59ebc 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -32,7 +32,6 @@ from pandas.core.dtypes.missing import isna import pandas.core.dtypes.concat as _concat -from pandas.core.algorithms import checked_add_with_arr from pandas.core.arrays.datetimes import DatetimeArrayMixin from pandas.core.indexes.base import Index, _index_shared_docs @@ -782,38 +781,6 @@ def __setstate__(self, state): raise Exception("invalid pickle state") _unpickle_compat = __setstate__ - def _sub_datelike(self, other): - # subtract a datetime from myself, yielding a ndarray[timedelta64[ns]] - if isinstance(other, (DatetimeIndex, np.ndarray)): - # if other is an ndarray, we assume it is datetime64-dtype - other = DatetimeIndex(other) - # require tz compat - if not self._has_same_tz(other): - raise TypeError("{cls} subtraction must have the same " - "timezones or no timezones" - .format(cls=type(self).__name__)) - result = self._sub_datelike_dti(other) - elif isinstance(other, (datetime, np.datetime64)): - assert other is not tslibs.NaT - other = Timestamp(other) - if other is tslibs.NaT: - return self - tslibs.NaT - # require tz compat - elif not self._has_same_tz(other): - raise TypeError("Timestamp subtraction must have the same " - "timezones or no timezones") - else: - i8 = self.asi8 - result = checked_add_with_arr(i8, -other.value, - arr_mask=self._isnan) - result = 
self._maybe_mask_results(result, - fill_value=tslibs.iNaT) - else: - raise TypeError("cannot subtract {cls} and {typ}" - .format(cls=type(self).__name__, - typ=type(other).__name__)) - return result.view('timedelta64[ns]') - def _maybe_update_attributes(self, attrs): """ Update Index attributes (e.g. freq) depending on op """ freq = attrs.get('freq', None) @@ -1581,48 +1548,11 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): is_year_end = _wrap_field_accessor('is_year_end') is_leap_year = _wrap_field_accessor('is_leap_year') + @Appender(DatetimeArrayMixin.normalize.__doc__) def normalize(self): - """ - Convert times to midnight. - - The time component of the date-time is converted to midnight i.e. - 00:00:00. This is useful in cases, when the time does not matter. - Length is unaltered. The timezones are unaffected. - - This method is available on Series with datetime values under - the ``.dt`` accessor, and directly on DatetimeIndex. - - Returns - ------- - DatetimeIndex or Series - The same type as the original data. Series will have the same - name and index. DatetimeIndex will have the same name. - - See Also - -------- - floor : Floor the datetimes to the specified freq. - ceil : Ceil the datetimes to the specified freq. - round : Round the datetimes to the specified freq. - - Examples - -------- - >>> idx = pd.DatetimeIndex(start='2014-08-01 10:00', freq='H', - ... 
periods=3, tz='Asia/Calcutta') - >>> idx - DatetimeIndex(['2014-08-01 10:00:00+05:30', - '2014-08-01 11:00:00+05:30', - '2014-08-01 12:00:00+05:30'], - dtype='datetime64[ns, Asia/Calcutta]', freq='H') - >>> idx.normalize() - DatetimeIndex(['2014-08-01 00:00:00+05:30', - '2014-08-01 00:00:00+05:30', - '2014-08-01 00:00:00+05:30'], - dtype='datetime64[ns, Asia/Calcutta]', freq=None) - """ - new_values = conversion.normalize_i8_timestamps(self.asi8, self.tz) - return DatetimeIndex(new_values, - freq='infer', - name=self.name).tz_localize(self.tz) + res = DatetimeArrayMixin.normalize(self) + res.name = self.name + return res @Substitution(klass='DatetimeIndex') @Appender(_shared_docs['searchsorted']) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index a531a57eb031f..a8e0c7f1aaa6a 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -16,7 +16,6 @@ is_bool_dtype, pandas_dtype, _ensure_object) -from pandas.core.dtypes.generic import ABCSeries import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc @@ -29,7 +28,7 @@ from pandas._libs import tslib, index as libindex from pandas._libs.tslibs.period import (Period, IncompatibleFrequency, DIFFERENT_FREQ_INDEX, - _validate_end_alias, _quarter_to_myear) + _validate_end_alias) from pandas._libs.tslibs import resolution, period from pandas.core.arrays.period import PeriodArrayMixin @@ -39,7 +38,6 @@ from pandas import compat from pandas.util._decorators import (Appender, Substitution, cache_readonly, deprecate_kwarg) -from pandas.compat import zip import pandas.core.indexes.base as ibase _index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -266,25 +264,6 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, def _engine(self): return self._engine_type(lambda: self, len(self)) - @classmethod - def _generate_range(cls, start, end, periods, freq, fields): - if freq is not None: - freq = 
Period._maybe_convert_freq(freq) - - field_count = len(fields) - if com._count_not_none(start, end) > 0: - if field_count > 0: - raise ValueError('Can either instantiate from fields ' - 'or endpoints, but not both') - subarr, freq = _get_ordinal_range(start, end, periods, freq) - elif field_count > 0: - subarr, freq = _range_from_fields(freq=freq, **fields) - else: - raise ValueError('Not enough parameters to construct ' - 'Period range') - - return subarr, freq - @classmethod def _simple_new(cls, values, name=None, freq=None, **kwargs): """ @@ -877,102 +856,6 @@ def tz_localize(self, tz, ambiguous='raise'): PeriodIndex._add_datetimelike_methods() -def _get_ordinal_range(start, end, periods, freq, mult=1): - if com._count_not_none(start, end, periods) != 2: - raise ValueError('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') - - if freq is not None: - _, mult = _gfc(freq) - - if start is not None: - start = Period(start, freq) - if end is not None: - end = Period(end, freq) - - is_start_per = isinstance(start, Period) - is_end_per = isinstance(end, Period) - - if is_start_per and is_end_per and start.freq != end.freq: - raise ValueError('start and end must have same freq') - if (start is tslib.NaT or end is tslib.NaT): - raise ValueError('start and end must not be NaT') - - if freq is None: - if is_start_per: - freq = start.freq - elif is_end_per: - freq = end.freq - else: # pragma: no cover - raise ValueError('Could not infer freq from start/end') - - if periods is not None: - periods = periods * mult - if start is None: - data = np.arange(end.ordinal - periods + mult, - end.ordinal + 1, mult, - dtype=np.int64) - else: - data = np.arange(start.ordinal, start.ordinal + periods, mult, - dtype=np.int64) - else: - data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) - - return data, freq - - -def _range_from_fields(year=None, month=None, quarter=None, day=None, - hour=None, minute=None, second=None, freq=None): - 
if hour is None: - hour = 0 - if minute is None: - minute = 0 - if second is None: - second = 0 - if day is None: - day = 1 - - ordinals = [] - - if quarter is not None: - if freq is None: - freq = 'Q' - base = frequencies.FreqGroup.FR_QTR - else: - base, mult = _gfc(freq) - if base != frequencies.FreqGroup.FR_QTR: - raise AssertionError("base must equal FR_QTR") - - year, quarter = _make_field_arrays(year, quarter) - for y, q in zip(year, quarter): - y, m = _quarter_to_myear(y, q, freq) - val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) - ordinals.append(val) - else: - base, mult = _gfc(freq) - arrays = _make_field_arrays(year, month, day, hour, minute, second) - for y, mth, d, h, mn, s in zip(*arrays): - ordinals.append(period.period_ordinal( - y, mth, d, h, mn, s, 0, 0, base)) - - return np.array(ordinals, dtype=np.int64), freq - - -def _make_field_arrays(*fields): - length = None - for x in fields: - if isinstance(x, (list, np.ndarray, ABCSeries)): - if length is not None and len(x) != length: - raise ValueError('Mismatched Period array lengths') - elif length is None: - length = len(x) - - arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) - else np.repeat(x, length) for x in fields] - - return arrays - - def pnow(freq=None): # deprecation, xref #13790 warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() " diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 3af825455caac..2708151d2c2bb 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -23,7 +23,6 @@ import pandas.compat as compat from pandas.tseries.frequencies import to_offset -from pandas.core.algorithms import checked_add_with_arr from pandas.core.base import _shared_docs from pandas.core.indexes.base import _index_shared_docs import pandas.core.common as com @@ -35,7 +34,7 @@ to_timedelta, _coerce_scalar_to_timedelta_type) from pandas.tseries.offsets import Tick, DateOffset from pandas._libs 
import (lib, index as libindex, - join as libjoin, Timedelta, NaT, iNaT) + join as libjoin, Timedelta, NaT) def _wrap_field_accessor(name): @@ -344,23 +343,6 @@ def _evaluate_with_timedelta_like(self, other, op): return NotImplemented return Index(result, name=self.name, copy=False) - def _add_datelike(self, other): - # adding a timedeltaindex to a datetimelike - from pandas import Timestamp, DatetimeIndex - if isinstance(other, (DatetimeIndex, np.ndarray)): - # if other is an ndarray, we assume it is datetime64-dtype - # defer to implementation in DatetimeIndex - other = DatetimeIndex(other) - return other + self - else: - assert other is not NaT - other = Timestamp(other) - i8 = self.asi8 - result = checked_add_with_arr(i8, other.value, - arr_mask=self._isnan) - result = self._maybe_mask_results(result, fill_value=iNaT) - return DatetimeIndex(result) - def _addsub_offset_array(self, other, op): # Add or subtract Array-like of DateOffset objects try: @@ -383,36 +365,6 @@ def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): microseconds = _wrap_field_accessor("microseconds") nanoseconds = _wrap_field_accessor("nanoseconds") - @property - def components(self): - """ - Return a dataframe of the components (days, hours, minutes, - seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. - - Returns - ------- - a DataFrame - """ - from pandas import DataFrame - - columns = ['days', 'hours', 'minutes', 'seconds', - 'milliseconds', 'microseconds', 'nanoseconds'] - hasnans = self.hasnans - if hasnans: - def f(x): - if isna(x): - return [np.nan] * len(columns) - return x.components - else: - def f(x): - return x.components - - result = DataFrame([f(x) for x in self]) - result.columns = columns - if not hasnans: - result = result.astype('int64') - return result - def total_seconds(self): """ Return total duration of each element expressed in seconds.
After this: - Tests - Comparisons - Tests - Deal with the fact that DatetimeIndex caches ranges, only feasible because of immutability - Tests! - formatting/`__repr__` methods - Teeeeeests - Double-check docstring accuracy
https://api.github.com/repos/pandas-dev/pandas/pulls/21843
2018-07-10T15:17:06Z
2018-07-10T17:41:40Z
null
2018-07-11T20:21:52Z
DOC: Mention both series and dataframe mirror methods
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 8da678e0adec0..6285b66cc21ee 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7827,17 +7827,18 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, Returns ------- - wh : same type as caller + Series or DataFrame Notes ----- The %(name)s method is an application of the if-then idiom. For each - element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the - element is used; otherwise the corresponding element from the DataFrame + element in the caller, if ``cond`` is ``%(cond)s`` the + element is used; otherwise the corresponding element from ``other`` is used. - The signature for :func:`DataFrame.where` differs from - :func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to + The signature for :func:`Series.where` or + :func:`DataFrame.where` differs from :func:`numpy.where`. + Roughly ``DataFrame.where(m, df2)`` is equivalent to ``np.where(m, df1, df2)``. For further details and examples see the ``%(name)s`` documentation in @@ -7846,7 +7847,9 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, See Also -------- :func:`DataFrame.%(name_other)s` : Return an object of same shape as - self + caller. + :func:`Series.%(name_other)s` : Return an object of same shape as + caller. Examples --------
- [x] closes #21825 Now the automatically-generated doc features both `Series` and `Dataframe` in the _See also:_ section.
https://api.github.com/repos/pandas-dev/pandas/pulls/21840
2018-07-10T12:04:13Z
2018-12-14T16:00:47Z
null
2018-12-14T16:00:47Z
ENH: Add Timedelta Support to JSON Reader with orient=table (#21140)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 3b04d9937d7f2..3404bcede4e54 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -179,6 +179,7 @@ Other Enhancements - :class:`IntervalIndex` has gained the :meth:`~IntervalIndex.set_closed` method to change the existing ``closed`` value (:issue:`21670`) - :func:`~DataFrame.to_csv` and :func:`~DataFrame.to_json` now support ``compression='infer'`` to infer compression based on filename (:issue:`15008`) - :func:`to_timedelta` now supports iso-formated timedelta strings (:issue:`21877`) +- :func:`read_json` now parses timedelta with `orient='table'` (:issue:`21140`) - :class:`Series` and :class:`DataFrame` now support :class:`Iterable` in constructor (:issue:`2193`) .. _whatsnew_0240.api_breaking: diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 2dc176648fb31..78ce7c7a00224 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -6,7 +6,7 @@ import warnings import pandas._libs.json as json -from pandas import DataFrame +from pandas import DataFrame, to_timedelta from pandas.api.types import CategoricalDtype import pandas.core.common as com from pandas.core.dtypes.common import ( @@ -163,7 +163,7 @@ def convert_json_field_to_pandas_type(field): elif typ == 'boolean': return 'bool' elif typ == 'duration': - return 'timedelta64' + return 'timedelta64[ns]' elif typ == 'datetime': if field.get('tz'): return 'datetime64[ns, {tz}]'.format(tz=field['tz']) @@ -306,10 +306,9 @@ def parse_table_schema(json, precise_float): raise NotImplementedError('table="orient" can not yet read timezone ' 'data') - # No ISO constructor for Timedelta as of yet, so need to raise - if 'timedelta64' in dtypes.values(): - raise NotImplementedError('table="orient" can not yet read ' - 'ISO-formatted Timedelta data') + for col, dtype in dtypes.items(): + if dtype == 'timedelta64[ns]': + df[col] = to_timedelta(df[col]) df 
= df.astype(dtypes) diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index b6483d0e978ba..10ce834b08873 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -390,7 +390,7 @@ def test_convert_pandas_type_to_json_field_categorical(self, kind, ({'type': 'integer'}, 'int64'), ({'type': 'number'}, 'float64'), ({'type': 'boolean'}, 'bool'), - ({'type': 'duration'}, 'timedelta64'), + ({'type': 'duration'}, 'timedelta64[ns]'), ({'type': 'datetime'}, 'datetime64[ns]'), ({'type': 'datetime', 'tz': 'US/Hawaii'}, 'datetime64[ns, US/Hawaii]'), ({'type': 'any'}, 'object'), @@ -499,6 +499,7 @@ class TestTableOrientReader(object): 'level_0']) @pytest.mark.parametrize("vals", [ {'ints': [1, 2, 3, 4]}, + {'timedeltas': pd.timedelta_range('1H', periods=4, freq='T')}, {'objects': ['a', 'b', 'c', 'd']}, {'date_ranges': pd.date_range('2016-01-01', freq='d', periods=4)}, {'categoricals': pd.Series(pd.Categorical(['a', 'b', 'c', 'c']))}, @@ -516,7 +517,6 @@ def test_read_json_table_orient(self, index_nm, vals, recwarn): @pytest.mark.parametrize("index_nm", [ None, "idx", "index"]) @pytest.mark.parametrize("vals", [ - {'timedeltas': pd.timedelta_range('1H', periods=4, freq='T')}, {'timezones': pd.date_range('2016-01-01', freq='d', periods=4, tz='US/Central')}]) def test_read_json_table_orient_raises(self, index_nm, vals, recwarn): @@ -530,7 +530,7 @@ def test_comprehensive(self): {'A': [1, 2, 3, 4], 'B': ['a', 'b', 'c', 'c'], 'C': pd.date_range('2016-01-01', freq='d', periods=4), - # 'D': pd.timedelta_range('1H', periods=4, freq='T'), + 'D': pd.timedelta_range('1H', periods=4, freq='T'), 'E': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'])), 'F': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'], ordered=True)),
- [ ] closes #21140 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/21827
2018-07-09T12:22:31Z
2018-11-23T03:49:57Z
null
2018-11-23T03:49:57Z
[CLN] cy cleanup, de-duplication
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 41047d9c25c22..e040fd1f52478 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -44,9 +44,9 @@ cdef inline bint is_definitely_invalid_key(object val): or PyList_Check(val) or hasattr(val, '_data')) -def get_value_at(ndarray arr, object loc): +cpdef get_value_at(ndarray arr, object loc, object tz=None): if arr.descr.type_num == NPY_DATETIME: - return Timestamp(util.get_value_at(arr, loc)) + return Timestamp(util.get_value_at(arr, loc), tz=tz) elif arr.descr.type_num == NPY_TIMEDELTA: return Timedelta(util.get_value_at(arr, loc)) return util.get_value_at(arr, loc) @@ -69,12 +69,7 @@ cpdef object get_value_box(ndarray arr, object loc): if i >= sz or sz == 0 or i < 0: raise IndexError('index out of bounds') - if arr.descr.type_num == NPY_DATETIME: - return Timestamp(util.get_value_1d(arr, i)) - elif arr.descr.type_num == NPY_TIMEDELTA: - return Timedelta(util.get_value_1d(arr, i)) - else: - return util.get_value_1d(arr, i) + return get_value_at(arr, i, tz=None) # Don't populate hash tables in monotonic indexes larger than this @@ -115,11 +110,7 @@ cdef class IndexEngine: if PySlice_Check(loc) or cnp.PyArray_Check(loc): return arr[loc] else: - if arr.descr.type_num == NPY_DATETIME: - return Timestamp(util.get_value_at(arr, loc), tz=tz) - elif arr.descr.type_num == NPY_TIMEDELTA: - return Timedelta(util.get_value_at(arr, loc)) - return util.get_value_at(arr, loc) + return get_value_at(arr, loc, tz=tz) cpdef set_value(self, ndarray arr, object key, object value): """ diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx index fb707a3c3e5e2..c680706b7b2d2 100644 --- a/pandas/_libs/indexing.pyx +++ b/pandas/_libs/indexing.pyx @@ -1,10 +1,10 @@ # cython: profile=False cdef class _NDFrameIndexerBase: - ''' + """ A base class for _NDFrameIndexer for fast instantiation and attribute access. 
- ''' + """ cdef public object obj, name, _ndim def __init__(self, name, obj): diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index 0901d474d044c..12d35f7ce2f58 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -8,9 +8,7 @@ Cython implementations of functions resembling the stdlib calendar module cimport cython from cython cimport Py_ssize_t -cimport numpy as cnp from numpy cimport int64_t, int32_t -cnp.import_array() from locale import LC_TIME from strptime import LocaleTime diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 9964ca0847ce7..a3b7d6c59200c 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -556,6 +556,9 @@ cdef inline void localize_tso(_TSObject obj, tzinfo tz): elif treat_tz_as_dateutil(tz): dt64_to_dtstruct(obj.value + deltas[pos], &obj.dts) else: + # TODO: this case is never reached in the tests, but get_dst_info + # has a path that returns typ = None and empty deltas. + # --> Is this path possible? 
pass obj.tzinfo = tz @@ -1145,10 +1148,7 @@ cdef ndarray[int64_t] _normalize_local(ndarray[int64_t] stamps, object tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - _pos = trans.searchsorted(stamps, side='right') - 1 - if _pos.dtype != np.int64: - _pos = _pos.astype(np.int64) - pos = _pos + pos = trans.searchsorted(stamps, side='right') - 1 # statictzinfo if typ not in ['pytz', 'dateutil']: diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index d89c06d43ccb9..4ed3e13dc0025 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -960,10 +960,7 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps, # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - _pos = trans.searchsorted(stamps, side='right') - 1 - if _pos.dtype != np.int64: - _pos = _pos.astype(np.int64) - pos = _pos + pos = trans.searchsorted(stamps, side='right') - 1 # statictzinfo if typ not in ['pytz', 'dateutil']: diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 8565857fa945f..cfb7e1dce9309 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -103,10 +103,7 @@ cdef _reso_local(ndarray[int64_t] stamps, object tz): # Adjust datetime64 timestamp, recompute datetimestruct trans, deltas, typ = get_dst_info(tz) - _pos = trans.searchsorted(stamps, side='right') - 1 - if _pos.dtype != np.int64: - _pos = _pos.astype(np.int64) - pos = _pos + pos = trans.searchsorted(stamps, side='right') - 1 # statictzinfo if typ not in ['pytz', 'dateutil']: diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 74fadbdb64763..b3fab83fef415 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -188,7 +188,7 @@ cdef object get_utc_trans_times_from_dateutil_tz(object tz): return new_trans -cpdef ndarray 
unbox_utcoffsets(object transinfo): +cpdef ndarray[int64_t, ndim=1] unbox_utcoffsets(object transinfo): cdef: Py_ssize_t i, sz ndarray[int64_t] arr @@ -216,6 +216,8 @@ cdef object get_dst_info(object tz): """ cache_key = tz_cache_key(tz) if cache_key is None: + # e.g. pytz.FixedOffset, matplotlib.dates._UTC, + # psycopg2.tz.FixedOffsetTimezone num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000 return (np.array([NPY_NAT + 1], dtype=np.int64), np.array([num], dtype=np.int64),
Small cleanups, remove some unnecessary casting, add typing in timezones.
https://api.github.com/repos/pandas-dev/pandas/pulls/21826
2018-07-09T11:55:38Z
2018-07-11T00:03:59Z
2018-07-11T00:03:59Z
2018-07-11T01:20:33Z