title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
implement bits of numpy_helper in cython where possible
diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index 6c2029fff8a1a..844be9b292be3 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -18,33 +18,6 @@ The full license is in the LICENSE file, distributed with this software. PANDAS_INLINE npy_int64 get_nat(void) { return NPY_MIN_INT64; } -PANDAS_INLINE int is_integer_object(PyObject* obj) { - return (!PyBool_Check(obj)) && PyArray_IsIntegerScalar(obj); -} - -PANDAS_INLINE int is_float_object(PyObject* obj) { - return (PyFloat_Check(obj) || PyArray_IsScalar(obj, Floating)); -} -PANDAS_INLINE int is_complex_object(PyObject* obj) { - return (PyComplex_Check(obj) || PyArray_IsScalar(obj, ComplexFloating)); -} - -PANDAS_INLINE int is_bool_object(PyObject* obj) { - return (PyBool_Check(obj) || PyArray_IsScalar(obj, Bool)); -} - -PANDAS_INLINE int is_string_object(PyObject* obj) { - return (PyString_Check(obj) || PyUnicode_Check(obj)); -} - -PANDAS_INLINE int is_datetime64_object(PyObject* obj) { - return PyArray_IsScalar(obj, Datetime); -} - -PANDAS_INLINE int is_timedelta64_object(PyObject* obj) { - return PyArray_IsScalar(obj, Timedelta); -} - PANDAS_INLINE int assign_value_1d(PyArrayObject* ap, Py_ssize_t _i, PyObject* v) { npy_intp i = (npy_intp)_i; @@ -80,17 +53,4 @@ void set_array_not_contiguous(PyArrayObject* ao) { ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS); } -// If arr is zerodim array, return a proper array scalar (e.g. np.int64). -// Otherwise, return arr as is. 
-PANDAS_INLINE PyObject* unbox_if_zerodim(PyObject* arr) { - if (PyArray_IsZeroDim(arr)) { - PyObject* ret; - ret = PyArray_ToScalar(PyArray_DATA(arr), arr); - return ret; - } else { - Py_INCREF(arr); - return arr; - } -} - #endif // PANDAS__LIBS_SRC_NUMPY_HELPER_H_ diff --git a/pandas/_libs/src/util.pxd b/pandas/_libs/src/util.pxd index be6591a118dc5..cf23df1279f34 100644 --- a/pandas/_libs/src/util.pxd +++ b/pandas/_libs/src/util.pxd @@ -1,24 +1,76 @@ -from numpy cimport ndarray +from numpy cimport ndarray, NPY_C_CONTIGUOUS, NPY_F_CONTIGUOUS cimport numpy as cnp +cnp.import_array() + cimport cpython +from cpython cimport PyTypeObject + +cdef extern from "Python.h": + # Note: importing extern-style allows us to declare these as nogil + # functions, whereas `from cpython cimport` does not. + bint PyUnicode_Check(object obj) nogil + bint PyString_Check(object obj) nogil + bint PyBool_Check(object obj) nogil + bint PyFloat_Check(object obj) nogil + bint PyComplex_Check(object obj) nogil + bint PyObject_TypeCheck(object obj, PyTypeObject* type) nogil + + +cdef extern from "numpy/arrayobject.h": + PyTypeObject PyFloatingArrType_Type + +cdef extern from "numpy/ndarrayobject.h": + PyTypeObject PyTimedeltaArrType_Type + PyTypeObject PyDatetimeArrType_Type + PyTypeObject PyComplexFloatingArrType_Type + PyTypeObject PyBoolArrType_Type + + bint PyArray_IsIntegerScalar(obj) nogil + bint PyArray_Check(obj) nogil + +# -------------------------------------------------------------------- +# Type Checking + +cdef inline bint is_string_object(object obj) nogil: + return PyString_Check(obj) or PyUnicode_Check(obj) + + +cdef inline bint is_integer_object(object obj) nogil: + return not PyBool_Check(obj) and PyArray_IsIntegerScalar(obj) + + +cdef inline bint is_float_object(object obj) nogil: + return (PyFloat_Check(obj) or + (PyObject_TypeCheck(obj, &PyFloatingArrType_Type))) + +cdef inline bint is_complex_object(object obj) nogil: + return (PyComplex_Check(obj) or + 
PyObject_TypeCheck(obj, &PyComplexFloatingArrType_Type)) + + +cdef inline bint is_bool_object(object obj) nogil: + return (PyBool_Check(obj) or + PyObject_TypeCheck(obj, &PyBoolArrType_Type)) + + +cdef inline bint is_timedelta64_object(object obj) nogil: + return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) + + +cdef inline bint is_datetime64_object(object obj) nogil: + return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) + +# -------------------------------------------------------------------- cdef extern from "numpy_helper.h": void set_array_not_contiguous(ndarray ao) - int is_integer_object(object) - int is_float_object(object) - int is_complex_object(object) - int is_bool_object(object) - int is_string_object(object) - int is_datetime64_object(object) - int is_timedelta64_object(object) int assign_value_1d(ndarray, Py_ssize_t, object) except -1 cnp.int64_t get_nat() object get_value_1d(ndarray, Py_ssize_t) char *get_c_string(object) except NULL object char_to_string(char*) - object unbox_if_zerodim(object arr) ctypedef fused numeric: cnp.int8_t @@ -112,3 +164,22 @@ cdef inline bint _checknan(object val): cdef inline bint is_period_object(object val): return getattr(val, '_typ', '_typ') == 'period' + + +cdef inline object unbox_if_zerodim(object arr): + """ + If arr is zerodim array, return a proper array scalar (e.g. np.int64). + Otherwise, return arr as is. 
+ + Parameters + ---------- + arr : object + + Returns + ------- + result : object + """ + if cnp.PyArray_IsZeroDim(arr): + return cnp.PyArray_ToScalar(cnp.PyArray_DATA(arr), arr) + else: + return arr diff --git a/setup.py b/setup.py index 859d50303ecb1..a140221f943ea 100755 --- a/setup.py +++ b/setup.py @@ -686,8 +686,7 @@ def pxd(name): ext.sources[0] = root + suffix ujson_ext = Extension('pandas._libs.json', - depends=['pandas/_libs/src/ujson/lib/ultrajson.h', - 'pandas/_libs/src/numpy_helper.h'], + depends=['pandas/_libs/src/ujson/lib/ultrajson.h'], sources=(['pandas/_libs/src/ujson/python/ujson.c', 'pandas/_libs/src/ujson/python/objToJSON.c', 'pandas/_libs/src/ujson/python/JSONtoObj.c',
Like with the transition to tslibs.np_datetime, this implements pieces of numpy_helper.h directly in cython in util.pxd. The generated C should be equivalent to existing versions, but that is worth double-checking. One dependency is removed from setup.py that was missed in #19415, should have been deleted there.
https://api.github.com/repos/pandas-dev/pandas/pulls/19450
2018-01-30T00:33:23Z
2018-01-31T11:30:22Z
2018-01-31T11:30:22Z
2018-02-11T21:58:35Z
Continue de-nesting core.ops
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index ba8a15b60ba56..6ea4a81cb52a1 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -39,8 +39,7 @@ ABCSeries, ABCDataFrame, ABCIndex, - ABCPeriodIndex, - ABCSparseSeries) + ABCSparseSeries, ABCSparseArray) def _gen_eval_kwargs(name): @@ -445,8 +444,14 @@ def names(x): return new_methods -def add_methods(cls, new_methods, force): +def add_methods(cls, new_methods): for name, method in new_methods.items(): + # For most methods, if we find that the class already has a method + # of the same name, it is OK to over-write it. The exception is + # inplace methods (__iadd__, __isub__, ...) for SparseArray, which + # retain the np.ndarray versions. + force = not (issubclass(cls, ABCSparseArray) and + name.startswith('__i')) if force or name not in cls.__dict__: bind_method(cls, name, method) @@ -454,8 +459,7 @@ def add_methods(cls, new_methods, force): # ---------------------------------------------------------------------- # Arithmetic def add_special_arithmetic_methods(cls, arith_method=None, - comp_method=None, bool_method=None, - force=False): + comp_method=None, bool_method=None): """ Adds the full suite of special arithmetic methods (``__add__``, ``__sub__``, etc.) to the class. 
@@ -469,9 +473,6 @@ def add_special_arithmetic_methods(cls, arith_method=None, factory for rich comparison - signature: f(op, name, str_rep) bool_method : function (optional) factory for boolean methods - signature: f(op, name, str_rep) - force : bool, default False - if False, checks whether function is defined **on ``cls.__dict__``** - before defining if True, always defines functions on class base """ new_methods = _create_methods(cls, arith_method, comp_method, bool_method, special=True) @@ -512,12 +513,11 @@ def f(self, other): __ior__=_wrap_inplace_method(new_methods["__or__"]), __ixor__=_wrap_inplace_method(new_methods["__xor__"]))) - add_methods(cls, new_methods=new_methods, force=force) + add_methods(cls, new_methods=new_methods) def add_flex_arithmetic_methods(cls, flex_arith_method, - flex_comp_method=None, flex_bool_method=None, - force=False): + flex_comp_method=None, flex_bool_method=None): """ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. 
@@ -529,9 +529,6 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, f(op, name, str_rep) flex_comp_method : function, optional, factory for rich comparison - signature: f(op, name, str_rep) - force : bool, default False - if False, checks whether function is defined **on ``cls.__dict__``** - before defining if True, always defines functions on class base """ new_methods = _create_methods(cls, flex_arith_method, flex_comp_method, flex_bool_method, @@ -544,7 +541,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, if k in new_methods: new_methods.pop(k) - add_methods(cls, new_methods=new_methods, force=force) + add_methods(cls, new_methods=new_methods) # ----------------------------------------------------------------------------- @@ -614,14 +611,11 @@ def na_op(x, y): result = np.empty(x.size, dtype=dtype) mask = notna(x) & notna(y) result[mask] = op(x[mask], com._values_from_object(y[mask])) - elif isinstance(x, np.ndarray): + else: + assert isinstance(x, np.ndarray) result = np.empty(len(x), dtype=x.dtype) mask = notna(x) result[mask] = op(x[mask], y) - else: - raise TypeError("{typ} cannot perform the operation " - "{op}".format(typ=type(x).__name__, - op=str_rep)) result, changed = maybe_upcast_putmask(result, ~mask, np.nan) @@ -658,6 +652,10 @@ def wrapper(left, right, name=name, na_op=na_op): index=left.index, name=res_name, dtype=result.dtype) + elif is_categorical_dtype(left): + raise TypeError("{typ} cannot perform the operation " + "{op}".format(typ=type(left).__name__, op=str_rep)) + lvalues = left.values rvalues = right if isinstance(rvalues, ABCSeries): @@ -745,8 +743,12 @@ def na_op(x, y): elif is_categorical_dtype(y) and not is_scalar(y): return op(y, x) - if is_object_dtype(x.dtype): + elif is_object_dtype(x.dtype): result = _comp_method_OBJECT_ARRAY(op, x, y) + + elif is_datetimelike_v_numeric(x, y): + raise TypeError("invalid type comparison") + else: # we want to compare like types @@ -754,15 +756,6 @@ def na_op(x, y): # we are 
not NotImplemented, otherwise # we would allow datetime64 (but viewed as i8) against # integer comparisons - if is_datetimelike_v_numeric(x, y): - raise TypeError("invalid type comparison") - - # numpy does not like comparisons vs None - if is_scalar(y) and isna(y): - if name == '__ne__': - return np.ones(len(x), dtype=bool) - else: - return np.zeros(len(x), dtype=bool) # we have a datetime/timedelta and may need to convert mask = None @@ -795,15 +788,18 @@ def wrapper(self, other, axis=None): if axis is not None: self._get_axis_number(axis) - if isinstance(other, ABCSeries): + if isinstance(other, ABCDataFrame): # pragma: no cover + # Defer to DataFrame implementation; fail early + return NotImplemented + + elif isinstance(other, ABCSeries): name = com._maybe_match_name(self, other) if not self._indexed_same(other): msg = 'Can only compare identically-labeled Series objects' raise ValueError(msg) - return self._constructor(na_op(self.values, other.values), - index=self.index, name=name) - elif isinstance(other, ABCDataFrame): # pragma: no cover - return NotImplemented + res_values = na_op(self.values, other.values) + return self._constructor(res_values, index=self.index, name=name) + elif isinstance(other, (np.ndarray, pd.Index)): # do not check length of zerodim array # as it will broadcast @@ -811,23 +807,25 @@ def wrapper(self, other, axis=None): len(self) != len(other)): raise ValueError('Lengths must match to compare') - if isinstance(other, ABCPeriodIndex): - # temp workaround until fixing GH 13637 - # tested in test_nat_comparisons - # (pandas.tests.series.test_operators.TestSeriesOperators) - return self._constructor(na_op(self.values, - other.astype(object).values), - index=self.index) - - return self._constructor(na_op(self.values, np.asarray(other)), + res_values = na_op(self.values, np.asarray(other)) + return self._constructor(res_values, index=self.index).__finalize__(self) - elif isinstance(other, pd.Categorical): - if not 
is_categorical_dtype(self): - msg = ("Cannot compare a Categorical for op {op} with Series " - "of dtype {typ}.\nIf you want to compare values, use " - "'series <op> np.asarray(other)'.") - raise TypeError(msg.format(op=op, typ=self.dtype)) + elif (isinstance(other, pd.Categorical) and + not is_categorical_dtype(self)): + raise TypeError("Cannot compare a Categorical for op {op} with " + "Series of dtype {typ}.\nIf you want to compare " + "values, use 'series <op> np.asarray(other)'." + .format(op=op, typ=self.dtype)) + + elif is_scalar(other) and isna(other): + # numpy does not like comparisons vs None + if op is operator.ne: + res_values = np.ones(len(self), dtype=bool) + else: + res_values = np.zeros(len(self), dtype=bool) + return self._constructor(res_values, index=self.index, + name=self.name, dtype='bool') if is_categorical_dtype(self): # cats are a special case as get_values() would return an ndarray, @@ -877,11 +875,10 @@ def na_op(x, y): y = _ensure_object(y) result = lib.vec_binop(x, y, op) else: + # let null fall thru + if not isna(y): + y = bool(y) try: - - # let null fall thru - if not isna(y): - y = bool(y) result = lib.scalar_binop(x, y, op) except: msg = ("cannot compare a dtyped [{dtype}] array " @@ -899,26 +896,31 @@ def wrapper(self, other): self, other = _align_method_SERIES(self, other, align_asobject=True) - if isinstance(other, ABCSeries): + if isinstance(other, ABCDataFrame): + # Defer to DataFrame implementation; fail early + return NotImplemented + + elif isinstance(other, ABCSeries): name = com._maybe_match_name(self, other) is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other) filler = (fill_int if is_self_int_dtype and is_other_int_dtype else fill_bool) - return filler(self._constructor(na_op(self.values, other.values), - index=self.index, name=name)) - elif isinstance(other, ABCDataFrame): - return NotImplemented + res_values = na_op(self.values, other.values) + unfilled 
= self._constructor(res_values, + index=self.index, name=name) + return filler(unfilled) else: # scalars, list, tuple, np.array filler = (fill_int if is_self_int_dtype and is_integer_dtype(np.asarray(other)) else fill_bool) - return filler(self._constructor( - na_op(self.values, other), - index=self.index)).__finalize__(self) + + res_values = na_op(self.values, other) + unfilled = self._constructor(res_values, index=self.index) + return filler(unfilled).__finalize__(self) return wrapper @@ -1023,21 +1025,23 @@ def na_op(x, y): mask = notna(xrav) & notna(yrav) xrav = xrav[mask] - # we may need to manually - # broadcast a 1 element array if yrav.shape != mask.shape: - yrav = np.empty(mask.shape, dtype=yrav.dtype) - yrav.fill(yrav.item()) + # FIXME: GH#5284, GH#5035, GH#19448 + # Without specifically raising here we get mismatched + # errors in Py3 (TypeError) vs Py2 (ValueError) + raise ValueError('Cannot broadcast operands together.') yrav = yrav[mask] - if np.prod(xrav.shape) and np.prod(yrav.shape): + if xrav.size: with np.errstate(all='ignore'): result[mask] = op(xrav, yrav) - elif hasattr(x, 'size'): + + elif isinstance(x, np.ndarray): + # mask is only meaningful for x result = np.empty(x.size, dtype=x.dtype) mask = notna(xrav) xrav = xrav[mask] - if np.prod(xrav.shape): + if xrav.size: with np.errstate(all='ignore'): result[mask] = op(xrav, y) else: diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 1c23527cf57c4..62a467bec2683 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -819,4 +819,4 @@ def from_coo(cls, A, dense_index=False): ops.add_special_arithmetic_methods(SparseSeries, ops._arith_method_SPARSE_SERIES, comp_method=ops._arith_method_SPARSE_SERIES, - bool_method=None, force=True) + bool_method=None)
- Move `isinstance(other, ABCDataFrame)` checks to consistently be the first thing checked in Series ops - Remove `force` kwarg, define it in the one place it is used. - Remove kludge for `PeriodIndex` - Handle categorical_dtype earlier in arith_method_SERIES, decreasing complexity of the closure. - Handle scalar na other earlier in _comp_method_SERIES, decreasing complexity of the closure. - Remove broken broadcasting case from _arith_method_FRAME (closes #19421)
https://api.github.com/repos/pandas-dev/pandas/pulls/19448
2018-01-29T18:27:20Z
2018-02-02T11:29:53Z
2018-02-02T11:29:53Z
2018-02-04T16:43:27Z
catch PerformanceWarning
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 38e5753d1752d..8feee6e6cff68 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -19,6 +19,7 @@ from pandas.core.indexes.timedeltas import Timedelta import pandas.core.nanops as nanops +from pandas.errors import PerformanceWarning from pandas.compat import range, zip from pandas import compat from pandas.util.testing import (assert_series_equal, assert_almost_equal, @@ -871,8 +872,9 @@ def test_timedelta64_operations_with_DateOffset(self): expected = Series([timedelta(minutes=4, seconds=3)] * 3) assert_series_equal(result, expected) - result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3), - pd.offsets.Hour(2)]) + with tm.assert_produces_warning(PerformanceWarning): + result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3), + pd.offsets.Hour(2)]) expected = Series([timedelta(minutes=6, seconds=3), timedelta( minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)]) assert_series_equal(result, expected)
- [x] closes #19409
https://api.github.com/repos/pandas-dev/pandas/pulls/19446
2018-01-29T17:16:36Z
2018-01-29T23:59:33Z
2018-01-29T23:59:33Z
2018-02-04T16:41:18Z
Simple solutions for users running into problems running large queries in pandas.read_gbq()
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index b452b0cf5ddd4..b5027adfa7a41 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -1,4 +1,5 @@ """ Google BigQuery support """ +import datetime def _try_import(): @@ -23,6 +24,8 @@ def _try_import(): def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False, verbose=True, private_key=None, dialect='legacy', + allow_large_results=False, query_dataset='query_dataset', + query_tableid=None, **kwargs): r"""Load data from Google BigQuery. @@ -74,6 +77,17 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, see `BigQuery SQL Reference <https://cloud.google.com/bigquery/sql-reference/>`__ + allow_large_results : boolean (default False) + Allows large queries greater than quota limit - Use when a GenericGBQException + error is thrown with "Reason: responseTooLarge" + See: https://cloud.google.com/bigquery/docs/writing-results#large-results + + query_dataset : str (optional) + Google BigQuery dataset to which the results of a large query will + be saved + query_tableid : str (optional) + Google BigQuery tableid to which the results will be saved + `**kwargs` : Arbitrary keyword arguments configuration (dict): query config parameters for job processing. 
For example: @@ -90,6 +104,9 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, """ pandas_gbq = _try_import() + kwargs = update_gbq_kwargs_for_big_queries( + kwargs, project_id, allow_large_results, query_dataset, query_tableid + ) return pandas_gbq.read_gbq( query, project_id=project_id, index_col=index_col, col_order=col_order, @@ -106,3 +123,83 @@ def to_gbq(dataframe, destination_table, project_id, chunksize=10000, chunksize=chunksize, verbose=verbose, reauth=reauth, if_exists=if_exists, private_key=private_key) + + +def update_gbq_kwargs_for_big_queries(read_gbq_kwargs, project_id, + allow_large_results, query_dataset, + query_tableid, + write_disposition="WRITE_TRUNCATE"): + """ + + Parameters + ---------- + read_gbq_kwargs: dict + query config parameters for job processing passed to the read_gbq function + For example: + + configuration = {'query': {'useQueryCache': False}} + + For more information see `BigQuery SQL Reference + <https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query>`__ + project_id : str + Google BigQuery Account project ID. 
+ allow_large_results : boolean (default False) + Allows large queries greater than quota limit - Use when a GenericGBQException + error is thrown with "Reason: responseTooLarge" + See: https://cloud.google.com/bigquery/docs/writing-results#large-results + intermediate_dataset : str + Google BigQuery dataset to which the results of a large query will + be saved + query_tableid : str + Google BigQuery tableid to which the results will be saved + write_disposition : str + Use "WRITE_TRUNCATE" to overwrite old intermediate BigQuery query data + + + + Returns + ------- + updated kwargs for allowing large queries from Google BigQuery + + """ + + # Only update kwargs if user sets allow_large_results to True: + if not allow_large_results: + return read_gbq_kwargs + else: + print("Attempting to write intermediate query data to %s/%s" % + (project_id, query_dataset)) + + + # Create tableId if left as None + if query_tableid is None: + # Generic name with timestamp unique down to the microsecond: + query_tableid = 'intermediate_query_results_' \ + + datetime.datetime.utcnow().strftime("%Y%M%d_%H%m_%f") + + # New configuration for allowing large queries: + updated_query_config = {'query': { + 'allowLargeResults': allow_large_results, + 'destinationTable': { + 'projectId': project_id, + 'datasetId': query_dataset, + 'tableId': query_tableid + }, + 'writeDisposition': write_disposition + }} + + # Append to predefined configuration: + if 'configuration' not in read_gbq_kwargs: + read_gbq_kwargs['configuration'] = updated_query_config + else: + # Append new configuration to user prescribed query configuration: + read_gbq_kwargs['configuration'] + if 'query' not in read_gbq_kwargs['configuration']: + read_gbq_kwargs['configuration']['query'] = updated_query_config['query'] + else: + for updated_key in updated_query_config: + if updated_key not in read_gbq_kwargs['configuration']['query']: + read_gbq_kwargs['configuration']['query'][updated_key] = \ + 
updated_query_config['query'][updated_key] + + return read_gbq_kwargs
- This solves the `allowLargeResults` problem users bump into when running large queries using `pandas.read_gbq()`. Here is an example: ``` > import pandas > project_id = '' # add your project id: > q = """ SELECT contributor_id, FROM [bigquery-public-data:samples.wikipedia] LIMIT 1000000000 """ > pandas.read_gbq(q, project_id) GenericGBQException: Reason: 403 GET https://www.googleapis.com/bigquery/v2/projects/data-reply/queries/1777aeb7-b1e6-49ca-9ab0-6bd41d162d49?timeoutMs=900&maxResults=0: Response too large to return. Consider setting allowLargeResults to true in your job configuration. For more information, see https://cloud.google.com/bigquery/troubleshooting-errors ``` The problem is now fixed by doing the following: 1. Create an intermediate query data dataset in GBQ 1. Run the `pandas.read_gbq()` function now with `allow_large_results=True` and the name of the new dataset created in the previous step. ``` > pandas.read_gbq(q, project_id, allow_large_results=True, query_dataset="name of intermediate query dataset") ``` - This removes the undocumented annoyance users go through to figure out how to add their own customized configuration to get through this problem - If this seems useful, I will write tests - I know it seems like code that I should add to pandas-gbq but to me the configuration modification should be done at the top level (which in this case is pandas.read_gbq)
https://api.github.com/repos/pandas-dev/pandas/pulls/19445
2018-01-29T17:10:17Z
2018-01-29T23:58:00Z
null
2023-05-11T01:17:16Z
Change Future to DeprecationWarning for make_block_same_class
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index ec884035fe0c4..f3e5e4c99a899 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -230,7 +230,7 @@ def make_block_same_class(self, values, placement=None, ndim=None, if dtype is not None: # issue 19431 fastparquet is passing this warnings.warn("dtype argument is deprecated, will be removed " - "in a future release.", FutureWarning) + "in a future release.", DeprecationWarning) if placement is None: placement = self.mgr_locs return make_block(values, placement=placement, ndim=ndim, diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index f17306b8b52f9..e3490f465b24a 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -288,9 +288,10 @@ def test_delete(self): def test_make_block_same_class(self): # issue 19431 block = create_block('M8[ns, US/Eastern]', [3]) - with tm.assert_produces_warning(FutureWarning, + with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): - block.make_block_same_class(block.values, dtype=block.values.dtype) + block.make_block_same_class(block.values.values, + dtype=block.values.dtype) class TestDatetimeBlock(object):
xref https://github.com/pandas-dev/pandas/pull/19434 @jreback I didn't want to further discuss on the PR, so let's do that here :-) By having it as a FutureWarning, we only annoy users, and the fastparquet developers are already aware of it. BTW, we do exactly the same for pyarrow's 'misuse' of internal API, we added a deprecationwarning for them.
https://api.github.com/repos/pandas-dev/pandas/pulls/19442
2018-01-29T13:00:15Z
2018-01-29T21:39:09Z
2018-01-29T21:39:09Z
2018-01-29T21:39:13Z
BUG: Fix problem with SparseDataFrame not persisting to csv
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1890636bc8e1a..ae1d05ecdb008 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -497,7 +497,6 @@ I/O - Bug in :func:`DataFrame.to_parquet` where an exception was raised if the write destination is S3 (:issue:`19134`) - :class:`Interval` now supported in :func:`DataFrame.to_excel` for all Excel file types (:issue:`19242`) - :class:`Timedelta` now supported in :func:`DataFrame.to_excel` for xls file type (:issue:`19242`, :issue:`9155`) -- Plotting ^^^^^^^^ @@ -521,7 +520,7 @@ Sparse ^^^^^^ - Bug in which creating a ``SparseDataFrame`` from a dense ``Series`` or an unsupported type raised an uncontrolled exception (:issue:`19374`) -- +- Bug in :class:`SparseDataFrame.to_csv` causing exception (:issue:`19384`) - Reshaping diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f3e5e4c99a899..d06346cc27a28 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -709,7 +709,8 @@ def to_native_types(self, slicer=None, na_rep='nan', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ - values = self.values + values = self.get_values() + if slicer is not None: values = values[:, slicer] mask = isna(values) diff --git a/pandas/tests/sparse/frame/test_to_csv.py b/pandas/tests/sparse/frame/test_to_csv.py new file mode 100644 index 0000000000000..b0243dfde8d3f --- /dev/null +++ b/pandas/tests/sparse/frame/test_to_csv.py @@ -0,0 +1,20 @@ +import numpy as np +import pytest +from pandas import SparseDataFrame, read_csv +from pandas.util import testing as tm + + +class TestSparseDataFrameToCsv(object): + fill_values = [np.nan, 0, None, 1] + + @pytest.mark.parametrize('fill_value', fill_values) + def test_to_csv_sparse_dataframe(self, fill_value): + # GH19384 + sdf = SparseDataFrame({'a': type(self).fill_values}, + default_fill_value=fill_value) + + with tm.ensure_clean('sparse_df.csv') as path: + 
sdf.to_csv(path, index=False) + df = read_csv(path, skip_blank_lines=False) + + tm.assert_sp_frame_equal(df.to_sparse(fill_value=fill_value), sdf)
- [x] closes #19384 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19441
2018-01-29T08:03:44Z
2018-02-01T19:26:16Z
2018-02-01T19:26:16Z
2018-02-01T19:26:21Z
TST: fix test for MultiIndexPyIntEngine on 32 bit
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 9582264a8c716..65332ae7153e2 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1611,7 +1611,7 @@ def test_pyint_engine(self): index = MultiIndex.from_tuples(keys) assert index.get_loc(keys[idx]) == idx - expected = np.arange(idx + 1, dtype='int64') + expected = np.arange(idx + 1, dtype=np.intp) result = index.get_indexer([keys[i] for i in expected]) tm.assert_numpy_array_equal(result, expected)
- [x] closes #19439 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` It was just a problem in the test.
https://api.github.com/repos/pandas-dev/pandas/pulls/19440
2018-01-29T07:47:08Z
2018-01-29T14:07:03Z
2018-01-29T14:07:03Z
2018-01-30T04:47:31Z
BUG: don't assume series is length > 0
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 69965f44d87a8..e8d2ec5eb0d9e 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -555,7 +555,7 @@ Sparse - Bug in which creating a ``SparseDataFrame`` from a dense ``Series`` or an unsupported type raised an uncontrolled exception (:issue:`19374`) - Bug in :class:`SparseDataFrame.to_csv` causing exception (:issue:`19384`) -- +- Bug in :class:`SparseArray.memory_usage` which caused segfault by accessing non sparse elements (:issue:`19368`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/base.py b/pandas/core/base.py index 54d25a16a10a3..d5b204dba063e 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1048,7 +1048,7 @@ def is_monotonic_decreasing(self): def memory_usage(self, deep=False): """ - Memory usage of my values + Memory usage of the values Parameters ---------- diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index fa07400a0706e..65aefd9fb8c0a 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -8,10 +8,10 @@ import warnings import pandas as pd -from pandas.core.base import PandasObject +from pandas.core.base import PandasObject, IndexOpsMixin from pandas import compat -from pandas.compat import range +from pandas.compat import range, PYPY from pandas.compat.numpy import function as nv from pandas.core.dtypes.generic import ABCSparseSeries @@ -30,6 +30,7 @@ from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype import pandas._libs.sparse as splib +import pandas._libs.lib as lib from pandas._libs.sparse import SparseIndex, BlockIndex, IntIndex from pandas._libs import index as libindex import pandas.core.algorithms as algos @@ -238,6 +239,17 @@ def kind(self): elif isinstance(self.sp_index, IntIndex): return 'integer' + @Appender(IndexOpsMixin.memory_usage.__doc__) + def memory_usage(self, deep=False): + values = self.sp_values + + v = values.nbytes + + if deep and 
is_object_dtype(self) and not PYPY: + v += lib.memory_usage_of_objects(values) + + return v + def __array_wrap__(self, out_arr, context=None): """ NumPy calls this method when ufunc is applied diff --git a/pandas/tests/sparse/series/test_series.py b/pandas/tests/sparse/series/test_series.py index 2ea1e63433520..3f5d5a59cc540 100644 --- a/pandas/tests/sparse/series/test_series.py +++ b/pandas/tests/sparse/series/test_series.py @@ -23,6 +23,8 @@ from pandas.core.sparse.api import SparseSeries from pandas.tests.series.test_api import SharedWithSparse +from itertools import product + def _test_data1(): # nan-based @@ -971,6 +973,17 @@ def test_combine_first(self): tm.assert_sp_series_equal(result, result2) tm.assert_sp_series_equal(result, expected) + @pytest.mark.parametrize('deep,fill_values', [([True, False], + [0, 1, np.nan, None])]) + def test_memory_usage_deep(self, deep, fill_values): + for deep, fill_value in product(deep, fill_values): + sparse_series = SparseSeries(fill_values, fill_value=fill_value) + dense_series = Series(fill_values) + sparse_usage = sparse_series.memory_usage(deep=deep) + dense_usage = dense_series.memory_usage(deep=deep) + + assert sparse_usage < dense_usage + class TestSparseHandlingMultiIndexes(object):
- [x] closes #19368 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19438
2018-01-29T07:08:02Z
2018-02-06T01:24:54Z
null
2018-02-06T01:25:22Z
CI: pin fastparquet <= 0.1.3
diff --git a/ci/requirements-2.7.sh b/ci/requirements-2.7.sh index e3bd5e46026c5..81a45eaca8620 100644 --- a/ci/requirements-2.7.sh +++ b/ci/requirements-2.7.sh @@ -4,4 +4,4 @@ source activate pandas echo "install 27" -conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1 fastparquet +conda install -n pandas -c conda-forge feather-format pyarrow=0.4.1 fastparquet=0.1.3 diff --git a/ci/requirements-3.5_OSX.sh b/ci/requirements-3.5_OSX.sh index c2978b175968c..65900820ae0f4 100644 --- a/ci/requirements-3.5_OSX.sh +++ b/ci/requirements-3.5_OSX.sh @@ -4,4 +4,4 @@ source activate pandas echo "install 35_OSX" -conda install -n pandas -c conda-forge feather-format==0.3.1 fastparquet +conda install -n pandas -c conda-forge feather-format==0.3.1 fastparquet=0.1.3 diff --git a/ci/requirements-3.6.run b/ci/requirements-3.6.run index 822144a80bc9a..f7c9eba862d76 100644 --- a/ci/requirements-3.6.run +++ b/ci/requirements-3.6.run @@ -18,7 +18,7 @@ feather-format pyarrow psycopg2 python-snappy -fastparquet +fastparquet=0.1.3 beautifulsoup4 s3fs xarray diff --git a/ci/requirements-3.6_DOC.sh b/ci/requirements-3.6_DOC.sh index aec0f62148622..cd90efec4a0c7 100644 --- a/ci/requirements-3.6_DOC.sh +++ b/ci/requirements-3.6_DOC.sh @@ -6,6 +6,6 @@ echo "[install DOC_BUILD deps]" pip install pandas-gbq -conda install -n pandas -c conda-forge feather-format pyarrow nbsphinx pandoc fastparquet +conda install -n pandas -c conda-forge feather-format pyarrow nbsphinx pandoc fastparquet=0.1.3 conda install -n pandas -c r r rpy2 --yes diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index 3042888763863..cae78f675455f 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -13,5 +13,5 @@ pytables matplotlib blosc thrift=0.10* -fastparquet +fastparquet=0.1.3 pyarrow
https://api.github.com/repos/pandas-dev/pandas/pulls/19435
2018-01-28T20:52:05Z
2018-01-28T22:13:06Z
null
2018-01-28T22:13:06Z
[#19431] Regression in make_block_same_class (tests failing for new fastparquet release)
diff --git a/doc/source/io.rst b/doc/source/io.rst index ae04996b4fddf..4199f161501ec 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -4537,7 +4537,7 @@ See the documentation for `pyarrow <http://arrow.apache.org/docs/python/>`__ and .. note:: These engines are very similar and should read/write nearly identical parquet format files. - Currently ``pyarrow`` does not support timedelta data, and ``fastparquet`` does not support timezone aware datetimes (they are coerced to UTC). + Currently ``pyarrow`` does not support timedelta data, ``fastparquet>=0.1.4`` supports timezone aware datetimes. These libraries differ by having different underlying dependencies (``fastparquet`` by using ``numba``, while ``pyarrow`` uses a c-library). .. ipython:: python diff --git a/pandas/core/internals.py b/pandas/core/internals.py index c2d3d0852384c..ec884035fe0c4 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -224,12 +224,17 @@ def make_block_scalar(self, values): """ return ScalarBlock(values) - def make_block_same_class(self, values, placement=None, ndim=None): + def make_block_same_class(self, values, placement=None, ndim=None, + dtype=None): """ Wrap given values in a block of same type as self. 
""" + if dtype is not None: + # issue 19431 fastparquet is passing this + warnings.warn("dtype argument is deprecated, will be removed " + "in a future release.", FutureWarning) if placement is None: placement = self.mgr_locs return make_block(values, placement=placement, ndim=ndim, - klass=self.__class__) + klass=self.__class__, dtype=dtype) def __unicode__(self): diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 57884e9816ed3..f17306b8b52f9 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -285,6 +285,13 @@ def test_delete(self): with pytest.raises(Exception): newb.delete(3) + def test_make_block_same_class(self): + # issue 19431 + block = create_block('M8[ns, US/Eastern]', [3]) + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + block.make_block_same_class(block.values, dtype=block.values.dtype) + class TestDatetimeBlock(object): diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 8a6a22abe23fa..244b6f4244252 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -71,6 +71,15 @@ def fp(): return 'fastparquet' +@pytest.fixture +def fp_lt_014(): + if not _HAVE_FASTPARQUET: + pytest.skip("fastparquet is not installed") + if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'): + pytest.skip("fastparquet is >= 0.1.4") + return 'fastparquet' + + @pytest.fixture def df_compat(): return pd.DataFrame({'A': [1, 2, 3], 'B': 'foo'}) @@ -449,8 +458,10 @@ def test_basic(self, fp, df_full): df = df_full # additional supported types for fastparquet + if LooseVersion(fastparquet.__version__) >= LooseVersion('0.1.4'): + df['datetime_tz'] = pd.date_range('20130101', periods=3, + tz='US/Eastern') df['timedelta'] = pd.timedelta_range('1 day', periods=3) - check_round_trip(df, fp) @pytest.mark.skip(reason="not supported") @@ -482,14 +493,15 @@ def test_categorical(self, fp): df = 
pd.DataFrame({'a': pd.Categorical(list('abc'))}) check_round_trip(df, fp) - def test_datetime_tz(self, fp): - # doesn't preserve tz + def test_datetime_tz(self, fp_lt_014): + + # fastparquet<0.1.4 doesn't preserve tz df = pd.DataFrame({'a': pd.date_range('20130101', periods=3, tz='US/Eastern')}) - # warns on the coercion with catch_warnings(record=True): - check_round_trip(df, fp, expected=df.astype('datetime64[ns]')) + check_round_trip(df, fp_lt_014, + expected=df.astype('datetime64[ns]')) def test_filter_row_groups(self, fp): d = {'a': list(range(0, 3))}
- [x] closes https://github.com/pandas-dev/pandas/issues/19431 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry `dtype` seems still in use at: https://github.com/minggli/pandas/blob/2f4fc0790a5e5c51eb80bbfadf4c80f0bb424c56/pandas/core/internals.py#L2911
https://api.github.com/repos/pandas-dev/pandas/pulls/19434
2018-01-28T18:59:58Z
2018-01-29T12:43:00Z
2018-01-29T12:43:00Z
2018-01-29T12:46:08Z
BUG: groupby with resample using on parameter errors when selecting column to apply function
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 69965f44d87a8..3b7c3ee916e17 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -548,7 +548,7 @@ Groupby/Resample/Rolling - Bug in :func:`DataFrame.resample` which silently ignored unsupported (or mistyped) options for ``label``, ``closed`` and ``convention`` (:issue:`19303`) - Bug in :func:`DataFrame.groupby` where tuples were interpreted as lists of keys rather than as keys (:issue:`17979`, :issue:`18249`) - Bug in ``transform`` where particular aggregation functions were being incorrectly cast to match the dtype(s) of the grouped data (:issue:`19200`) -- +- Bug in `DataFrame.groupby` passing the `on=` kwarg, and subsequently using `.apply` (:issue:`17813`) Sparse ^^^^^^ diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 2c1deb9db7bba..ccd8a2092506c 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -37,6 +37,7 @@ _ensure_categorical, _ensure_float) from pandas.core.dtypes.cast import maybe_downcast_to_dtype +from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna, notna, _maybe_fill from pandas.core.base import (PandasObject, SelectionMixin, GroupByError, @@ -423,6 +424,7 @@ def __init__(self, key=None, level=None, freq=None, axis=0, sort=False): self.obj = None self.indexer = None self.binner = None + self._grouper = None @property def ax(self): @@ -465,12 +467,22 @@ def _set_grouper(self, obj, sort=False): raise ValueError( "The Grouper cannot specify both a key and a level!") + # Keep self.grouper value before overriding + if self._grouper is None: + self._grouper = self.grouper + # the key must be a valid info item if self.key is not None: key = self.key - if key not in obj._info_axis: - raise KeyError("The grouper name {0} is not found".format(key)) - ax = Index(obj[key], name=key) + # The 'on' is already defined + if getattr(self.grouper, 'name', None) == key and \ + 
isinstance(obj, ABCSeries): + ax = self._grouper.take(obj.index) + else: + if key not in obj._info_axis: + raise KeyError( + "The grouper name {0} is not found".format(key)) + ax = Index(obj[key], name=key) else: ax = obj._get_axis(self.axis) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index a5aaa328a8e06..2de890ea459f0 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -3077,6 +3077,15 @@ def test_getitem_multiple(self): result = r['buyer'].count() assert_series_equal(result, expected) + def test_groupby_resample_on_api_with_getitem(self): + # GH 17813 + df = pd.DataFrame({'id': list('aabbb'), + 'date': pd.date_range('1-1-2016', periods=5), + 'data': 1}) + exp = df.set_index('date').groupby('id').resample('2D')['data'].sum() + result = df.groupby('id').resample('2D', on='date')['data'].sum() + assert_series_equal(result, exp) + def test_nearest(self): # GH 17496
- [x] closes #17813 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19433
2018-01-28T16:56:33Z
2018-02-05T11:13:45Z
null
2018-02-05T11:14:50Z
Misc typos
diff --git a/asv_bench/benchmarks/replace.py b/asv_bench/benchmarks/replace.py index 6330a2b36c516..41208125e8f32 100644 --- a/asv_bench/benchmarks/replace.py +++ b/asv_bench/benchmarks/replace.py @@ -44,15 +44,15 @@ class Convert(object): goal_time = 0.5 params = (['DataFrame', 'Series'], ['Timestamp', 'Timedelta']) - param_names = ['contructor', 'replace_data'] + param_names = ['constructor', 'replace_data'] - def setup(self, contructor, replace_data): + def setup(self, constructor, replace_data): N = 10**3 data = {'Series': pd.Series(np.random.randint(N, size=N)), 'DataFrame': pd.DataFrame({'A': np.random.randint(N, size=N), 'B': np.random.randint(N, size=N)})} self.to_replace = {i: getattr(pd, replace_data) for i in range(N)} - self.data = data[contructor] + self.data = data[constructor] - def time_replace(self, contructor, replace_data): + def time_replace(self, constructor, replace_data): self.data.replace(self.to_replace) diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 45142c53dcd01..59cf7d090a622 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -12,14 +12,14 @@ class Methods(object): ['int', 'float'], ['median', 'mean', 'max', 'min', 'std', 'count', 'skew', 'kurt', 'sum', 'corr', 'cov']) - param_names = ['contructor', 'window', 'dtype', 'method'] + param_names = ['constructor', 'window', 'dtype', 'method'] - def setup(self, contructor, window, dtype, method): + def setup(self, constructor, window, dtype, method): N = 10**5 arr = np.random.random(N).astype(dtype) - self.roll = getattr(pd, contructor)(arr).rolling(window) + self.roll = getattr(pd, constructor)(arr).rolling(window) - def time_rolling(self, contructor, window, dtype, method): + def time_rolling(self, constructor, window, dtype, method): getattr(self.roll, method)() @@ -30,12 +30,12 @@ class Quantile(object): [10, 1000], ['int', 'float'], [0, 0.5, 1]) - param_names = ['contructor', 'window', 'dtype', 'percentile'] + 
param_names = ['constructor', 'window', 'dtype', 'percentile'] - def setup(self, contructor, window, dtype, percentile): + def setup(self, constructor, window, dtype, percentile): N = 10**5 arr = np.random.random(N).astype(dtype) - self.roll = getattr(pd, contructor)(arr).rolling(window) + self.roll = getattr(pd, constructor)(arr).rolling(window) - def time_quantile(self, contructor, window, dtype, percentile): + def time_quantile(self, constructor, window, dtype, percentile): self.roll.quantile(percentile) diff --git a/doc/source/api.rst b/doc/source/api.rst index ddd09327935ce..44f87aa3e1cec 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -2500,7 +2500,7 @@ Scalar introspection Extensions ---------- -These are primarily intented for library authors looking to extend pandas +These are primarily intended for library authors looking to extend pandas objects. .. currentmodule:: pandas diff --git a/doc/source/io.rst b/doc/source/io.rst index ae04996b4fddf..2210cd82ee561 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2675,7 +2675,7 @@ file, and the ``sheet_name`` indicating which sheet to parse. +++++++++++++++++++ To facilitate working with multiple sheets from the same file, the ``ExcelFile`` -class can be used to wrap the file and can be be passed into ``read_excel`` +class can be used to wrap the file and can be passed into ``read_excel`` There will be a performance benefit for reading multiple sheets as the file is read into memory only once. diff --git a/doc/sphinxext/numpydoc/tests/test_docscrape.py b/doc/sphinxext/numpydoc/tests/test_docscrape.py index b682504e1618f..b412124d774bb 100755 --- a/doc/sphinxext/numpydoc/tests/test_docscrape.py +++ b/doc/sphinxext/numpydoc/tests/test_docscrape.py @@ -42,7 +42,7 @@ ------- out : ndarray The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is + shape given is (m,n,...), then the shape of `out` is (m,n,...,N). 
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional @@ -222,7 +222,7 @@ def test_str(): ------- out : ndarray The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is + shape given is (m,n,...), then the shape of `out` is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional @@ -340,7 +340,7 @@ def test_sphinx_str(): **out** : ndarray The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is + shape given is (m,n,...), then the shape of `out` is (m,n,...,N). In other words, each entry ``out[i,j,...,:]`` is an N-dimensional diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 1e6ea7794dfff..37693068e0974 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -897,7 +897,7 @@ class Timedelta(_Timedelta): Represents a duration, the difference between two dates or times. Timedelta is the pandas equivalent of python's ``datetime.timedelta`` - and is interchangable with it in most cases. + and is interchangeable with it in most cases. Parameters ---------- diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index c22e0b8e555a3..215ae9ce087ee 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -295,7 +295,7 @@ cpdef bint tz_compare(object start, object end): timezones. For example `<DstTzInfo 'Europe/Paris' LMT+0:09:00 STD>` and `<DstTzInfo 'Europe/Paris' CET+1:00:00 STD>` are essentially same - timezones but aren't evaluted such, but the string representation + timezones but aren't evaluated such, but the string representation for both of these is `'Europe/Paris'`. 
This exists only to add a notion of equality to pytz-style zones diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7328cd336babf..788b236b0ec59 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4115,7 +4115,7 @@ def combine(self, other, func, fill_value=None, overwrite=True): series[this_mask] = fill_value otherSeries[other_mask] = fill_value - # if we have different dtypes, possibily promote + # if we have different dtypes, possibly promote new_dtype = this_dtype if not is_dtype_equal(this_dtype, other_dtype): new_dtype = find_common_type([this_dtype, other_dtype]) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index f43c6dc567f69..8e77c7a7fa48c 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -332,7 +332,7 @@ def freqstr(self): @cache_readonly def inferred_freq(self): """ - Trys to return a string representing a frequency guess, + Tryies to return a string representing a frequency guess, generated by infer_freq. Returns None if it can't autodetect the frequency. 
""" diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 99bf0d5b7ac51..91dc44e3f185e 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -120,7 +120,7 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None, if dtype is not None: mgr = mgr.astype(dtype) else: - msg = ('SparseDataFrame called with unkown type "{data_type}" ' + msg = ('SparseDataFrame called with unknown type "{data_type}" ' 'for data argument') raise TypeError(msg.format(data_type=type(data).__name__)) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 4e207f9d1838c..1c23527cf57c4 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -493,7 +493,7 @@ def _set_value(self, label, value, takeable=False): values = self.to_dense() # if the label doesn't exist, we will create a new object here - # and possibily change the index + # and possibly change the index new_values = values._set_value(label, value, takeable=takeable) if new_values is not None: values = new_values diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 5c31b9a5668ff..12c7feb5f2b15 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1395,7 +1395,7 @@ def _validate(data): elif isinstance(data, Index): # can't use ABCIndex to exclude non-str - # see scc/inferrence.pyx which can contain string values + # see src/inference.pyx which can contain string values allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer') if data.inferred_type not in allowed_types: message = ("Can only use .str accessor with string values " diff --git a/pandas/core/util/hashing.py b/pandas/core/util/hashing.py index 0c82773b75c28..7edb5b16ce77a 100644 --- a/pandas/core/util/hashing.py +++ b/pandas/core/util/hashing.py @@ -210,7 +210,7 @@ def _hash_categorical(c, encoding, hash_key): # we have uint64, as we don't directly support missing values # we don't want to use take_nd which will coerce to float - # 
instead, directly construt the result with a + # instead, directly construct the result with a # max(np.uint64) as the missing value indicator # # TODO: GH 15362 diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 2293032ebb8a1..bca0b64cb53fe 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1961,7 +1961,7 @@ def formatter(value): def get_result_as_array(self): """ Returns the float values converted into strings using - the parameters given at initalisation, as a numpy array + the parameters given at initialisation, as a numpy array """ if self.formatter is not None: diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 106823199ee93..5376473f83f22 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -3763,7 +3763,7 @@ def write(self, **kwargs): class LegacyTable(Table): """ an appendable table: allow append/query/delete operations to a - (possibily) already existing appendable table this table ALLOWS + (possibly) already existing appendable table this table ALLOWS append (but doesn't require them), and stores the data in a format that can be easily searched diff --git a/pandas/tests/categorical/test_constructors.py b/pandas/tests/categorical/test_constructors.py index b29d75bed5c6f..6cc34770a65e0 100644 --- a/pandas/tests/categorical/test_constructors.py +++ b/pandas/tests/categorical/test_constructors.py @@ -382,7 +382,7 @@ def test_constructor_from_categorical_with_unknown_dtype(self): ordered=True) tm.assert_categorical_equal(result, expected) - def test_contructor_from_categorical_string(self): + def test_constructor_from_categorical_string(self): values = Categorical(['a', 'b', 'd']) # use categories, ordered result = Categorical(values, categories=['a', 'b', 'c'], ordered=True, diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 8b57e96e6fa06..b24ae22162a34 100644 --- a/pandas/tests/frame/test_constructors.py +++ 
b/pandas/tests/frame/test_constructors.py @@ -543,7 +543,7 @@ def test_nested_dict_frame_constructor(self): tm.assert_frame_equal(result, df) def _check_basic_constructor(self, empty): - # mat: 2d matrix with shpae (3, 2) to input. empty - makes sized + # mat: 2d matrix with shape (3, 2) to input. empty - makes sized # objects mat = empty((2, 3), dtype=float) # 2-D input diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index b277d8256e612..e0ce27de5c31f 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -2531,7 +2531,7 @@ def test_date_tz(self): [datetime(2013, 1, 1), pd.NaT], utc=True).format() assert formatted[0] == "2013-01-01 00:00:00+00:00" - def test_date_explict_date_format(self): + def test_date_explicit_date_format(self): formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format( date_format="%m-%d-%Y", na_rep="UT") assert formatted[0] == "02-01-2003" diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index f2b7c20b774b0..0e6e44e839464 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -43,7 +43,7 @@ def test_empty(self, method, unit, use_bottleneck): result = getattr(s, method)() assert result == unit - # Explict + # Explicit result = getattr(s, method)(min_count=0) assert result == unit diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 7505e6b0cec3b..38e5753d1752d 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1163,7 +1163,7 @@ def test_timedelta_floordiv(self, scalar_td): ('NCC1701D', 'NCC1701D', 'NCC1701D')]) def test_td64_series_with_tdi(self, names): # GH#17250 make sure result dtype is correct - # GH#19043 make sure names are propogated correctly + # GH#19043 make sure names are propagated correctly tdi = pd.TimedeltaIndex(['0 days', '1 day'], name=names[0]) ser = 
Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1]) expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 2b589ebd4735e..0b7948cc32d24 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -218,7 +218,7 @@ def test_constructor_from_unknown_type(self): class Unknown: pass with pytest.raises(TypeError, - message='SparseDataFrame called with unkown type ' + message='SparseDataFrame called with unknown type ' '"Unknown" for data argument'): SparseDataFrame(Unknown()) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 941bdcbc8b064..0009e26f8b100 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2401,7 +2401,7 @@ class for all warnings. To check that no warning is returned, into errors. Valid values are: - * "error" - turns matching warnings into exeptions + * "error" - turns matching warnings into exceptions * "ignore" - discard the warning * "always" - always emit a warning * "default" - print the warning the first time it is generated
Found via `codespell -q 3`
https://api.github.com/repos/pandas-dev/pandas/pulls/19430
2018-01-28T14:22:35Z
2018-01-29T14:14:34Z
2018-01-29T14:14:34Z
2018-01-29T14:22:40Z
DOC: Spellcheck of categorical.rst and visualization.rst
diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 7364167611730..efcc04d688334 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -19,10 +19,11 @@ Categorical Data This is an introduction to pandas categorical data type, including a short comparison with R's ``factor``. -`Categoricals` are a pandas data type, which correspond to categorical variables in -statistics: a variable, which can take on only a limited, and usually fixed, -number of possible values (`categories`; `levels` in R). Examples are gender, social class, -blood types, country affiliations, observation time or ratings via Likert scales. +`Categoricals` are a pandas data type corresponding to categorical variables in +statistics. A categorical variable takes on a limited, and usually fixed, +number of possible values (`categories`; `levels` in R). Examples are gender, +social class, blood type, country affiliation, observation time or rating via +Likert scales. In contrast to statistical categorical variables, categorical data might have an order (e.g. 'strongly agree' vs 'agree' or 'first observation' vs. 'second observation'), but numerical @@ -48,16 +49,16 @@ See also the :ref:`API docs on categoricals<api.categorical>`. Object Creation --------------- -Categorical `Series` or columns in a `DataFrame` can be created in several ways: +Categorical ``Series`` or columns in a ``DataFrame`` can be created in several ways: -By specifying ``dtype="category"`` when constructing a `Series`: +By specifying ``dtype="category"`` when constructing a ``Series``: .. ipython:: python s = pd.Series(["a","b","c","a"], dtype="category") s -By converting an existing `Series` or column to a ``category`` dtype: +By converting an existing ``Series`` or column to a ``category`` dtype: .. 
ipython:: python @@ -65,18 +66,17 @@ By converting an existing `Series` or column to a ``category`` dtype: df["B"] = df["A"].astype('category') df -By using some special functions: +By using special functions, such as :func:`~pandas.cut`, which groups data into +discrete bins. See the :ref:`example on tiling <reshaping.tile.cut>` in the docs. .. ipython:: python df = pd.DataFrame({'value': np.random.randint(0, 100, 20)}) - labels = [ "{0} - {1}".format(i, i + 9) for i in range(0, 100, 10) ] + labels = ["{0} - {1}".format(i, i + 9) for i in range(0, 100, 10)] df['group'] = pd.cut(df.value, range(0, 105, 10), right=False, labels=labels) df.head(10) -See :ref:`documentation <reshaping.tile.cut>` for :func:`~pandas.cut`. - By passing a :class:`pandas.Categorical` object to a `Series` or assigning it to a `DataFrame`. .. ipython:: python @@ -89,10 +89,11 @@ By passing a :class:`pandas.Categorical` object to a `Series` or assigning it to df["B"] = raw_cat df -Anywhere above we passed a keyword ``dtype='category'``, we used the default behavior of +In the examples above where we passed ``dtype='category'``, we used the default +behavior: -1. categories are inferred from the data -2. categories are unordered. +1. Categories are inferred from the data. +2. Categories are unordered. To control those behaviors, instead of passing ``'category'``, use an instance of :class:`~pandas.api.types.CategoricalDtype`. @@ -123,8 +124,8 @@ Categorical data has a specific ``category`` :ref:`dtype <basics.dtypes>`: In contrast to R's `factor` function, there is currently no way to assign/change labels at creation time. Use `categories` to change the categories after creation time. -To get back to the original Series or `numpy` array, use ``Series.astype(original_dtype)`` or -``np.asarray(categorical)``: +To get back to the original ``Series`` or NumPy array, use +``Series.astype(original_dtype)`` or ``np.asarray(categorical)``: .. 
ipython:: python @@ -135,8 +136,9 @@ To get back to the original Series or `numpy` array, use ``Series.astype(origina s2.astype(str) np.asarray(s2) -If you have already `codes` and `categories`, you can use the :func:`~pandas.Categorical.from_codes` -constructor to save the factorize step during normal constructor mode: +If you already have `codes` and `categories`, you can use the +:func:`~pandas.Categorical.from_codes` constructor to save the factorize step +during normal constructor mode: .. ipython:: python @@ -171,7 +173,7 @@ by default. A :class:`~pandas.api.types.CategoricalDtype` can be used in any place pandas expects a `dtype`. For example :func:`pandas.read_csv`, -:func:`pandas.DataFrame.astype`, or in the Series constructor. +:func:`pandas.DataFrame.astype`, or in the ``Series`` constructor. .. note:: @@ -185,8 +187,8 @@ Equality Semantics ~~~~~~~~~~~~~~~~~~ Two instances of :class:`~pandas.api.types.CategoricalDtype` compare equal -whenever they have the same categories and orderedness. When comparing two -unordered categoricals, the order of the ``categories`` is not considered +whenever they have the same categories and order. When comparing two +unordered categoricals, the order of the ``categories`` is not considered. .. ipython:: python @@ -198,7 +200,7 @@ unordered categoricals, the order of the ``categories`` is not considered # Unequal, since the second CategoricalDtype is ordered c1 == CategoricalDtype(['a', 'b', 'c'], ordered=True) -All instances of ``CategoricalDtype`` compare equal to the string ``'category'`` +All instances of ``CategoricalDtype`` compare equal to the string ``'category'``. .. ipython:: python @@ -215,8 +217,8 @@ All instances of ``CategoricalDtype`` compare equal to the string ``'category'`` Description ----------- -Using ``.describe()`` on categorical data will produce similar output to a `Series` or -`DataFrame` of type ``string``. 
+Using :meth:`~DataFrame.describe` on categorical data will produce similar +output to a ``Series`` or ``DataFrame`` of type ``string``. .. ipython:: python @@ -230,10 +232,10 @@ Using ``.describe()`` on categorical data will produce similar output to a `Seri Working with categories ----------------------- -Categorical data has a `categories` and a `ordered` property, which list their possible values and -whether the ordering matters or not. These properties are exposed as ``s.cat.categories`` and -``s.cat.ordered``. If you don't manually specify categories and ordering, they are inferred from the -passed in values. +Categorical data has a `categories` and a `ordered` property, which list their +possible values and whether the ordering matters or not. These properties are +exposed as ``s.cat.categories`` and ``s.cat.ordered``. If you don't manually +specify categories and ordering, they are inferred from the passed arguments. .. ipython:: python @@ -251,13 +253,13 @@ It's also possible to pass in the categories in a specific order: .. note:: - New categorical data are NOT automatically ordered. You must explicitly pass ``ordered=True`` to - indicate an ordered ``Categorical``. + New categorical data are **not** automatically ordered. You must explicitly + pass ``ordered=True`` to indicate an ordered ``Categorical``. .. note:: - The result of ``Series.unique()`` is not always the same as ``Series.cat.categories``, + The result of :meth:`~Series.unique` is not always the same as ``Series.cat.categories``, because ``Series.unique()`` has a couple of guarantees, namely that it returns categories in the order of appearance, and it only includes values that are actually present. 
@@ -275,8 +277,10 @@ It's also possible to pass in the categories in a specific order: Renaming categories ~~~~~~~~~~~~~~~~~~~ -Renaming categories is done by assigning new values to the ``Series.cat.categories`` property or -by using the :func:`Categorical.rename_categories` method: +Renaming categories is done by assigning new values to the +``Series.cat.categories`` property or by using the +:meth:`~pandas.Categorical.rename_categories` method: + .. ipython:: python @@ -296,8 +300,8 @@ by using the :func:`Categorical.rename_categories` method: .. note:: - Be aware that assigning new categories is an inplace operations, while most other operation - under ``Series.cat`` per default return a new Series of dtype `category`. + Be aware that assigning new categories is an inplace operation, while most other operations + under ``Series.cat`` by default return a new ``Series`` of dtype `category`. Categories must be unique or a `ValueError` is raised: @@ -320,7 +324,8 @@ Categories must also not be ``NaN`` or a `ValueError` is raised: Appending new categories ~~~~~~~~~~~~~~~~~~~~~~~~ -Appending categories can be done by using the :func:`Categorical.add_categories` method: +Appending categories can be done by using the +:meth:`~pandas.Categorical.add_categories` method: .. ipython:: python @@ -331,8 +336,9 @@ Appending categories can be done by using the :func:`Categorical.add_categories` Removing categories ~~~~~~~~~~~~~~~~~~~ -Removing categories can be done by using the :func:`Categorical.remove_categories` method. Values -which are removed are replaced by ``np.nan``.: +Removing categories can be done by using the +:meth:`~pandas.Categorical.remove_categories` method. Values which are removed +are replaced by ``np.nan``: ..
ipython:: python @@ -353,8 +359,10 @@ Removing unused categories can also be done: Setting categories ~~~~~~~~~~~~~~~~~~ -If you want to do remove and add new categories in one step (which has some speed advantage), -or simply set the categories to a predefined scale, use :func:`Categorical.set_categories`. +If you want to remove and add new categories in one step (which has some +speed advantage), or simply set the categories to a predefined scale, +use :meth:`~pandas.Categorical.set_categories`. + .. ipython:: python @@ -366,7 +374,7 @@ or simply set the categories to a predefined scale, use :func:`Categorical.set_c .. note:: Be aware that :func:`Categorical.set_categories` cannot know whether some category is omitted intentionally or because it is misspelled or (under Python3) due to a type difference (e.g., - numpys S1 dtype and Python strings). This can result in surprising behaviour! + NumPy S1 dtype and Python strings). This can result in surprising behaviour! Sorting and Order ----------------- @@ -374,7 +382,7 @@ Sorting and Order .. _categorical.sort: If categorical data is ordered (``s.cat.ordered == True``), then the order of the categories has a -meaning and certain operations are possible. If the categorical is unordered, ``.min()/.max()`` will raise a `TypeError`. +meaning and certain operations are possible. If the categorical is unordered, ``.min()/.max()`` will raise a ``TypeError``. .. ipython:: python @@ -411,8 +419,8 @@ This is even true for strings and numeric data: Reordering ~~~~~~~~~~ -Reordering the categories is possible via the :func:`Categorical.reorder_categories` and -the :func:`Categorical.set_categories` methods. For :func:`Categorical.reorder_categories`, all +Reordering the categories is possible via the :meth:`Categorical.reorder_categories` and +the :meth:`Categorical.set_categories` methods. 
For :meth:`Categorical.reorder_categories`, all old categories must be included in the new categories and no new categories are allowed. This will necessarily make the sort order the same as the categories order. @@ -428,16 +436,16 @@ necessarily make the sort order the same as the categories order. .. note:: Note the difference between assigning new categories and reordering the categories: the first - renames categories and therefore the individual values in the `Series`, but if the first + renames categories and therefore the individual values in the ``Series``, but if the first position was sorted last, the renamed value will still be sorted last. Reordering means that the way values are sorted is different afterwards, but not that individual values in the - `Series` are changed. + ``Series`` are changed. .. note:: - If the `Categorical` is not ordered, ``Series.min()`` and ``Series.max()`` will raise + If the ``Categorical`` is not ordered, :meth:`Series.min` and :meth:`Series.max` will raise ``TypeError``. Numeric operations like ``+``, ``-``, ``*``, ``/`` and operations based on them - (e.g. ``Series.median()``, which would need to compute the mean between two values if the length + (e.g. :meth:`Series.median`, which would need to compute the mean between two values if the length of an array is even) do not work and raise a ``TypeError``. Multi Column Sorting @@ -464,19 +472,19 @@ Comparisons Comparing categorical data with other objects is possible in three cases: - * comparing equality (``==`` and ``!=``) to a list-like object (list, Series, array, + * Comparing equality (``==`` and ``!=``) to a list-like object (list, Series, array, ...) of the same length as the categorical data. - * all comparisons (``==``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``) of categorical data to + * All comparisons (``==``, ``!=``, ``>``, ``>=``, ``<``, and ``<=``) of categorical data to another categorical Series, when ``ordered==True`` and the `categories` are the same. 
- * all comparisons of a categorical data to a scalar. + * All comparisons of a categorical data to a scalar. All other comparisons, especially "non-equality" comparisons of two categoricals with different -categories or a categorical with any list-like object, will raise a TypeError. +categories or a categorical with any list-like object, will raise a ``TypeError``. .. note:: - Any "non-equality" comparisons of categorical data with a `Series`, `np.array`, `list` or - categorical data with different categories or ordering will raise an `TypeError` because custom + Any "non-equality" comparisons of categorical data with a ``Series``, ``np.array``, ``list`` or + categorical data with different categories or ordering will raise a ``TypeError`` because custom categories ordering could be interpreted in two ways: one with taking into account the ordering and one without. @@ -546,11 +554,11 @@ When you compare two unordered categoricals with the same categories, the order Operations ---------- -Apart from ``Series.min()``, ``Series.max()`` and ``Series.mode()``, the following operations are -possible with categorical data: +Apart from :meth:`Series.min`, :meth:`Series.max` and :meth:`Series.mode`, the +following operations are possible with categorical data: -`Series` methods like `Series.value_counts()` will use all categories, even if some categories are not -present in the data: +``Series`` methods like :meth:`Series.value_counts` will use all categories, +even if some categories are not present in the data: .. ipython:: python @@ -588,8 +596,8 @@ that only values already in `categories` can be assigned. Getting ~~~~~~~ -If the slicing operation returns either a `DataFrame` or a column of type `Series`, -the ``category`` dtype is preserved. +If the slicing operation returns either a ``DataFrame`` or a column of type +``Series``, the ``category`` dtype is preserved. .. ipython:: python @@ -602,8 +610,8 @@ the ``category`` dtype is preserved. 
df.loc["h":"j","cats"] df[df["cats"] == "b"] -An example where the category type is not preserved is if you take one single row: the -resulting `Series` is of dtype ``object``: +An example where the category type is not preserved is if you take one single +row: the resulting ``Series`` is of dtype ``object``: .. ipython:: python @@ -620,10 +628,11 @@ of length "1". df.at["h","cats"] # returns a string .. note:: - This is a difference to R's `factor` function, where ``factor(c(1,2,3))[1]`` + This is in contrast to R's `factor` function, where ``factor(c(1,2,3))[1]`` returns a single value `factor`. -To get a single value `Series` of type ``category`` pass in a list with a single value: +To get a single value ``Series`` of type ``category``, you pass in a list with +a single value: .. ipython:: python @@ -632,8 +641,8 @@ To get a single value `Series` of type ``category`` pass in a list with a single String and datetime accessors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The accessors ``.dt`` and ``.str`` will work if the ``s.cat.categories`` are of an appropriate -type: +The accessors ``.dt`` and ``.str`` will work if the ``s.cat.categories`` are of +an appropriate type: .. ipython:: python @@ -676,8 +685,8 @@ That means, that the returned values from methods and properties on the accessor Setting ~~~~~~~ -Setting values in a categorical column (or `Series`) works as long as the value is included in the -`categories`: +Setting values in a categorical column (or ``Series``) works as long as the +value is included in the `categories`: .. ipython:: python @@ -704,7 +713,7 @@ Setting values by assigning categorical data will also check that the `categorie except ValueError as e: print("ValueError: " + str(e)) -Assigning a `Categorical` to parts of a column of other types will use the values: +Assigning a ``Categorical`` to parts of a column of other types will use the values: ..
ipython:: python @@ -719,7 +728,7 @@ Assigning a `Categorical` to parts of a column of other types will use the value Merging ~~~~~~~ -You can concat two `DataFrames` containing categorical data together, +You can concat two ``DataFrames`` containing categorical data together, but the categories of these categoricals need to be the same: .. ipython:: python @@ -731,7 +740,7 @@ but the categories of these categoricals need to be the same: res res.dtypes -In this case the categories are not the same and so an error is raised: +In this case the categories are not the same, and therefore an error is raised: .. ipython:: python @@ -754,10 +763,10 @@ Unioning .. versionadded:: 0.19.0 -If you want to combine categoricals that do not necessarily have -the same categories, the ``union_categoricals`` function will -combine a list-like of categoricals. The new categories -will be the union of the categories being combined. +If you want to combine categoricals that do not necessarily have the same +categories, the :func:`~pandas.api.types.union_categoricals` function will +combine a list-like of categoricals. The new categories will be the union of +the categories being combined. .. ipython:: python @@ -805,8 +814,9 @@ using the ``ignore_ordered=True`` argument. b = pd.Categorical(["c", "b", "a"], ordered=True) union_categoricals([a, b], ignore_order=True) -``union_categoricals`` also works with a ``CategoricalIndex``, or ``Series`` containing -categorical data, but note that the resulting array will always be a plain ``Categorical`` +:func:`~pandas.api.types.union_categoricals` also works with a +``CategoricalIndex``, or ``Series`` containing categorical data, but note that +the resulting array will always be a plain ``Categorical``: .. ipython:: python @@ -956,7 +966,7 @@ Differences to R's `factor` The following differences to R's factor functions can be observed: -* R's `levels` are named `categories` +* R's `levels` are named `categories`. 
* R's `levels` are always of type string, while `categories` in pandas can be of any dtype. * It's not possible to specify labels at creation time. Use ``s.cat.rename_categories(new_labels)`` afterwards. @@ -1009,10 +1019,10 @@ an ``object`` dtype is a constant times the length of the data. `Categorical` is not a `numpy` array ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Currently, categorical data and the underlying `Categorical` is implemented as a python -object and not as a low-level `numpy` array dtype. This leads to some problems. +Currently, categorical data and the underlying ``Categorical`` is implemented as a Python +object and not as a low-level NumPy array dtype. This leads to some problems. -`numpy` itself doesn't know about the new `dtype`: +NumPy itself doesn't know about the new `dtype`: .. ipython:: python @@ -1041,7 +1051,7 @@ To check if a Series contains Categorical data, use ``hasattr(s, 'cat')``: hasattr(pd.Series(['a'], dtype='category'), 'cat') hasattr(pd.Series(['a']), 'cat') -Using `numpy` functions on a `Series` of type ``category`` should not work as `Categoricals` +Using NumPy functions on a ``Series`` of type ``category`` should not work as `Categoricals` are not numeric data (even in the case that ``.categories`` is numeric). .. ipython:: python @@ -1080,7 +1090,7 @@ and allows efficient indexing and storage of an index with a large number of dup See the :ref:`advanced indexing docs <indexing.categoricalindex>` for a more detailed explanation. -Setting the index will create a ``CategoricalIndex`` +Setting the index will create a ``CategoricalIndex``: .. ipython:: python @@ -1095,8 +1105,9 @@ Setting the index will create a ``CategoricalIndex`` Side Effects ~~~~~~~~~~~~ -Constructing a `Series` from a `Categorical` will not copy the input `Categorical`. This -means that changes to the `Series` will in most cases change the original `Categorical`: +Constructing a ``Series`` from a ``Categorical`` will not copy the input +``Categorical``. 
This means that changes to the ``Series`` will in most cases +change the original ``Categorical``: .. ipython:: python @@ -1109,7 +1120,7 @@ means that changes to the `Series` will in most cases change the original `Categ df["cat"].cat.categories = [1,2,3,4,5] cat -Use ``copy=True`` to prevent such a behaviour or simply don't reuse `Categoricals`: +Use ``copy=True`` to prevent such a behaviour or simply don't reuse ``Categoricals``: .. ipython:: python @@ -1120,6 +1131,6 @@ Use ``copy=True`` to prevent such a behaviour or simply don't reuse `Categorical cat .. note:: - This also happens in some cases when you supply a `numpy` array instead of a `Categorical`: - using an int array (e.g. ``np.array([1,2,3,4])``) will exhibit the same behaviour, while using + This also happens in some cases when you supply a NumPy array instead of a ``Categorical``: + using an int array (e.g. ``np.array([1,2,3,4])``) will exhibit the same behavior, while using a string array (e.g. ``np.array(["a","b","c","a"])``) will not. diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index cbd17493beb7e..ee93f06fbc958 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -37,7 +37,8 @@ libraries that go beyond the basics documented here. Basic Plotting: ``plot`` ------------------------ -See the :ref:`cookbook<cookbook.plotting>` for some advanced strategies +We will demonstrate the basics, see the :ref:`cookbook<cookbook.plotting>` for +some advanced strategies. The ``plot`` method on Series and DataFrame is just a simple wrapper around :meth:`plt.plot() <matplotlib.axes.Axes.plot>`: @@ -94,7 +95,8 @@ You can plot one column versus another using the `x` and `y` keywords in .. note:: - For more formatting and styling options, see :ref:`below <visualization.formatting>`. + For more formatting and styling options, see + :ref:`formatting <visualization.formatting>` below. .. 
ipython:: python :suppress: @@ -107,14 +109,13 @@ Other Plots ----------- Plotting methods allow for a handful of plot styles other than the -default Line plot. These methods can be provided as the ``kind`` -keyword argument to :meth:`~DataFrame.plot`. -These include: +default line plot. These methods can be provided as the ``kind`` +keyword argument to :meth:`~DataFrame.plot`, and include: * :ref:`'bar' <visualization.barplot>` or :ref:`'barh' <visualization.barplot>` for bar plots * :ref:`'hist' <visualization.hist>` for histogram * :ref:`'box' <visualization.box>` for boxplot -* :ref:`'kde' <visualization.kde>` or ``'density'`` for density plots +* :ref:`'kde' <visualization.kde>` or :ref:`'density' <visualization.kde>` for density plots * :ref:`'area' <visualization.area_plot>` for area plots * :ref:`'scatter' <visualization.scatter>` for scatter plots * :ref:`'hexbin' <visualization.hexbin>` for hexagonal bin plots @@ -220,7 +221,7 @@ To get horizontal bar plots, use the ``barh`` method: Histograms ~~~~~~~~~~ -Histogram can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Series.plot.hist` methods. +Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Series.plot.hist` methods. .. ipython:: python @@ -238,7 +239,8 @@ Histogram can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Serie plt.close('all') -Histogram can be stacked by ``stacked=True``. Bin size can be changed by ``bins`` keyword. +A histogram can be stacked using ``stacked=True``. Bin size can be changed +using the ``bins`` keyword. .. ipython:: python @@ -252,7 +254,9 @@ Histogram can be stacked by ``stacked=True``. Bin size can be changed by ``bins` plt.close('all') -You can pass other keywords supported by matplotlib ``hist``. For example, horizontal and cumulative histogram can be drawn by ``orientation='horizontal'`` and ``cumulative=True``. +You can pass other keywords supported by matplotlib ``hist``. 
For example, +horizontal and cumulative histograms can be drawn by +``orientation='horizontal'`` and ``cumulative=True``. .. ipython:: python @@ -463,7 +467,7 @@ keyword, will affect the output type as well: ``'both'`` Yes Series of namedtuples ================ ======= ========================== -``Groupby.boxplot`` always returns a Series of ``return_type``. +``Groupby.boxplot`` always returns a ``Series`` of ``return_type``. .. ipython:: python :okwarning: @@ -481,7 +485,9 @@ keyword, will affect the output type as well: plt.close('all') -Compare to: +The subplots above are split by the numeric columns first, then the value of +the ``g`` column. Below the subplots are first split by the value of ``g``, +then by the numeric columns. .. ipython:: python :okwarning: @@ -536,8 +542,8 @@ Scatter Plot ~~~~~~~~~~~~ Scatter plot can be drawn by using the :meth:`DataFrame.plot.scatter` method. -Scatter plot requires numeric columns for x and y axis. -These can be specified by ``x`` and ``y`` keywords each. +Scatter plot requires numeric columns for the x and y axes. +These can be specified by the ``x`` and ``y`` keywords. .. ipython:: python :suppress: @@ -581,8 +587,9 @@ each point: plt.close('all') -You can pass other keywords supported by matplotlib ``scatter``. -Below example shows a bubble chart using a dataframe column values as bubble size. +You can pass other keywords supported by matplotlib +:meth:`scatter <matplotlib.axes.Axes.scatter>`. The example below shows a +bubble chart using a column of the ``DataFrame`` as the bubble size. .. ipython:: python @@ -631,7 +638,7 @@ You can specify alternative aggregations by passing values to the ``C`` and and ``reduce_C_function`` is a function of one argument that reduces all the values in a bin to a single number (e.g. ``mean``, ``max``, ``sum``, ``std``). In this example the positions are given by columns ``a`` and ``b``, while the value is -given by column ``z``. The bins are aggregated with numpy's ``max`` function. 
+given by column ``z``. The bins are aggregated with NumPy's ``max`` function. .. ipython:: python :suppress: @@ -685,14 +692,16 @@ A ``ValueError`` will be raised if there are any negative values in your data. plt.close('all') -For pie plots it's best to use square figures, one's with an equal aspect ratio. You can create the -figure with equal width and height, or force the aspect ratio to be equal after plotting by -calling ``ax.set_aspect('equal')`` on the returned ``axes`` object. +For pie plots it's best to use square figures, i.e. a figure aspect ratio 1. +You can create the figure with equal width and height, or force the aspect ratio +to be equal after plotting by calling ``ax.set_aspect('equal')`` on the returned +``axes`` object. -Note that pie plot with :class:`DataFrame` requires that you either specify a target column by the ``y`` -argument or ``subplots=True``. When ``y`` is specified, pie plot of selected column -will be drawn. If ``subplots=True`` is specified, pie plots for each column are drawn as subplots. -A legend will be drawn in each pie plots by default; specify ``legend=False`` to hide it. +Note that pie plot with :class:`DataFrame` requires that you either specify a +target column by the ``y`` argument or ``subplots=True``. When ``y`` is +specified, pie plot of selected column will be drawn. If ``subplots=True`` is +specified, pie plots for each column are drawn as subplots. A legend will be +drawn in each pie plots by default; specify ``legend=False`` to hide it. .. ipython:: python :suppress: @@ -762,7 +771,7 @@ See the `matplotlib pie documentation <http://matplotlib.org/api/pyplot_api.html Plotting with Missing Data -------------------------- -Pandas tries to be pragmatic about plotting DataFrames or Series +Pandas tries to be pragmatic about plotting ``DataFrames`` or ``Series`` that contain missing data. Missing values are dropped, left out, or filled depending on the plot type. 
@@ -861,7 +870,8 @@ Andrews Curves Andrews curves allow one to plot multivariate data as a large number of curves that are created using the attributes of samples as coefficients -for Fourier series. By coloring these curves differently for each class +for Fourier series, see the `Wikipedia entry<https://en.wikipedia.org/wiki/Andrews_plot>`_ +for more information. By coloring these curves differently for each class it is possible to visualize data clustering. Curves belonging to samples of the same class will usually be closer together and form larger structures. @@ -883,8 +893,10 @@ of the same class will usually be closer together and form larger structures. Parallel Coordinates ~~~~~~~~~~~~~~~~~~~~ -Parallel coordinates is a plotting technique for plotting multivariate data. -It allows one to see clusters in data and to estimate other statistics visually. +Parallel coordinates is a plotting technique for plotting multivariate data, +see the `Wikipedia entry<https://en.wikipedia.org/wiki/Parallel_coordinates>`_ +for an introduction. +Parallel coordinates allows one to see clusters in data and to estimate other statistics visually. Using parallel coordinates points are represented as connected line segments. Each vertical line represents one attribute. One set of connected line segments represents one data point. Points that tend to cluster will appear closer together. @@ -912,7 +924,9 @@ Lag Plot Lag plots are used to check if a data set or time series is random. Random data should not exhibit any structure in the lag plot. Non-random structure -implies that the underlying data are not random. +implies that the underlying data are not random. The ``lag`` argument may +be passed, and when ``lag=1`` the plot is essentially ``data[:-1]`` vs. +``data[1:]``. .. ipython:: python :suppress: @@ -947,7 +961,9 @@ If time series is random, such autocorrelations should be near zero for any and all time-lag separations. 
If time series is non-random then one or more of the autocorrelations will be significantly non-zero. The horizontal lines displayed in the plot correspond to 95% and 99% confidence bands. The dashed line is 99% -confidence band. +confidence band. See the +`Wikipedia entry<https://en.wikipedia.org/wiki/Correlogram>`_ for more about +autocorrelation plots. .. ipython:: python :suppress: @@ -1016,6 +1032,8 @@ unit interval). The point in the plane, where our sample settles to (where the forces acting on our sample are at an equilibrium) is where a dot representing our sample will be drawn. Depending on which class that sample belongs it will be colored differently. +See the R package `Radviz<https://cran.r-project.org/web/packages/Radviz/>`_ +for more information. **Note**: The "Iris" dataset is available `here <https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv>`__. @@ -1046,7 +1064,7 @@ Setting the plot style From version 1.5 and up, matplotlib offers a range of preconfigured plotting styles. Setting the style can be used to easily give plots the general look that you want. Setting the style is as easy as calling ``matplotlib.style.use(my_plot_style)`` before -creating your plot. For example you could do ``matplotlib.style.use('ggplot')`` for ggplot-style +creating your plot. For example you could write ``matplotlib.style.use('ggplot')`` for ggplot-style plots. You can see the various available style names at ``matplotlib.style.available`` and it's very @@ -1147,7 +1165,7 @@ To plot data on a secondary y-axis, use the ``secondary_y`` keyword: plt.close('all') -To plot some columns in a DataFrame, give the column names to the ``secondary_y`` +To plot some columns in a ``DataFrame``, give the column names to the ``secondary_y`` keyword: .. 
ipython:: python @@ -1248,7 +1266,7 @@ See the :meth:`autofmt_xdate <matplotlib.figure.autofmt_xdate>` method and the Subplots ~~~~~~~~ -Each Series in a DataFrame can be plotted on a different axis +Each ``Series`` in a ``DataFrame`` can be plotted on a different axis with the ``subplots`` keyword: .. ipython:: python @@ -1264,9 +1282,9 @@ with the ``subplots`` keyword: Using Layout and Targeting Multiple Axes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The layout of subplots can be specified by ``layout`` keyword. It can accept +The layout of subplots can be specified by the ``layout`` keyword. It can accept ``(rows, columns)``. The ``layout`` keyword can be used in -``hist`` and ``boxplot`` also. If input is invalid, ``ValueError`` will be raised. +``hist`` and ``boxplot`` also. If the input is invalid, a ``ValueError`` will be raised. The number of axes which can be contained by rows x columns specified by ``layout`` must be larger than the number of required subplots. If layout can contain more axes than required, @@ -1284,7 +1302,7 @@ or columns needed, given the other. plt.close('all') -The above example is identical to using +The above example is identical to using: .. ipython:: python @@ -1298,11 +1316,11 @@ The above example is identical to using The required number of columns (3) is inferred from the number of series to plot and the given number of rows (2). -Also, you can pass multiple axes created beforehand as list-like via ``ax`` keyword. -This allows to use more complicated layout. +You can pass multiple axes created beforehand as list-like via ``ax`` keyword. +This allows more complicated layouts. The passed axes must be the same number as the subplots being drawn. -When multiple axes are passed via ``ax`` keyword, ``layout``, ``sharex`` and ``sharey`` keywords +When multiple axes are passed via the ``ax`` keyword, ``layout``, ``sharex`` and ``sharey`` keywords don't affect to the output. 
You should explicitly pass ``sharex=False`` and ``sharey=False``, otherwise you will see a warning. @@ -1359,13 +1377,13 @@ Another option is passing an ``ax`` argument to :meth:`Series.plot` to plot on a Plotting With Error Bars ~~~~~~~~~~~~~~~~~~~~~~~~ -Plotting with error bars is now supported in the :meth:`DataFrame.plot` and :meth:`Series.plot` +Plotting with error bars is supported in :meth:`DataFrame.plot` and :meth:`Series.plot`. -Horizontal and vertical errorbars can be supplied to the ``xerr`` and ``yerr`` keyword arguments to :meth:`~DataFrame.plot()`. The error values can be specified using a variety of formats. +Horizontal and vertical error bars can be supplied to the ``xerr`` and ``yerr`` keyword arguments to :meth:`~DataFrame.plot()`. The error values can be specified using a variety of formats: -- As a :class:`DataFrame` or ``dict`` of errors with column names matching the ``columns`` attribute of the plotting :class:`DataFrame` or matching the ``name`` attribute of the :class:`Series` -- As a ``str`` indicating which of the columns of plotting :class:`DataFrame` contain the error values -- As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting :class:`DataFrame`/:class:`Series` +- As a :class:`DataFrame` or ``dict`` of errors with column names matching the ``columns`` attribute of the plotting :class:`DataFrame` or matching the ``name`` attribute of the :class:`Series`. +- As a ``str`` indicating which of the columns of plotting :class:`DataFrame` contain the error values. +- As raw values (``list``, ``tuple``, or ``np.ndarray``). Must be the same length as the plotting :class:`DataFrame`/:class:`Series`. Asymmetrical error bars are also supported, however raw error values must be provided in this case. For a ``M`` length :class:`Series`, a ``Mx2`` array should be provided indicating lower and upper (or left and right) errors. 
For a ``MxN`` :class:`DataFrame`, asymmetrical errors should be in a ``Mx2xN`` array. @@ -1420,7 +1438,10 @@ Plotting with matplotlib table is now supported in :meth:`DataFrame.plot` and : plt.close('all') -Also, you can pass different :class:`DataFrame` or :class:`Series` for ``table`` keyword. The data will be drawn as displayed in print method (not transposed automatically). If required, it should be transposed manually as below example. +Also, you can pass a different :class:`DataFrame` or :class:`Series` to the +``table`` keyword. The data will be drawn as displayed in print method +(not transposed automatically). If required, it should be transposed manually +as seen in the example below. .. ipython:: python @@ -1434,7 +1455,10 @@ Also, you can pass different :class:`DataFrame` or :class:`Series` for ``table`` plt.close('all') -Finally, there is a helper function ``pandas.plotting.table`` to create a table from :class:`DataFrame` and :class:`Series`, and add it to an ``matplotlib.Axes``. This function can accept keywords which matplotlib table has. +There also exists a helper function ``pandas.plotting.table``, which creates a +table from :class:`DataFrame` or :class:`Series`, and adds it to an +``matplotlib.Axes`` instance. This function can accept keywords which the +matplotlib `table <http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.table>`__ has. .. ipython:: python @@ -1461,18 +1485,18 @@ Colormaps A potential issue when plotting a large number of columns is that it can be difficult to distinguish some series due to repetition in the default colors. To -remedy this, DataFrame plotting supports the use of the ``colormap=`` argument, +remedy this, ``DataFrame`` plotting supports the use of the ``colormap`` argument, which accepts either a Matplotlib `colormap <http://matplotlib.org/api/cm_api.html>`__ or a string that is a name of a colormap registered with Matplotlib. 
A visualization of the default matplotlib colormaps is available `here -<http://wiki.scipy.org/Cookbook/Matplotlib/Show_colormaps>`__. +<https://matplotlib.org/examples/color/colormaps_reference.html>`__. As matplotlib does not directly support colormaps for line-based plots, the colors are selected based on an even spacing determined by the number of columns -in the DataFrame. There is no consideration made for background color, so some +in the ``DataFrame``. There is no consideration made for background color, so some colormaps will produce lines that are not easily visible. -To use the cubehelix colormap, we can simply pass ``'cubehelix'`` to ``colormap=`` +To use the cubehelix colormap, we can pass ``colormap='cubehelix'``. .. ipython:: python :suppress: @@ -1494,7 +1518,7 @@ To use the cubehelix colormap, we can simply pass ``'cubehelix'`` to ``colormap= plt.close('all') -or we can pass the colormap itself +Alternatively, we can pass the colormap itself: .. ipython:: python @@ -1565,9 +1589,9 @@ Plotting directly with matplotlib In some situations it may still be preferable or necessary to prepare plots directly with matplotlib, for instance when a certain type of plot or -customization is not (yet) supported by pandas. Series and DataFrame objects -behave like arrays and can therefore be passed directly to matplotlib functions -without explicit casts. +customization is not (yet) supported by pandas. ``Series`` and ``DataFrame`` +objects behave like arrays and can therefore be passed directly to +matplotlib functions without explicit casts. pandas also automatically registers formatters and locators that recognize date indices, thereby extending date and time support to practically all plot types
Minor changes to the documentation, specifically `categorical.rst` and `visualization.rst`: * Function references as links. * Backticks ` `` ` around Series, DataFrame. * Minor rephrasing of sentences, spelling, etc. * For plots such as lag plots, Andrews plot, Radviz, I added links to Wikipedia entries. Comments very welcome.
https://api.github.com/repos/pandas-dev/pandas/pulls/19428
2018-01-28T11:29:28Z
2018-01-31T23:54:16Z
2018-01-31T23:54:16Z
2018-02-01T05:29:38Z
DOC/ERR: better error message on no common merge keys
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ca625f492b61f..54dba831f7216 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -667,6 +667,7 @@ Reshaping - Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`) - Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`) - Bug in :func:`concat` when concatting sparse and dense series it returns only a ``SparseDataFrame``. Should be a ``DataFrame``. (:issue:`18914`, :issue:`18686`, and :issue:`16874`) +- Improved error message for :func:`DataFrame.merge` when there is no common merge key (:issue:`19427`) - diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 201d8ba427c8a..3d1983f65d70d 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -233,7 +233,7 @@ -------- merge_ordered merge_asof - +DataFrame.join """ # ----------------------------------------------------------------------- diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 3ec78ce52c6e5..9dbb327e3d956 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -1021,7 +1021,12 @@ def _validate_specification(self): common_cols = self.left.columns.intersection( self.right.columns) if len(common_cols) == 0: - raise MergeError('No common columns to perform merge on') + raise MergeError( + 'No common columns to perform merge on. 
' + 'Merge options: left_on={lon}, right_on={ron}, ' + 'left_index={lidx}, right_index={ridx}' + .format(lon=self.left_on, ron=self.right_on, + lidx=self.left_index, ridx=self.right_index)) if not common_cols.is_unique: raise MergeError("Data columns not unique: {common!r}" .format(common=common_cols)) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index f63c206c0c407..32f83ab972be5 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -270,6 +270,14 @@ def test_no_overlap_more_informative_error(self): df2 = DataFrame({'y': ['b', 'c']}, index=[dt, dt]) pytest.raises(MergeError, merge, df1, df2) + msg = ('No common columns to perform merge on. ' + 'Merge options: left_on={lon}, right_on={ron}, ' + 'left_index={lidx}, right_index={ridx}' + .format(lon=None, ron=None, lidx=False, ridx=False)) + + with tm.assert_raises_regex(MergeError, msg): + merge(df1, df2) + def test_merge_non_unique_indexes(self): dt = datetime(2012, 5, 1)
- [o] closes #19391 - [o] tests passed - [o] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - Let MergeError emit values keyword arguments - Add `DataFrame.join` on `See also` section of `pandas.merge`
https://api.github.com/repos/pandas-dev/pandas/pulls/19427
2018-01-28T08:30:26Z
2018-02-06T14:16:14Z
2018-02-06T14:16:13Z
2018-02-06T14:59:55Z
CLN: GH19404 Changing function signature to match logic
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6e777281b11e1..0174dc47f2144 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1906,7 +1906,7 @@ def to_pickle(self, path, compression='infer', return to_pickle(self, path, compression=compression, protocol=protocol) - def to_clipboard(self, excel=None, sep=None, **kwargs): + def to_clipboard(self, excel=True, sep=None, **kwargs): """ Attempt to write text representation of object to the system clipboard This can be pasted into Excel, for example. diff --git a/pandas/io/clipboards.py b/pandas/io/clipboards.py index 347ec41baf0e1..dcc221ce978b3 100644 --- a/pandas/io/clipboards.py +++ b/pandas/io/clipboards.py @@ -63,7 +63,7 @@ def read_clipboard(sep=r'\s+', **kwargs): # pragma: no cover return read_table(StringIO(text), sep=sep, **kwargs) -def to_clipboard(obj, excel=None, sep=None, **kwargs): # pragma: no cover +def to_clipboard(obj, excel=True, sep=None, **kwargs): # pragma: no cover """ Attempt to write text representation of object to the system clipboard The clipboard can be then pasted into Excel for example.
- [x] closes #19404 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/19425
2018-01-27T18:09:39Z
2018-02-01T13:26:37Z
2018-02-01T13:26:36Z
2018-02-01T13:26:40Z
BUG: maybe_convert_objects with convert_datetime
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index 1fa07dbed6822..17fbc46ea550d 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -1333,9 +1333,20 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, seen.object_ = 1 break - # we try to coerce datetime w/tz but must all have the same tz + # we try to coerce datetime w/tz but must all have the same tz, ie if we have UTC and PST tzinfo then this will not + # work if seen.datetimetz_: - if len({getattr(val, 'tzinfo', None) for val in objects}) == 1: + unique_types = set() + from dateutil import tz + for val in objects: + item = getattr(val, 'tzinfo', type(val).__name__) + # as tzoffset is not hashable, we use __repr__ in our set + if isinstance(item, tz.tzoffset): + unique_types.add(item.__repr__()) + else: + unique_types.add(item) + + if len(unique_types) == 1: from pandas import DatetimeIndex return DatetimeIndex(objects) seen.object_ = 1 diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index a057ca0879cac..30b2fb86ab7f6 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -18,6 +18,7 @@ assert_frame_equal) import pandas.util.testing as tm from pandas.tests.frame.common import TestData +from dateutil.parser import parse class TestDataFrameApply(TestData): @@ -370,6 +371,7 @@ def test_apply_bug(self): def f(r): return r['market'] + expected = positions.apply(f, axis=1) positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50], @@ -553,6 +555,26 @@ def test_apply_non_numpy_dtype(self): result = df.apply(lambda x: x) assert_frame_equal(result, df) + @pytest.mark.parametrize('time_in', ['22:05 UTC+1', '22:05']) + @pytest.mark.parametrize("test_input", [ + 'string text', + parse('22:05'), + parse('12:13 UTC+1'), + parse('15:56 UTC+2'), + 42, + 3.14159, ]) + def test_gh_19359(self, time_in, test_input): + def transform(x): + return Series({'time': parse(time_in), + 
'title': test_input}) + + applied = DataFrame(['stub']).apply(transform) + assert applied is not None + answer = Series(data=[parse(time_in), test_input], + index=['time', 'title']) + answer.name = 0 + tm.assert_series_equal(answer, applied[0]) + class TestInferOutputShape(object): # the user has supplied an opaque UDF where @@ -817,11 +839,10 @@ def zip_frames(*frames): class TestDataFrameAggregate(TestData): + _multiprocess_can_split_ = True def test_agg_transform(self): - with np.errstate(all='ignore'): - f_sqrt = np.sqrt(self.frame) f_abs = np.abs(self.frame) @@ -862,16 +883,19 @@ def test_transform_and_agg_err(self): # cannot both transform and agg def f(): self.frame.transform(['max', 'min']) + pytest.raises(ValueError, f) def f(): with np.errstate(all='ignore'): self.frame.agg(['max', 'sqrt']) + pytest.raises(ValueError, f) def f(): with np.errstate(all='ignore'): self.frame.transform(['max', 'sqrt']) + pytest.raises(ValueError, f) df = pd.DataFrame({'A': range(5), 'B': 5}) @@ -898,7 +922,6 @@ def test_demo(self): tm.assert_frame_equal(result.reindex_like(expected), expected) def test_agg_dict_nested_renaming_depr(self): - df = pd.DataFrame({'A': range(5), 'B': 5}) # nested renaming @@ -941,7 +964,6 @@ def test_agg_reduce(self): assert_frame_equal(result.reindex_like(expected), expected) def test_nuiscance_columns(self): - # GH 15015 df = DataFrame({'A': [1, 2, 3], 'B': [1., 2., 3.], @@ -969,7 +991,6 @@ def test_nuiscance_columns(self): assert_frame_equal(result, expected) def test_non_callable_aggregates(self): - # GH 16405 # 'size' is a property of frame/series # validate that this is working
- [ Y] closes #19359 - [ Y] tests added / passed - [ Y] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ Y] whatsnew entry: bug fix for GH19359
https://api.github.com/repos/pandas-dev/pandas/pulls/19423
2018-01-27T08:23:33Z
2018-10-11T01:54:27Z
null
2019-01-21T00:34:28Z
Remove src/numpy.pxd
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 996ece063b980..bfea4ff9915ac 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -8,10 +8,14 @@ from cpython.slice cimport PySlice_Check import numpy as np cimport numpy as cnp -from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t, - NPY_DATETIME, NPY_TIMEDELTA) +from numpy cimport ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t cnp.import_array() +cdef extern from "numpy/arrayobject.h": + # These can be cimported directly from numpy in cython>=0.27.3 + cdef enum NPY_TYPES: + NPY_DATETIME + NPY_TIMEDELTA cimport util diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index f14d508a625d0..b29a2e519efcd 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -12,6 +12,22 @@ iNaT = util.get_nat() cdef bint PY2 = sys.version_info[0] == 2 cdef double nan = <double> np.NaN +cdef extern from "numpy/arrayobject.h": + # cython's numpy.dtype specification is incorrect, which leads to + # errors in issubclass(self.dtype.type, np.bool_), so we directly + # include the correct version + # https://github.com/cython/cython/issues/2022 + + ctypedef class numpy.dtype [object PyArray_Descr]: + # Use PyDataType_* macros when possible, however there are no macros + # for accessing some of the fields, so some are defined. Please + # ask on cython-dev if you need more. 
+ cdef int type_num + cdef int itemsize "elsize" + cdef char byteorder + cdef object fields + cdef tuple names + from util cimport UINT8_MAX, UINT64_MAX, INT64_MAX, INT64_MIN # core.common import for fast inference checks @@ -609,13 +625,13 @@ cdef class Validator: cdef: Py_ssize_t n - cnp.dtype dtype + dtype dtype bint skipna def __cinit__( self, Py_ssize_t n, - cnp.dtype dtype=np.dtype(np.object_), + dtype dtype=np.dtype(np.object_), bint skipna=False ): self.n = n @@ -823,7 +839,7 @@ cdef class TemporalValidator(Validator): def __cinit__( self, Py_ssize_t n, - cnp.dtype dtype=np.dtype(np.object_), + dtype dtype=np.dtype(np.object_), bint skipna=False ): self.n = n diff --git a/pandas/_libs/src/numpy.pxd b/pandas/_libs/src/numpy.pxd deleted file mode 100644 index 8ce398ce218a8..0000000000000 --- a/pandas/_libs/src/numpy.pxd +++ /dev/null @@ -1,994 +0,0 @@ -# NumPy static imports for Cython -# -# If any of the PyArray_* functions are called, import_array must be -# called first. -# -# This also defines backwards-compatability buffer acquisition -# code for use in Python 2.x (or Python <= 2.5 when NumPy starts -# implementing PEP-3118 directly). -# -# Because of laziness, the format string of the buffer is statically -# allocated. Increase the size if this is not enough, or submit a -# patch to do this properly. 
-# -# Author: Dag Sverre Seljebotn -# - -DEF _buffer_format_string_len = 255 - -cimport cpython.buffer as pybuf -from cpython.ref cimport Py_INCREF, Py_XDECREF -from cpython.object cimport PyObject -cimport libc.stdlib as stdlib -cimport libc.stdio as stdio - -cdef extern from "Python.h": - ctypedef int Py_intptr_t - -cdef extern from "numpy/arrayobject.h": - ctypedef Py_intptr_t npy_intp - ctypedef size_t npy_uintp - - cdef enum NPY_TYPES: - NPY_BOOL - NPY_BYTE - NPY_UBYTE - NPY_SHORT - NPY_USHORT - NPY_INT - NPY_UINT - NPY_LONG - NPY_ULONG - NPY_LONGLONG - NPY_ULONGLONG - NPY_FLOAT - NPY_DOUBLE - NPY_LONGDOUBLE - NPY_CFLOAT - NPY_CDOUBLE - NPY_CLONGDOUBLE - NPY_OBJECT - NPY_STRING - NPY_UNICODE - NPY_VOID - NPY_NTYPES - NPY_NOTYPE - - NPY_INT8 - NPY_INT16 - NPY_INT32 - NPY_INT64 - NPY_INT128 - NPY_INT256 - NPY_UINT8 - NPY_UINT16 - NPY_UINT32 - NPY_UINT64 - NPY_UINT128 - NPY_UINT256 - NPY_FLOAT16 - NPY_FLOAT32 - NPY_FLOAT64 - NPY_FLOAT80 - NPY_FLOAT96 - NPY_FLOAT128 - NPY_FLOAT256 - NPY_COMPLEX32 - NPY_COMPLEX64 - NPY_COMPLEX128 - NPY_COMPLEX160 - NPY_COMPLEX192 - NPY_COMPLEX256 - NPY_COMPLEX512 - - NPY_DATETIME - NPY_TIMEDELTA - - NPY_INTP - - ctypedef enum NPY_ORDER: - NPY_ANYORDER - NPY_CORDER - NPY_FORTRANORDER - - ctypedef enum NPY_CLIPMODE: - NPY_CLIP - NPY_WRAP - NPY_RAISE - - ctypedef enum NPY_SCALARKIND: - NPY_NOSCALAR, - NPY_BOOL_SCALAR, - NPY_INTPOS_SCALAR, - NPY_INTNEG_SCALAR, - NPY_FLOAT_SCALAR, - NPY_COMPLEX_SCALAR, - NPY_OBJECT_SCALAR - - ctypedef enum NPY_SORTKIND: - NPY_QUICKSORT - NPY_HEAPSORT - NPY_MERGESORT - - ctypedef enum NPY_SEARCHSIDE: - NPY_SEARCHLEFT - NPY_SEARCHRIGHT - - enum: - NPY_C_CONTIGUOUS - NPY_F_CONTIGUOUS - NPY_CONTIGUOUS - NPY_FORTRAN - NPY_OWNDATA - NPY_FORCECAST - NPY_ENSURECOPY - NPY_ENSUREARRAY - NPY_ELEMENTSTRIDES - NPY_ALIGNED - NPY_NOTSWAPPED - NPY_WRITEABLE - NPY_UPDATEIFCOPY - NPY_ARR_HAS_DESCR - - NPY_BEHAVED - NPY_BEHAVED_NS - NPY_CARRAY - NPY_CARRAY_RO - NPY_FARRAY - NPY_FARRAY_RO - NPY_DEFAULT - - NPY_IN_ARRAY - 
NPY_OUT_ARRAY - NPY_INOUT_ARRAY - NPY_IN_FARRAY - NPY_OUT_FARRAY - NPY_INOUT_FARRAY - - NPY_UPDATE_ALL - - cdef enum: - NPY_MAXDIMS - - npy_intp NPY_MAX_ELSIZE - - ctypedef void (*PyArray_VectorUnaryFunc)( - void *, void *, npy_intp, void *, void *) - - ctypedef class numpy.dtype [object PyArray_Descr]: - # Use PyDataType_* macros when possible, however there are no macros - # for accessing some of the fields, so some are defined. Please - # ask on cython-dev if you need more. - cdef int type_num - cdef int itemsize "elsize" - cdef char byteorder - cdef object fields - cdef tuple names - - ctypedef extern class numpy.flatiter [object PyArrayIterObject]: - # Use through macros - pass - - ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]: - # Use through macros - pass - - ctypedef struct PyArrayObject: - # For use in situations where ndarray can't replace PyArrayObject*, - # like PyArrayObject**. - pass - - ctypedef class numpy.ndarray [object PyArrayObject]: - cdef __cythonbufferdefaults__ = {"mode": "strided"} - - cdef: - # Only taking a few of the most commonly used and stable fields. - # One should use PyArray_* macros instead to access the C fields. - char *data - int ndim "nd" - npy_intp *shape "dimensions" - npy_intp *strides - dtype descr - PyObject* base - - # Note: This syntax (function definition in pxd files) is an - # experimental exception made for __getbuffer__ and __releasebuffer__ - # -- the details of this may change. - def __getbuffer__(ndarray self, Py_buffer* info, int flags): - # This implementation of getbuffer is geared towards Cython - # requirements, and does not yet fulfill the PEP. 
- # In particular strided access is always provided regardless - # of flags - - if info == NULL: return - - cdef int copy_shape, i, ndim - cdef int endian_detector = 1 - cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) - - ndim = PyArray_NDIM(self) - - if sizeof(npy_intp) != sizeof(Py_ssize_t): - copy_shape = 1 - else: - copy_shape = 0 - - if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) - and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): - raise ValueError(u"ndarray is not C contiguous") - - if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) - and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): - raise ValueError(u"ndarray is not Fortran contiguous") - - info.buf = PyArray_DATA(self) - info.ndim = ndim - if copy_shape: - # Allocate new buffer for strides and shape info. - # This is allocated as one block, strides first. - info.strides = <Py_ssize_t*>stdlib.malloc( - sizeof(Py_ssize_t) * <size_t>ndim * 2) - - info.shape = info.strides + ndim - for i in range(ndim): - info.strides[i] = PyArray_STRIDES(self)[i] - info.shape[i] = PyArray_DIMS(self)[i] - else: - info.strides = <Py_ssize_t*>PyArray_STRIDES(self) - info.shape = <Py_ssize_t*>PyArray_DIMS(self) - info.suboffsets = NULL - info.itemsize = PyArray_ITEMSIZE(self) - info.readonly = not PyArray_ISWRITEABLE(self) - - cdef int t - cdef char* f = NULL - cdef dtype descr = self.descr - cdef list stack - cdef int offset - - cdef bint hasfields = PyDataType_HASFIELDS(descr) - - if not hasfields and not copy_shape: - # do not call releasebuffer - info.obj = None - else: - # need to call releasebuffer - info.obj = self - - if not hasfields: - t = descr.type_num - if ((descr.byteorder == '>' and little_endian) or - (descr.byteorder == '<' and not little_endian)): - raise ValueError(u"Non-native byte order not supported") - if t == NPY_BYTE: f = "b" - elif t == NPY_UBYTE: f = "B" - elif t == NPY_SHORT: f = "h" - elif t == NPY_USHORT: f = "H" - elif t == NPY_INT: f = "i" - 
elif t == NPY_UINT: f = "I" - elif t == NPY_LONG: f = "l" - elif t == NPY_ULONG: f = "L" - elif t == NPY_LONGLONG: f = "q" - elif t == NPY_ULONGLONG: f = "Q" - elif t == NPY_FLOAT: f = "f" - elif t == NPY_DOUBLE: f = "d" - elif t == NPY_LONGDOUBLE: f = "g" - elif t == NPY_CFLOAT: f = "Zf" - elif t == NPY_CDOUBLE: f = "Zd" - elif t == NPY_CLONGDOUBLE: f = "Zg" - elif t == NPY_OBJECT: f = "O" - else: - raise ValueError( - u"unknown dtype code in numpy.pxd (%d)" % t) - info.format = f - return - else: - info.format = <char*>stdlib.malloc(_buffer_format_string_len) - info.format[0] = '^' # Native data types, manual alignment - offset = 0 - f = _util_dtypestring(descr, info.format + 1, - info.format + _buffer_format_string_len, - &offset) - f[0] = 0 # Terminate format string - - def __releasebuffer__(ndarray self, Py_buffer* info): - if PyArray_HASFIELDS(self): - stdlib.free(info.format) - if sizeof(npy_intp) != sizeof(Py_ssize_t): - stdlib.free(info.strides) - # info.shape was stored after info.strides in the same block - - ctypedef signed char npy_bool - - ctypedef signed char npy_byte - ctypedef signed short npy_short - ctypedef signed int npy_int - ctypedef signed long npy_long - ctypedef signed long long npy_longlong - - ctypedef unsigned char npy_ubyte - ctypedef unsigned short npy_ushort - ctypedef unsigned int npy_uint - ctypedef unsigned long npy_ulong - ctypedef unsigned long long npy_ulonglong - - ctypedef float npy_float - ctypedef double npy_double - ctypedef long double npy_longdouble - - ctypedef signed char npy_int8 - ctypedef signed short npy_int16 - ctypedef signed int npy_int32 - ctypedef signed long long npy_int64 - ctypedef signed long long npy_int96 - ctypedef signed long long npy_int128 - - ctypedef unsigned char npy_uint8 - ctypedef unsigned short npy_uint16 - ctypedef unsigned int npy_uint32 - ctypedef unsigned long long npy_uint64 - ctypedef unsigned long long npy_uint96 - ctypedef unsigned long long npy_uint128 - - ctypedef float npy_float16 - 
ctypedef float npy_float32 - ctypedef double npy_float64 - ctypedef long double npy_float80 - ctypedef long double npy_float96 - ctypedef long double npy_float128 - - ctypedef struct npy_cfloat: - double real - double imag - - ctypedef struct npy_cdouble: - double real - double imag - - ctypedef struct npy_clongdouble: - double real - double imag - - ctypedef struct npy_complex64: - double real - double imag - - ctypedef struct npy_complex128: - double real - double imag - - ctypedef struct npy_complex160: - double real - double imag - - ctypedef struct npy_complex192: - double real - double imag - - ctypedef struct npy_complex256: - double real - double imag - - ctypedef struct PyArray_Dims: - npy_intp *ptr - int len - - void import_array() - - # - # Macros from ndarrayobject.h - # - bint PyArray_CHKFLAGS(ndarray m, int flags) - bint PyArray_ISCONTIGUOUS(ndarray m) - bint PyArray_ISWRITEABLE(ndarray m) - bint PyArray_ISALIGNED(ndarray m) - - int PyArray_NDIM(ndarray) - bint PyArray_ISONESEGMENT(ndarray) - bint PyArray_ISFORTRAN(ndarray) - int PyArray_FORTRANIF(ndarray) - - void* PyArray_DATA(ndarray) - char* PyArray_BYTES(ndarray) - npy_intp* PyArray_DIMS(ndarray) - npy_intp* PyArray_STRIDES(ndarray) - npy_intp PyArray_DIM(ndarray, size_t) - npy_intp PyArray_STRIDE(ndarray, size_t) - - # object PyArray_BASE(ndarray) wrong refcount semantics - # dtype PyArray_DESCR(ndarray) wrong refcount semantics - int PyArray_FLAGS(ndarray) - npy_intp PyArray_ITEMSIZE(ndarray) - int PyArray_TYPE(ndarray arr) - - object PyArray_GETITEM(ndarray arr, void *itemptr) - int PyArray_SETITEM(ndarray arr, void *itemptr, object obj) - - bint PyTypeNum_ISBOOL(int) - bint PyTypeNum_ISUNSIGNED(int) - bint PyTypeNum_ISSIGNED(int) - bint PyTypeNum_ISINTEGER(int) - bint PyTypeNum_ISFLOAT(int) - bint PyTypeNum_ISNUMBER(int) - bint PyTypeNum_ISSTRING(int) - bint PyTypeNum_ISCOMPLEX(int) - bint PyTypeNum_ISPYTHON(int) - bint PyTypeNum_ISFLEXIBLE(int) - bint PyTypeNum_ISUSERDEF(int) - bint 
PyTypeNum_ISEXTENDED(int) - bint PyTypeNum_ISOBJECT(int) - - bint PyDataType_ISBOOL(dtype) - bint PyDataType_ISUNSIGNED(dtype) - bint PyDataType_ISSIGNED(dtype) - bint PyDataType_ISINTEGER(dtype) - bint PyDataType_ISFLOAT(dtype) - bint PyDataType_ISNUMBER(dtype) - bint PyDataType_ISSTRING(dtype) - bint PyDataType_ISCOMPLEX(dtype) - bint PyDataType_ISPYTHON(dtype) - bint PyDataType_ISFLEXIBLE(dtype) - bint PyDataType_ISUSERDEF(dtype) - bint PyDataType_ISEXTENDED(dtype) - bint PyDataType_ISOBJECT(dtype) - bint PyDataType_HASFIELDS(dtype) - - bint PyArray_ISBOOL(ndarray) - bint PyArray_ISUNSIGNED(ndarray) - bint PyArray_ISSIGNED(ndarray) - bint PyArray_ISINTEGER(ndarray) - bint PyArray_ISFLOAT(ndarray) - bint PyArray_ISNUMBER(ndarray) - bint PyArray_ISSTRING(ndarray) - bint PyArray_ISCOMPLEX(ndarray) - bint PyArray_ISPYTHON(ndarray) - bint PyArray_ISFLEXIBLE(ndarray) - bint PyArray_ISUSERDEF(ndarray) - bint PyArray_ISEXTENDED(ndarray) - bint PyArray_ISOBJECT(ndarray) - bint PyArray_HASFIELDS(ndarray) - - bint PyArray_ISVARIABLE(ndarray) - - bint PyArray_SAFEALIGNEDCOPY(ndarray) - bint PyArray_ISNBO(ndarray) - bint PyArray_IsNativeByteOrder(ndarray) - bint PyArray_ISNOTSWAPPED(ndarray) - bint PyArray_ISBYTESWAPPED(ndarray) - - bint PyArray_FLAGSWAP(ndarray, int) - - bint PyArray_ISCARRAY(ndarray) - bint PyArray_ISCARRAY_RO(ndarray) - bint PyArray_ISFARRAY(ndarray) - bint PyArray_ISFARRAY_RO(ndarray) - bint PyArray_ISBEHAVED(ndarray) - bint PyArray_ISBEHAVED_RO(ndarray) - - bint PyDataType_ISNOTSWAPPED(dtype) - bint PyDataType_ISBYTESWAPPED(dtype) - - bint PyArray_DescrCheck(object) - - bint PyArray_Check(object) - bint PyArray_CheckExact(object) - - # Cannot be supported due to out arg: - # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&) - # bint PyArray_HasArrayInterface(op, out) - - bint PyArray_IsZeroDim(object) - # Cannot be supported due to ## ## in macro: - # bint PyArray_IsScalar(object, verbatim work) - bint PyArray_CheckScalar(object) - bint 
PyArray_IsPythonNumber(object) - bint PyArray_IsPythonScalar(object) - bint PyArray_IsAnyScalar(object) - bint PyArray_CheckAnyScalar(object) - ndarray PyArray_GETCONTIGUOUS(ndarray) - bint PyArray_SAMESHAPE(ndarray, ndarray) - npy_intp PyArray_SIZE(ndarray) - npy_intp PyArray_NBYTES(ndarray) - - object PyArray_FROM_O(object) - object PyArray_FROM_OF(object m, int flags) - bint PyArray_FROM_OT(object m, int type) - bint PyArray_FROM_OTF(object m, int type, int flags) - object PyArray_FROMANY(object m, int type, int min, int max, int flags) - object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) - object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) - void PyArray_FILLWBYTE(object, int val) - npy_intp PyArray_REFCOUNT(object) - object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) - unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) - bint PyArray_EquivByteorders(int b1, int b2) - object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) - object PyArray_SimpleNewFromData(int nd, npy_intp* dims, - int typenum, void* data) - #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr) - object PyArray_ToScalar(void* data, ndarray arr) - - void* PyArray_GETPTR1(ndarray m, npy_intp i) - void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j) - void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k) - void* PyArray_GETPTR4(ndarray m, npy_intp i, - npy_intp j, npy_intp k, npy_intp l) - - void PyArray_XDECREF_ERR(ndarray) - # Cannot be supported due to out arg - # void PyArray_DESCR_REPLACE(descr) - - object PyArray_Copy(ndarray) - object PyArray_FromObject(object op, int type, - int min_depth, int max_depth) - object PyArray_ContiguousFromObject(object op, int type, - int min_depth, int max_depth) - object PyArray_CopyFromObject(object op, int type, - int min_depth, int max_depth) - - object PyArray_Cast(ndarray mp, int type_num) - object PyArray_Take(ndarray ap, object items, int axis) 
- object PyArray_Put(ndarray ap, object items, object values) - - void PyArray_ITER_RESET(flatiter it) nogil - void PyArray_ITER_NEXT(flatiter it) nogil - void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil - void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil - void* PyArray_ITER_DATA(flatiter it) nogil - bint PyArray_ITER_NOTDONE(flatiter it) nogil - - void PyArray_MultiIter_RESET(broadcast multi) nogil - void PyArray_MultiIter_NEXT(broadcast multi) nogil - void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil - void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil - void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil - void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil - bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil - - # Functions from __multiarray_api.h - - # Functions taking dtype and returning object/ndarray are disabled - # for now as they steal dtype references. I'm conservative and disable - # more than is probably needed until it can be checked further. 
- int PyArray_SetNumericOps (object) - object PyArray_GetNumericOps () - int PyArray_INCREF (ndarray) - int PyArray_XDECREF (ndarray) - void PyArray_SetStringFunction (object, int) - dtype PyArray_DescrFromType (int) - object PyArray_TypeObjectFromType (int) - char * PyArray_Zero (ndarray) - char * PyArray_One (ndarray) - #object PyArray_CastToType (ndarray, dtype, int) - int PyArray_CastTo (ndarray, ndarray) - int PyArray_CastAnyTo (ndarray, ndarray) - int PyArray_CanCastSafely (int, int) - npy_bool PyArray_CanCastTo (dtype, dtype) - int PyArray_ObjectType (object, int) - dtype PyArray_DescrFromObject (object, dtype) - #ndarray* PyArray_ConvertToCommonType (object, int *) - dtype PyArray_DescrFromScalar (object) - dtype PyArray_DescrFromTypeObject (object) - npy_intp PyArray_Size (object) - #object PyArray_Scalar (void *, dtype, object) - #object PyArray_FromScalar (object, dtype) - void PyArray_ScalarAsCtype (object, void *) - #int PyArray_CastScalarToCtype (object, void *, dtype) - #int PyArray_CastScalarDirect (object, dtype, void *, int) - object PyArray_ScalarFromObject (object) - #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int) - object PyArray_FromDims (int, int *, int) - #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *) - #object PyArray_FromAny (object, dtype, int, int, int, object) - object PyArray_EnsureArray (object) - object PyArray_EnsureAnyArray (object) - #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *) - #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *) - #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp) - #object PyArray_FromIter (object, dtype, npy_intp) - object PyArray_Return (ndarray) - #object PyArray_GetField (ndarray, dtype, int) - #int PyArray_SetField (ndarray, dtype, int, object) - object PyArray_Byteswap (ndarray, npy_bool) - object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER) - int PyArray_MoveInto (ndarray, ndarray) - int PyArray_CopyInto 
(ndarray, ndarray) - int PyArray_CopyAnyInto (ndarray, ndarray) - int PyArray_CopyObject (ndarray, object) - object PyArray_NewCopy (ndarray, NPY_ORDER) - object PyArray_ToList (ndarray) - object PyArray_ToString (ndarray, NPY_ORDER) - int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *) - int PyArray_Dump (object, object, int) - object PyArray_Dumps (object, int) - int PyArray_ValidType (int) - void PyArray_UpdateFlags (ndarray, int) - object PyArray_New (type, int, npy_intp *, int, npy_intp *, - void *, int, int, object) - #dtype PyArray_DescrNew (dtype) - dtype PyArray_DescrNewFromType (int) - double PyArray_GetPriority (object, double) - object PyArray_IterNew (object) - object PyArray_MultiIterNew (int, ...) - - int PyArray_PyIntAsInt (object) - npy_intp PyArray_PyIntAsIntp (object) - int PyArray_Broadcast (broadcast) - void PyArray_FillObjectArray (ndarray, object) - int PyArray_FillWithScalar (ndarray, object) - npy_bool PyArray_CheckStrides ( - int, int, npy_intp, npy_intp, npy_intp *, npy_intp *) - dtype PyArray_DescrNewByteorder (dtype, char) - object PyArray_IterAllButAxis (object, int *) - #object PyArray_CheckFromAny (object, dtype, int, int, int, object) - #object PyArray_FromArray (ndarray, dtype, int) - object PyArray_FromInterface (object) - object PyArray_FromStructInterface (object) - #object PyArray_FromArrayAttr (object, dtype, object) - #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*) - int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND) - object PyArray_NewFlagsObject (object) - npy_bool PyArray_CanCastScalar (type, type) - #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t) - int PyArray_RemoveSmallest (broadcast) - int PyArray_ElementStrides (object) - void PyArray_Item_INCREF (char *, dtype) - void PyArray_Item_XDECREF (char *, dtype) - object PyArray_FieldNames (object) - object PyArray_Transpose (ndarray, PyArray_Dims *) - object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE) - object 
PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE) - object PyArray_PutMask (ndarray, object, object) - object PyArray_Repeat (ndarray, object, int) - object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE) - int PyArray_Sort (ndarray, int, NPY_SORTKIND) - object PyArray_ArgSort (ndarray, int, NPY_SORTKIND) - object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE) - object PyArray_ArgMax (ndarray, int, ndarray) - object PyArray_ArgMin (ndarray, int, ndarray) - object PyArray_Reshape (ndarray, object) - object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER) - object PyArray_Squeeze (ndarray) - #object PyArray_View (ndarray, dtype, type) - object PyArray_SwapAxes (ndarray, int, int) - object PyArray_Max (ndarray, int, ndarray) - object PyArray_Min (ndarray, int, ndarray) - object PyArray_Ptp (ndarray, int, ndarray) - object PyArray_Mean (ndarray, int, int, ndarray) - object PyArray_Trace (ndarray, int, int, int, int, ndarray) - object PyArray_Diagonal (ndarray, int, int, int) - object PyArray_Clip (ndarray, object, object, ndarray) - object PyArray_Conjugate (ndarray, ndarray) - object PyArray_Nonzero (ndarray) - object PyArray_Std (ndarray, int, int, ndarray, int) - object PyArray_Sum (ndarray, int, int, ndarray) - object PyArray_CumSum (ndarray, int, int, ndarray) - object PyArray_Prod (ndarray, int, int, ndarray) - object PyArray_CumProd (ndarray, int, int, ndarray) - object PyArray_All (ndarray, int, ndarray) - object PyArray_Any (ndarray, int, ndarray) - object PyArray_Compress (ndarray, object, int, ndarray) - object PyArray_Flatten (ndarray, NPY_ORDER) - object PyArray_Ravel (ndarray, NPY_ORDER) - npy_intp PyArray_MultiplyList (npy_intp *, int) - int PyArray_MultiplyIntList (int *, int) - void * PyArray_GetPtr (ndarray, npy_intp*) - int PyArray_CompareLists (npy_intp *, npy_intp *, int) - #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype) - #int PyArray_As1D (object*, char **, int *, int) - #int PyArray_As2D (object*, char 
***, int *, int *, int) - int PyArray_Free (object, void *) - #int PyArray_Converter (object, object*) - int PyArray_IntpFromSequence (object, npy_intp *, int) - object PyArray_Concatenate (object, int) - object PyArray_InnerProduct (object, object) - object PyArray_MatrixProduct (object, object) - object PyArray_CopyAndTranspose (object) - object PyArray_Correlate (object, object, int) - int PyArray_TypestrConvert (int, int) - #int PyArray_DescrConverter (object, dtype*) - #int PyArray_DescrConverter2 (object, dtype*) - int PyArray_IntpConverter (object, PyArray_Dims *) - #int PyArray_BufferConverter (object, chunk) - int PyArray_AxisConverter (object, int *) - int PyArray_BoolConverter (object, npy_bool *) - int PyArray_ByteorderConverter (object, char *) - int PyArray_OrderConverter (object, NPY_ORDER *) - unsigned char PyArray_EquivTypes (dtype, dtype) - #object PyArray_Zeros (int, npy_intp *, dtype, int) - #object PyArray_Empty (int, npy_intp *, dtype, int) - object PyArray_Where (object, object, object) - object PyArray_Arange (double, double, double, int) - #object PyArray_ArangeObj (object, object, object, dtype) - int PyArray_SortkindConverter (object, NPY_SORTKIND *) - object PyArray_LexSort (object, int) - object PyArray_Round (ndarray, int, ndarray) - unsigned char PyArray_EquivTypenums (int, int) - int PyArray_RegisterDataType (dtype) - int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *) - int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND) - #void PyArray_InitArrFuncs (PyArray_ArrFuncs *) - object PyArray_IntTupleFromIntp (int, npy_intp *) - int PyArray_TypeNumFromName (char *) - int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *) - #int PyArray_OutputConverter (object, ndarray*) - object PyArray_BroadcastToShape (object, npy_intp *, int) - void _PyArray_SigintHandler (int) - void* _PyArray_GetSigintBuf () - #int PyArray_DescrAlignConverter (object, dtype*) - #int PyArray_DescrAlignConverter2 (object, dtype*) - int 
PyArray_SearchsideConverter (object, void *) - object PyArray_CheckAxis (ndarray, int *, int) - npy_intp PyArray_OverflowMultiplyList (npy_intp *, int) - int PyArray_CompareString (char *, char *, size_t) - - -# Typedefs that matches the runtime dtype objects in -# the numpy module. - -# The ones that are commented out needs an IFDEF function -# in Cython to enable them only on the right systems. - -ctypedef npy_int8 int8_t -ctypedef npy_int16 int16_t -ctypedef npy_int32 int32_t -ctypedef npy_int64 int64_t -#ctypedef npy_int96 int96_t -#ctypedef npy_int128 int128_t - -ctypedef npy_uint8 uint8_t -ctypedef npy_uint16 uint16_t -ctypedef npy_uint32 uint32_t -ctypedef npy_uint64 uint64_t -#ctypedef npy_uint96 uint96_t -#ctypedef npy_uint128 uint128_t - -ctypedef npy_float16 float16_t -ctypedef npy_float32 float32_t -ctypedef npy_float64 float64_t -#ctypedef npy_float80 float80_t -#ctypedef npy_float128 float128_t - -ctypedef float complex complex64_t -ctypedef double complex complex128_t - -# The int types are mapped a bit surprising -- -# numpy.int corresponds to 'l' and numpy.long to 'q' -ctypedef npy_long int_t -ctypedef npy_longlong long_t -ctypedef npy_longlong longlong_t - -ctypedef npy_ulong uint_t -ctypedef npy_ulonglong ulong_t -ctypedef npy_ulonglong ulonglong_t - -ctypedef npy_intp intp_t -ctypedef npy_uintp uintp_t - -ctypedef npy_double float_t -ctypedef npy_double double_t -ctypedef npy_longdouble longdouble_t - -ctypedef npy_cfloat cfloat_t -ctypedef npy_cdouble cdouble_t -ctypedef npy_clongdouble clongdouble_t - -ctypedef npy_cdouble complex_t - -cdef inline object PyArray_MultiIterNew1(a): - return PyArray_MultiIterNew(1, <void*>a) - -cdef inline object PyArray_MultiIterNew2(a, b): - return PyArray_MultiIterNew(2, <void*>a, <void*>b) - -cdef inline object PyArray_MultiIterNew3(a, b, c): - return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) - -cdef inline object PyArray_MultiIterNew4(a, b, c, d): - return PyArray_MultiIterNew(4, <void*>a, 
<void*>b, <void*>c, <void*> d) - -cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): - return PyArray_MultiIterNew(5, <void*>a, <void*>b, - <void*>c, <void*> d, <void*> e) - -cdef inline char* _util_dtypestring(dtype descr, char* f, - char* end, int* offset) except NULL: - # Recursive utility function used in __getbuffer__ to get format - # string. The new location in the format string is returned. - - cdef dtype child - cdef int delta_offset - cdef tuple i - cdef int endian_detector = 1 - cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) - cdef tuple fields - - for childname in descr.names: - fields = descr.fields[childname] - child, new_offset = fields - - if (end - f) - (new_offset - offset[0]) < 15: - raise RuntimeError( - u"Format string allocated too short, see comment in numpy.pxd") - - if ((child.byteorder == '>' and little_endian) or - (child.byteorder == '<' and not little_endian)): - raise ValueError(u"Non-native byte order not supported") - # One could encode it in the format string and have Cython - # complain instead, BUT: < and > in format strings also imply - # standardized sizes for datatypes, and we rely on native in - # order to avoid reencoding data types based on their size. - # - # A proper PEP 3118 exporter for other clients than Cython - # must deal properly with this! 
- - # Output padding bytes - while offset[0] < new_offset: - f[0] = 120 # "x"; pad byte - f += 1 - offset[0] += 1 - - offset[0] += child.itemsize - - if not PyDataType_HASFIELDS(child): - t = child.type_num - if end - f < 5: - raise RuntimeError(u"Format string allocated too short.") - - # Until ticket #99 is fixed, use integers to avoid warnings - if t == NPY_BYTE: f[0] = 98 #"b" - elif t == NPY_UBYTE: f[0] = 66 #"B" - elif t == NPY_SHORT: f[0] = 104 #"h" - elif t == NPY_USHORT: f[0] = 72 #"H" - elif t == NPY_INT: f[0] = 105 #"i" - elif t == NPY_UINT: f[0] = 73 #"I" - elif t == NPY_LONG: f[0] = 108 #"l" - elif t == NPY_ULONG: f[0] = 76 #"L" - elif t == NPY_LONGLONG: f[0] = 113 #"q" - elif t == NPY_ULONGLONG: f[0] = 81 #"Q" - elif t == NPY_FLOAT: f[0] = 102 #"f" - elif t == NPY_DOUBLE: f[0] = 100 #"d" - elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" - elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf - elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd - elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg - elif t == NPY_OBJECT: f[0] = 79 #"O" - else: - raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) - f += 1 - else: - # Cython ignores struct boundary information ("T{...}"), - # so don't output it - f = _util_dtypestring(child, f, end, offset) - return f - - -# -# ufunc API -# - -cdef extern from "numpy/ufuncobject.h": - - ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, - npy_intp *, void *) - - ctypedef extern class numpy.ufunc [object PyUFuncObject]: - cdef: - int nin, nout, nargs - int identity - PyUFuncGenericFunction *functions - void **data - int ntypes - int check_return - char *name - char *types - char *doc - void *ptr - PyObject *obj - PyObject *userloops - - cdef enum: - PyUFunc_Zero - PyUFunc_One - PyUFunc_None - UFUNC_ERR_IGNORE - UFUNC_ERR_WARN - UFUNC_ERR_RAISE - UFUNC_ERR_CALL - UFUNC_ERR_PRINT - UFUNC_ERR_LOG - UFUNC_MASK_DIVIDEBYZERO - UFUNC_MASK_OVERFLOW - UFUNC_MASK_UNDERFLOW - UFUNC_MASK_INVALID - 
UFUNC_SHIFT_DIVIDEBYZERO - UFUNC_SHIFT_OVERFLOW - UFUNC_SHIFT_UNDERFLOW - UFUNC_SHIFT_INVALID - UFUNC_FPE_DIVIDEBYZERO - UFUNC_FPE_OVERFLOW - UFUNC_FPE_UNDERFLOW - UFUNC_FPE_INVALID - UFUNC_ERR_DEFAULT - UFUNC_ERR_DEFAULT2 - - object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *, - void **, char *, int, int, int, int, char *, char *, int) - int PyUFunc_RegisterLoopForType(ufunc, int, - PyUFuncGenericFunction, int *, void *) - int PyUFunc_GenericFunction \ - (ufunc, PyObject *, PyObject *, PyArrayObject **) - void PyUFunc_f_f_As_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_d_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_f_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_g_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_F_F_As_D_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_F_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_D_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_G_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f_As_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_ff_f \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_dd_d \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_gg_g \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F_As_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_DD_D \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_FF_F \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_GG_G \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_O_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_OO_O_method \ - (char **, npy_intp *, npy_intp *, void *) - void PyUFunc_On_Om \ - (char **, npy_intp *, npy_intp *, void *) - int PyUFunc_GetPyValues \ - (char *, int *, int *, PyObject 
**) - int PyUFunc_checkfperr \ - (int, PyObject *, int *) - void PyUFunc_clearfperr() - int PyUFunc_getfperr() - int PyUFunc_handlefperr \ - (int, PyObject *, int, int *) - int PyUFunc_ReplaceLoopBySignature \ - (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *) - object PyUFunc_FromFuncAndDataAndSignature \ - (PyUFuncGenericFunction *, void **, char *, int, int, int, - int, char *, char *, int, char *) - - void import_ufunc() - - -cdef inline void set_array_base(ndarray arr, object base): - cdef PyObject* baseptr - if base is None: - baseptr = NULL - else: - Py_INCREF(base) # important to do this before decref below! - baseptr = <PyObject*>base - Py_XDECREF(arr.base) - arr.base = baseptr - -cdef inline object get_array_base(ndarray arr): - if arr.base is NULL: - return None - else: - return <object>arr.base
The existence of the src/numpy.pxd file causes ambiguity as to where cimports are coming from. src/numpy.pxd? cython's numpy/__init__.pxd? What about the np.get_include() uses in setup.py? This PR gets rid of the unnecessary file by picking out the one definition that is actually needed.
https://api.github.com/repos/pandas-dev/pandas/pulls/19418
2018-01-27T00:29:53Z
2018-01-27T16:33:25Z
2018-01-27T16:33:25Z
2018-01-27T18:29:05Z
Remove unused files from src/klib
diff --git a/pandas/_libs/src/klib/ktypes.h b/pandas/_libs/src/klib/ktypes.h deleted file mode 100644 index 981f17372a2d5..0000000000000 --- a/pandas/_libs/src/klib/ktypes.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __KTYPES_H -#define __KTYPES_H - -/* compipler specific configuration */ - -#endif /* __KTYPES_H */ diff --git a/pandas/_libs/src/klib/kvec.h b/pandas/_libs/src/klib/kvec.h deleted file mode 100644 index c5e6e6c407dfc..0000000000000 --- a/pandas/_libs/src/klib/kvec.h +++ /dev/null @@ -1,151 +0,0 @@ -/* The MIT License - - Copyright (c) 2008, by Attractive Chaos <attractor@live.co.uk> - - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: - - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE. -*/ - -/* - An example: - -#include "kvec.h" -int main() { - kvec_t(int) array; - kv_init(array); - kv_push(int, array, 10); // append - kv_a(int, array, 20) = 5; // dynamic - kv_A(array, 20) = 4; // static - kv_destroy(array); - return 0; -} -*/ - -/* - 2008-09-22 (0.1.0): - - * The initial version. 
- -*/ - -#ifndef AC_KVEC_H -#define AC_KVEC_H - -#include <stdlib.h> -#include <Python.h> -#include <numpy/ndarraytypes.h> - -#ifndef PANDAS_INLINE - #if defined(__GNUC__) - #define PANDAS_INLINE static __inline__ - #elif defined(_MSC_VER) - #define PANDAS_INLINE static __inline - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L - #define PANDAS_INLINE static inline - #else - #define PANDAS_INLINE - #endif -#endif - -#define kv_roundup32(x) (--(x), (x)|=(x)>>1, (x)|=(x)>>2, (x)|=(x)>>4, (x)|=(x)>>8, (x)|=(x)>>16, ++(x)) - -#define kvec_t(type) struct { size_t n, m; type *a; } -#define kv_init(v) ((v).n = (v).m = 0, (v).a = 0) -#define kv_destroy(v) free((v).a) -#define kv_A(v, i) ((v).a[(i)]) -#define kv_pop(v) ((v).a[--(v).n]) -#define kv_size(v) ((v).n) -#define kv_max(v) ((v).m) - -#define kv_resize(type, v, s) ((v).m = (s), (v).a = (type*)realloc((v).a, sizeof(type) * (v).m)) - -#define kv_copy(type, v1, v0) do { \ - if ((v1).m < (v0).n) kv_resize(type, v1, (v0).n); \ - (v1).n = (v0).n; \ - memcpy((v1).a, (v0).a, sizeof(type) * (v0).n); \ - } while (0) \ - -#define kv_push(type, v, x) do { \ - if ((v)->n == (v)->m) { \ - (v)->m = (v)->m? (v)->m<<1 : 2; \ - (v)->a = (type*)realloc((v)->a, sizeof(type) * (v)->m); \ - } \ - (v)->a[(v)->n++] = (x); \ - } while (0) - -#define kv_pushp(type, v) (((v).n == (v).m)? \ - ((v).m = ((v).m? (v).m<<1 : 2), \ - (v).a = (type*)realloc((v).a, sizeof(type) * (v).m), 0) \ - : 0), ((v).a + ((v).n++)) - -#define kv_a(type, v, i) ((v).m <= (size_t)(i)? \ - ((v).m = (v).n = (i) + 1, kv_roundup32((v).m), \ - (v).a = (type*)realloc((v).a, sizeof(type) * (v).m), 0) \ - : (v).n <= (size_t)(i)? 
(v).n = (i) \ - : 0), (v).a[(i)] - -// #define kv_int64_push(v, x) (kv_push(int64_t, (v), (x))) - -typedef struct { - size_t n, m; - int64_t* a; -} kv_int64_t; - -typedef struct { - size_t n, m; - double* a; -} kv_double; - -typedef struct { - size_t n, m; - PyObject** a; -} kv_object_t; - -void PANDAS_INLINE kv_object_push(kv_object_t *v, PyObject *x) { - do { - if (v->n == v->m) { - v->m = v->m? v->m<<1 : 2; - v->a = (PyObject**)realloc(v->a, sizeof(PyObject*) * v->m); - } - v->a[v->n++] = x; - } while (0); - // kv_push(PyObject*, v, x); - Py_INCREF(x); -} - -void PANDAS_INLINE kv_int64_push(kv_int64_t *v, int64_t x) { - kv_push(int64_t, v, x); -} - -void PANDAS_INLINE kv_double_push(kv_double *v, double x) { - kv_push(double, v, x); -} - -void PANDAS_INLINE kv_object_destroy(kv_object_t *v) { - int i; - for (i = 0; i < v->n; ++i) - { - Py_XDECREF(v->a[i]); - } - free(v->a); -} - - -#endif diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 61e3752a49639..e7f334b267461 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -44,7 +44,6 @@ Numeric decoder derived from from TCL library #include <numpy/arrayobject.h> // NOLINT(build/include_order) #include <numpy/arrayscalars.h> // NOLINT(build/include_order) #include <numpy/npy_math.h> // NOLINT(build/include_order) -#include <numpy_helper.h> // NOLINT(build/include_order) #include <stdio.h> // NOLINT(build/include_order) #include <ultrajson.h> // NOLINT(build/include_order) #include <np_datetime.h> // NOLINT(build/include_order) @@ -60,6 +59,8 @@ static PyTypeObject *cls_series; static PyTypeObject *cls_index; static PyTypeObject *cls_nat; +npy_int64 get_nat(void) { return NPY_MIN_INT64; } + typedef void *(*PFN_PyTypeToJSON)(JSOBJ obj, JSONTypeContext *ti, void *outValue, size_t *_outLen); diff --git a/setup.py b/setup.py index 7ade1544ec5cd..859d50303ecb1 100755 --- a/setup.py +++ b/setup.py @@ -694,10 +694,9 @@ 
def pxd(name): 'pandas/_libs/src/ujson/lib/ultrajsonenc.c', 'pandas/_libs/src/ujson/lib/ultrajsondec.c'] + np_datetime_sources), - include_dirs=(['pandas/_libs/src/ujson/python', - 'pandas/_libs/src/ujson/lib', - 'pandas/_libs/src/datetime'] + - common_include), + include_dirs=['pandas/_libs/src/ujson/python', + 'pandas/_libs/src/ujson/lib', + 'pandas/_libs/src/datetime'], extra_compile_args=(['-D_GNU_SOURCE'] + extra_compile_args))
De-couple ujson from numpy_helper
https://api.github.com/repos/pandas-dev/pandas/pulls/19415
2018-01-26T18:45:41Z
2018-01-27T01:06:43Z
2018-01-27T01:06:43Z
2018-01-31T06:49:26Z
Refactor out libwriters, fix references to Timestamp, Timedelta
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index c3a654b01022c..e1d59f807a7fd 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -21,14 +21,7 @@ from cpython cimport (Py_INCREF, PyTuple_SET_ITEM, PyBytes_Check, PyUnicode_Check, PyTuple_New, - PyObject_RichCompareBool, - PyBytes_GET_SIZE, - PyUnicode_GET_SIZE) - -try: - from cpython cimport PyString_GET_SIZE -except ImportError: - from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE + PyObject_RichCompareBool) cimport cpython @@ -38,7 +31,7 @@ from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, PyDateTime_IMPORT) PyDateTime_IMPORT -from tslib import NaT, Timestamp, Timedelta, array_to_datetime +from tslib import NaT, array_to_datetime from missing cimport checknull @@ -127,28 +120,6 @@ def item_from_zerodim(object val): return util.unbox_if_zerodim(val) -@cython.wraparound(False) -@cython.boundscheck(False) -def fast_unique(ndarray[object] values): - cdef: - Py_ssize_t i, n = len(values) - list uniques = [] - dict table = {} - object val, stub = 0 - - for i from 0 <= i < n: - val = values[i] - if val not in table: - table[val] = stub - uniques.append(val) - try: - uniques.sort() - except Exception: - pass - - return uniques - - @cython.wraparound(False) @cython.boundscheck(False) def fast_unique_multiple(list arrays): @@ -368,30 +339,6 @@ def has_infs_f8(ndarray[float64_t] arr): return False -def convert_timestamps(ndarray values): - cdef: - object val, f, result - dict cache = {} - Py_ssize_t i, n = len(values) - ndarray[object] out - - # for HDFStore, a bit temporary but... 
- - from datetime import datetime - f = datetime.fromtimestamp - - out = np.empty(n, dtype='O') - - for i in range(n): - val = util.get_value_1d(values, i) - if val in cache: - out[i] = cache[val] - else: - cache[val] = out[i] = f(val) - - return out - - def maybe_indices_to_slice(ndarray[int64_t] indices, int max_len): cdef: Py_ssize_t i, n = len(indices) @@ -731,145 +678,6 @@ def clean_index_list(list obj): return np.asarray(obj), 0 -ctypedef fused pandas_string: - str - unicode - bytes - - -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef Py_ssize_t max_len_string_array(pandas_string[:] arr): - """ return the maximum size of elements in a 1-dim string array """ - cdef: - Py_ssize_t i, m = 0, l = 0, length = arr.shape[0] - pandas_string v - - for i in range(length): - v = arr[i] - if PyString_Check(v): - l = PyString_GET_SIZE(v) - elif PyBytes_Check(v): - l = PyBytes_GET_SIZE(v) - elif PyUnicode_Check(v): - l = PyUnicode_GET_SIZE(v) - - if l > m: - m = l - - return m - - -@cython.boundscheck(False) -@cython.wraparound(False) -def string_array_replace_from_nan_rep( - ndarray[object, ndim=1] arr, object nan_rep, - object replace=None): - """ - Replace the values in the array with 'replacement' if - they are 'nan_rep'. Return the same array. 
- """ - - cdef int length = arr.shape[0], i = 0 - if replace is None: - replace = np.nan - - for i from 0 <= i < length: - if arr[i] == nan_rep: - arr[i] = replace - - return arr - - -@cython.boundscheck(False) -@cython.wraparound(False) -def convert_json_to_lines(object arr): - """ - replace comma separated json with line feeds, paying special attention - to quotes & brackets - """ - cdef: - Py_ssize_t i = 0, num_open_brackets_seen = 0, length - bint in_quotes = 0, is_escaping = 0 - ndarray[uint8_t] narr - unsigned char v, comma, left_bracket, right_brack, newline - - newline = ord('\n') - comma = ord(',') - left_bracket = ord('{') - right_bracket = ord('}') - quote = ord('"') - backslash = ord('\\') - - narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy() - length = narr.shape[0] - for i in range(length): - v = narr[i] - if v == quote and i > 0 and not is_escaping: - in_quotes = ~in_quotes - if v == backslash or is_escaping: - is_escaping = ~is_escaping - if v == comma: # commas that should be \n - if num_open_brackets_seen == 0 and not in_quotes: - narr[i] = newline - elif v == left_bracket: - if not in_quotes: - num_open_brackets_seen += 1 - elif v == right_bracket: - if not in_quotes: - num_open_brackets_seen -= 1 - - return narr.tostring().decode('utf-8') - - -@cython.boundscheck(False) -@cython.wraparound(False) -def write_csv_rows(list data, ndarray data_index, - int nlevels, ndarray cols, object writer): - - cdef int N, j, i, ncols - cdef list rows - cdef object val - - # In crude testing, N>100 yields little marginal improvement - N=100 - - # pre-allocate rows - ncols = len(cols) - rows = [[None] * (nlevels + ncols) for x in range(N)] - - j = -1 - if nlevels == 1: - for j in range(len(data_index)): - row = rows[j % N] - row[0] = data_index[j] - for i in range(ncols): - row[1 + i] = data[i][j] - - if j >= N - 1 and j % N == N - 1: - writer.writerows(rows) - elif nlevels > 1: - for j in range(len(data_index)): - row = rows[j % N] - row[:nlevels] = 
list(data_index[j]) - for i in range(ncols): - row[nlevels + i] = data[i][j] - - if j >= N - 1 and j % N == N - 1: - writer.writerows(rows) - else: - for j in range(len(data_index)): - row = rows[j % N] - for i in range(ncols): - row[i] = data[i][j] - - if j >= N - 1 and j % N == N - 1: - writer.writerows(rows) - - if j >= 0 and (j < N - 1 or (j % N) != N - 1): - writer.writerows(rows[:((j + 1) % N)]) - - # ------------------------------------------------------------------------------ # Groupby-related functions diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index efe61716d0831..89d2de6de213a 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -2225,3 +2225,37 @@ def _maybe_encode(values): if values is None: return [] return [x.encode('utf-8') if isinstance(x, unicode) else x for x in values] + + +def sanitize_objects(ndarray[object] values, set na_values, + convert_empty=True): + """ + Convert specified values, including the given set na_values and empty + strings if convert_empty is True, to np.nan. 
+ + Parameters + ---------- + values : ndarray[object] + na_values : set + convert_empty : bool (default True) + """ + cdef: + Py_ssize_t i, n + object val, onan + Py_ssize_t na_count = 0 + dict memo = {} + + n = len(values) + onan = np.nan + + for i from 0 <= i < n: + val = values[i] + if (convert_empty and val == '') or (val in na_values): + values[i] = onan + na_count += 1 + elif val in memo: + values[i] = memo[val] + else: + memo[val] = val + + return na_count diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index b29a2e519efcd..75bff34e4a391 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -6,7 +6,7 @@ from tslibs.nattype import NaT from tslibs.conversion cimport convert_to_tsobject from tslibs.timedeltas cimport convert_to_timedelta64 from tslibs.timezones cimport get_timezone, tz_compare -from datetime import datetime, timedelta + iNaT = util.get_nat() cdef bint PY2 = sys.version_info[0] == 2 @@ -1405,30 +1405,6 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, return objects -def sanitize_objects(ndarray[object] values, set na_values, - convert_empty=True): - cdef: - Py_ssize_t i, n - object val, onan - Py_ssize_t na_count = 0 - dict memo = {} - - n = len(values) - onan = np.nan - - for i from 0 <= i < n: - val = values[i] - if (convert_empty and val == '') or (val in na_values): - values[i] = onan - na_count += 1 - elif val in memo: - values[i] = memo[val] - else: - memo[val] = val - - return na_count - - def maybe_convert_bool(ndarray[object] arr, true_values=None, false_values=None): cdef: diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx new file mode 100644 index 0000000000000..6f07d04b3fad3 --- /dev/null +++ b/pandas/_libs/writers.pyx @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- + +cimport cython +from cython cimport Py_ssize_t + +from cpython cimport (PyString_Check, PyBytes_Check, PyUnicode_Check, + PyBytes_GET_SIZE, PyUnicode_GET_SIZE) + +try: + from 
cpython cimport PyString_GET_SIZE +except ImportError: + from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE + +import numpy as np +cimport numpy as cnp +from numpy cimport ndarray, uint8_t +cnp.import_array() + +cimport util + + +ctypedef fused pandas_string: + str + unicode + bytes + + +@cython.boundscheck(False) +@cython.wraparound(False) +def write_csv_rows(list data, ndarray data_index, + int nlevels, ndarray cols, object writer): + """ + Write the given data to the writer object, pre-allocating where possible + for performance improvements. + + Parameters + ---------- + data : list + data_index : ndarray + nlevels : int + cols : ndarray + writer : object + """ + cdef int N, j, i, ncols + cdef list rows + cdef object val + + # In crude testing, N>100 yields little marginal improvement + N = 100 + + # pre-allocate rows + ncols = len(cols) + rows = [[None] * (nlevels + ncols) for x in range(N)] + + j = -1 + if nlevels == 1: + for j in range(len(data_index)): + row = rows[j % N] + row[0] = data_index[j] + for i in range(ncols): + row[1 + i] = data[i][j] + + if j >= N - 1 and j % N == N - 1: + writer.writerows(rows) + elif nlevels > 1: + for j in range(len(data_index)): + row = rows[j % N] + row[:nlevels] = list(data_index[j]) + for i in range(ncols): + row[nlevels + i] = data[i][j] + + if j >= N - 1 and j % N == N - 1: + writer.writerows(rows) + else: + for j in range(len(data_index)): + row = rows[j % N] + for i in range(ncols): + row[i] = data[i][j] + + if j >= N - 1 and j % N == N - 1: + writer.writerows(rows) + + if j >= 0 and (j < N - 1 or (j % N) != N - 1): + writer.writerows(rows[:((j + 1) % N)]) + + +@cython.boundscheck(False) +@cython.wraparound(False) +def convert_json_to_lines(object arr): + """ + replace comma separated json with line feeds, paying special attention + to quotes & brackets + """ + cdef: + Py_ssize_t i = 0, num_open_brackets_seen = 0, length + bint in_quotes = 0, is_escaping = 0 + ndarray[uint8_t] narr + unsigned char v, comma, 
left_bracket, right_brack, newline + + newline = ord('\n') + comma = ord(',') + left_bracket = ord('{') + right_bracket = ord('}') + quote = ord('"') + backslash = ord('\\') + + narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy() + length = narr.shape[0] + for i in range(length): + v = narr[i] + if v == quote and i > 0 and not is_escaping: + in_quotes = ~in_quotes + if v == backslash or is_escaping: + is_escaping = ~is_escaping + if v == comma: # commas that should be \n + if num_open_brackets_seen == 0 and not in_quotes: + narr[i] = newline + elif v == left_bracket: + if not in_quotes: + num_open_brackets_seen += 1 + elif v == right_bracket: + if not in_quotes: + num_open_brackets_seen -= 1 + + return narr.tostring().decode('utf-8') + + +# stata, pytables +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef Py_ssize_t max_len_string_array(pandas_string[:] arr): + """ return the maximum size of elements in a 1-dim string array """ + cdef: + Py_ssize_t i, m = 0, l = 0, length = arr.shape[0] + pandas_string v + + for i in range(length): + v = arr[i] + if PyString_Check(v): + l = PyString_GET_SIZE(v) + elif PyBytes_Check(v): + l = PyBytes_GET_SIZE(v) + elif PyUnicode_Check(v): + l = PyUnicode_GET_SIZE(v) + + if l > m: + m = l + + return m + + +# ------------------------------------------------------------------ +# PyTables Helpers + + +@cython.boundscheck(False) +@cython.wraparound(False) +def string_array_replace_from_nan_rep( + ndarray[object, ndim=1] arr, object nan_rep, + object replace=None): + """ + Replace the values in the array with 'replacement' if + they are 'nan_rep'. Return the same array. 
+ """ + + cdef int length = arr.shape[0], i = 0 + if replace is None: + replace = np.nan + + for i from 0 <= i < length: + if arr[i] == nan_rep: + arr[i] = replace + + return arr diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 6a298f5137eb1..c3128be0f5599 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -48,7 +48,7 @@ def _raw_hex_id(obj): _DEFAULT_GLOBALS = { - 'Timestamp': pandas._libs.lib.Timestamp, + 'Timestamp': pandas._libs.tslib.Timestamp, 'datetime': datetime.datetime, 'True': True, 'False': False, diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5155662d2f97d..b2816343fc8eb 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -282,7 +282,7 @@ def maybe_promote(dtype, fill_value=np.nan): fill_value = iNaT elif issubclass(dtype.type, np.timedelta64): try: - fill_value = lib.Timedelta(fill_value).value + fill_value = tslib.Timedelta(fill_value).value except Exception: # as for datetimes, cannot upcast to object fill_value = iNaT diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6e777281b11e1..aaa4ae4773108 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -10,7 +10,7 @@ import numpy as np import pandas as pd -from pandas._libs import tslib, lib, properties +from pandas._libs import tslib, properties from pandas.core.dtypes.common import ( _ensure_int64, _ensure_object, @@ -7216,9 +7216,9 @@ def describe_categorical_1d(data): if is_datetime64_dtype(data): asint = data.dropna().values.view('i8') names += ['top', 'freq', 'first', 'last'] - result += [lib.Timestamp(top), freq, - lib.Timestamp(asint.min()), - lib.Timestamp(asint.max())] + result += [tslib.Timestamp(top), freq, + tslib.Timestamp(asint.min()), + tslib.Timestamp(asint.max())] else: names += ['top', 'freq'] result += [top, freq] diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f3e5e4c99a899..22d38d3df071e 100644 --- 
a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2656,7 +2656,7 @@ def _try_coerce_args(self, values, other): other = other.asi8 other_mask = isna(other) elif isinstance(other, (np.datetime64, datetime, date)): - other = lib.Timestamp(other) + other = tslib.Timestamp(other) tz = getattr(other, 'tz', None) # test we can have an equal time zone @@ -2675,7 +2675,7 @@ def _try_coerce_result(self, result): if result.dtype.kind in ['i', 'f', 'O']: result = result.astype('M8[ns]') elif isinstance(result, (np.integer, np.float, np.datetime64)): - result = lib.Timestamp(result, tz=self.values.tz) + result = tslib.Timestamp(result, tz=self.values.tz) if isinstance(result, np.ndarray): # allow passing of > 1dim if its trivial if result.ndim > 1: diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index eda86f12d501d..d4851f579dda4 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -268,7 +268,7 @@ def _wrap_results(result, dtype): if is_datetime64_dtype(dtype): if not isinstance(result, np.ndarray): - result = lib.Timestamp(result) + result = tslib.Timestamp(result) else: result = result.view(dtype) elif is_timedelta64_dtype(dtype): @@ -278,7 +278,7 @@ def _wrap_results(result, dtype): if np.fabs(result) > _int64_max: raise ValueError("overflow in timedelta operation") - result = lib.Timedelta(result, unit='ns') + result = tslib.Timedelta(result, unit='ns') else: result = result.astype('i8').view(dtype) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 706bec9e44892..961c8c004e9e3 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -24,7 +24,7 @@ from pandas.compat.numpy import function as nv from pandas._libs import lib, tslib -from pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp from pandas._libs.tslibs.period import IncompatibleFrequency from pandas.util._decorators import Appender, Substitution diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 
bca0b64cb53fe..269c81b380b5e 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -38,7 +38,7 @@ _stringify_path) from pandas.io.formats.printing import adjoin, justify, pprint_thing from pandas.io.formats.common import get_level_lengths -from pandas._libs import lib +from pandas._libs import lib, writers as libwriters from pandas._libs.tslib import (iNaT, Timestamp, Timedelta, format_array_from_datetime) from pandas.core.indexes.datetimes import DatetimeIndex @@ -1789,7 +1789,8 @@ def _save_chunk(self, start_i, end_i): date_format=self.date_format, quoting=self.quoting) - lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer) + libwriters.write_csv_rows(self.data, ix, self.nlevels, + self.cols, self.writer) # ---------------------------------------------------------------------- diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index 595031b04e367..c7901f4352d00 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -5,7 +5,7 @@ from collections import defaultdict import numpy as np -from pandas._libs.lib import convert_json_to_lines +from pandas._libs.writers import convert_json_to_lines from pandas import compat, DataFrame diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 5135bb01fb378..af1441f4a0fc9 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -1596,11 +1596,12 @@ def _infer_types(self, values, na_values, try_num_bool=True): except Exception: result = values if values.dtype == np.object_: - na_count = lib.sanitize_objects(result, na_values, False) + na_count = parsers.sanitize_objects(result, na_values, + False) else: result = values if values.dtype == np.object_: - na_count = lib.sanitize_objects(values, na_values, False) + na_count = parsers.sanitize_objects(values, na_values, False) if result.dtype == np.object_ and try_num_bool: result = lib.maybe_convert_bool(values, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 
5376473f83f22..0d833807602e1 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -47,7 +47,7 @@ from pandas.core.config import get_option from pandas.core.computation.pytables import Expr, maybe_expression -from pandas._libs import algos, lib +from pandas._libs import algos, lib, writers as libwriters from pandas._libs.tslibs import timezones from distutils.version import LooseVersion @@ -3843,7 +3843,7 @@ def read(self, where=None, columns=None, **kwargs): # need a better algorithm tuple_index = long_index.values - unique_tuples = lib.fast_unique(tuple_index) + unique_tuples = unique(tuple_index) unique_tuples = com._asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) @@ -4561,7 +4561,8 @@ def _convert_string_array(data, encoding, itemsize=None): # create the sized dtype if itemsize is None: - itemsize = lib.max_len_string_array(_ensure_object(data.ravel())) + ensured = _ensure_object(data.ravel()) + itemsize = libwriters.max_len_string_array(ensured) data = np.asarray(data, dtype="S%d" % itemsize) return data @@ -4590,7 +4591,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None): encoding = _ensure_encoding(encoding) if encoding is not None and len(data): - itemsize = lib.max_len_string_array(_ensure_object(data)) + itemsize = libwriters.max_len_string_array(_ensure_object(data)) if compat.PY3: dtype = "U{0}".format(itemsize) else: @@ -4604,7 +4605,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None): if nan_rep is None: nan_rep = 'nan' - data = lib.string_array_replace_from_nan_rep(data, nan_rep) + data = libwriters.string_array_replace_from_nan_rep(data, nan_rep) return data.reshape(shape) @@ -4621,7 +4622,7 @@ def _get_converter(kind, encoding): if kind == 'datetime64': return lambda x: np.asarray(x, dtype='M8[ns]') elif kind == 'datetime': - return lib.convert_timestamps + return lambda x: to_datetime(x, cache=True).to_pydatetime() elif kind == 'string': return lambda x: 
_unconvert_string_array(x, encoding=encoding) else: # pragma: no cover diff --git a/pandas/io/stata.py b/pandas/io/stata.py index b409cf20e9a09..16665e19985f1 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -16,8 +16,9 @@ import numpy as np from dateutil.relativedelta import relativedelta -from pandas._libs.lib import max_len_string_array, infer_dtype +from pandas._libs.lib import infer_dtype from pandas._libs.tslib import NaT, Timestamp +from pandas._libs.writers import max_len_string_array import pandas as pd from pandas import compat, to_timedelta, to_datetime, isna, DatetimeIndex diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 66ee7fa98491f..07163615c6ba4 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -23,7 +23,7 @@ from pandas.compat import lrange import pandas.compat as compat -import pandas._libs.lib as lib +from pandas._libs import tslib import pandas.core.common as com from pandas.core.index import Index @@ -52,7 +52,7 @@ def get_pairs(): pairs = [ - (lib.Timestamp, DatetimeConverter), + (tslib.Timestamp, DatetimeConverter), (Period, PeriodConverter), (pydt.datetime, DatetimeConverter), (pydt.date, DatetimeConverter), @@ -312,7 +312,7 @@ def try_parse(values): if isinstance(values, (datetime, pydt.date)): return _dt_to_float_ordinal(values) elif isinstance(values, np.datetime64): - return _dt_to_float_ordinal(lib.Timestamp(values)) + return _dt_to_float_ordinal(tslib.Timestamp(values)) elif isinstance(values, pydt.time): return dates.date2num(values) elif (is_integer(values) or is_float(values)): diff --git a/pandas/tests/indexes/datetimes/test_construction.py b/pandas/tests/indexes/datetimes/test_construction.py index b59dd25ead57f..197a42bdaacbb 100644 --- a/pandas/tests/indexes/datetimes/test_construction.py +++ b/pandas/tests/indexes/datetimes/test_construction.py @@ -7,7 +7,6 @@ import pandas as pd from pandas import offsets import pandas.util.testing as tm -from 
pandas._libs import lib from pandas._libs.tslib import OutOfBoundsDatetime from pandas._libs.tslibs import conversion from pandas import (DatetimeIndex, Index, Timestamp, datetime, date_range, @@ -537,7 +536,7 @@ def test_datetimeindex_constructor_misc(self): arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04'] idx2 = DatetimeIndex(arr) - arr = [lib.Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005', + arr = [Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005', '2005-01-04'] idx3 = DatetimeIndex(arr) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 508c3a73f48c7..974099f1fbbe9 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -29,7 +29,7 @@ from pandas.core.indexes.datetimes import _to_m8 import pandas as pd -from pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp class TestIndex(Base): diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index aedc957ec67da..e59456b8a2d5e 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -19,7 +19,7 @@ from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.indexes.base import InvalidIndexError from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike -from pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp import pandas.util.testing as tm diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 3de1c4c982654..0c1bec7a6f1a9 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -13,7 +13,7 @@ import pandas.util.testing as tm import pandas as pd -from pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp from pandas.tests.indexes.common import Base diff --git a/pandas/tests/io/json/test_ujson.py b/pandas/tests/io/json/test_ujson.py index cd1685f282bd2..e949772981eb7 100644 --- 
a/pandas/tests/io/json/test_ujson.py +++ b/pandas/tests/io/json/test_ujson.py @@ -425,7 +425,7 @@ def test_npy_nat(self): assert ujson.encode(input) == 'null', "Expected null" def test_datetime_units(self): - from pandas._libs.lib import Timestamp + from pandas._libs.tslib import Timestamp val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504) stamp = Timestamp(val) diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index 8525cb42c2455..bc972076c6a80 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -11,7 +11,7 @@ import pytest import numpy as np -from pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp import pandas as pd import pandas.util.testing as tm diff --git a/pandas/tests/io/parser/converters.py b/pandas/tests/io/parser/converters.py index 1176b1e84e29b..ae35d45591dc5 100644 --- a/pandas/tests/io/parser/converters.py +++ b/pandas/tests/io/parser/converters.py @@ -13,7 +13,7 @@ import pandas as pd import pandas.util.testing as tm -from pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp from pandas import DataFrame, Index from pandas.compat import parse_date, StringIO, lmap diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py index b7d0dd1a3484f..919b357f14236 100644 --- a/pandas/tests/io/parser/parse_dates.py +++ b/pandas/tests/io/parser/parse_dates.py @@ -11,7 +11,7 @@ import pytest import numpy as np from pandas._libs.tslibs import parsing -from pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp import pandas as pd import pandas.io.parsers as parsers diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index ec240531925e3..7717102b64fc5 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -5,7 +5,7 @@ from pandas import read_csv, read_table, DataFrame import pandas.core.common as com -from 
pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp from pandas.compat import StringIO from .common import ParserTests diff --git a/pandas/tests/io/parser/usecols.py b/pandas/tests/io/parser/usecols.py index 8767055239cd5..195fb4cba2aed 100644 --- a/pandas/tests/io/parser/usecols.py +++ b/pandas/tests/io/parser/usecols.py @@ -11,7 +11,7 @@ import pandas.util.testing as tm from pandas import DataFrame, Index -from pandas._libs.lib import Timestamp +from pandas._libs.tslib import Timestamp from pandas.compat import StringIO diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index fbfbad547ce1b..e5c3d6f7d3ee1 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -17,7 +17,7 @@ Categorical) from pandas.core.indexing import IndexingError from pandas.tseries.offsets import BDay -from pandas._libs import tslib, lib +from pandas._libs import tslib from pandas.compat import lrange, range from pandas import compat @@ -2707,7 +2707,7 @@ def test_fancy_getitem(self): assert s['1/2/2009'] == 48 assert s['2009-1-2'] == 48 assert s[datetime(2009, 1, 2)] == 48 - assert s[lib.Timestamp(datetime(2009, 1, 2))] == 48 + assert s[Timestamp(datetime(2009, 1, 2))] == 48 pytest.raises(KeyError, s.__getitem__, '2009-1-3') assert_series_equal(s['3/6/2009':'2009-06-05'], diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index 10061204df42a..502f0c3bced61 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -3,7 +3,7 @@ import pytest import numpy as np -from pandas._libs import lib +from pandas._libs import lib, writers as libwriters import pandas.util.testing as tm @@ -12,19 +12,19 @@ class TestMisc(object): def test_max_len_string_array(self): arr = a = np.array(['foo', 'b', np.nan], dtype='object') - assert lib.max_len_string_array(arr) == 3 + assert libwriters.max_len_string_array(arr) == 3 # unicode arr = a.astype('U').astype(object) - assert 
lib.max_len_string_array(arr) == 3 + assert libwriters.max_len_string_array(arr) == 3 # bytes for python3 arr = a.astype('S').astype(object) - assert lib.max_len_string_array(arr) == 3 + assert libwriters.max_len_string_array(arr) == 3 # raises pytest.raises(TypeError, - lambda: lib.max_len_string_array(arr.astype('U'))) + lambda: libwriters.max_len_string_array(arr.astype('U'))) def test_fast_unique_multiple_list_gen_sort(self): keys = [['p', 'a'], ['n', 'd'], ['a', 's']] diff --git a/setup.py b/setup.py index 721e6f62bd3e4..4d42379eef11b 100755 --- a/setup.py +++ b/setup.py @@ -328,6 +328,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/tslibs/frequencies.pyx', 'pandas/_libs/tslibs/resolution.pyx', 'pandas/_libs/tslibs/parsing.pyx', + 'pandas/_libs/writers.pyx', 'pandas/io/sas/sas.pyx'] def initialize_options(self): @@ -616,6 +617,9 @@ def pxd(name): '_libs.window': { 'pyxfile': '_libs/window', 'pxdfiles': ['_libs/skiplist', '_libs/src/util']}, + '_libs.writers': { + 'pyxfile': '_libs/writers', + 'pxdfiles': ['_libs/src/util']}, 'io.sas._sas': { 'pyxfile': 'io/sas/sas'}}
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19413
2018-01-26T17:49:43Z
2018-02-01T11:33:01Z
2018-02-01T11:33:01Z
2018-02-11T21:57:03Z
[#7292] BUG: asfreq / pct_change strange behavior
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4dde76dee46a5..b5b52c7b9c89b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -419,6 +419,7 @@ Datetimelike - Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) - Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`) - Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`) +- Bug in :func:`~DataFrame.pct_change` using ``periods`` and ``freq`` returned different length outputs (:issue:`7292`) Timezones ^^^^^^^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 6e777281b11e1..bee954aa9bba8 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -7315,6 +7315,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1) + rs = rs.reindex_like(data) if freq is None: mask = isna(com._values_from_object(self)) np.putmask(rs.values, mask, np.nan) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 3af798acdede5..e6b47fd69cb05 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -108,7 +108,9 @@ def test_pct_change(self): rs = self.tsframe.pct_change(freq='5D') filled = self.tsframe.fillna(method='pad') - assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1) + assert_frame_equal(rs, + (filled / filled.shift(freq='5D') - 1) + .reindex_like(filled)) def test_pct_change_shift_over_nas(self): s = Series([1., 1.5, np.nan, 2.5, 3.]) @@ -120,6 +122,38 @@ def test_pct_change_shift_over_nas(self): edf = DataFrame({'a': expected, 'b': expected}) assert_frame_equal(chg, edf) + def test_pct_change_periods_freq(self): + # GH 7292 + rs_freq = self.tsframe.pct_change(freq='5B') + 
rs_periods = self.tsframe.pct_change(5) + assert_frame_equal(rs_freq, rs_periods) + + rs_freq = self.tsframe.pct_change(freq='3B', fill_method=None) + rs_periods = self.tsframe.pct_change(3, fill_method=None) + assert_frame_equal(rs_freq, rs_periods) + + rs_freq = self.tsframe.pct_change(freq='3B', fill_method='bfill') + rs_periods = self.tsframe.pct_change(3, fill_method='bfill') + assert_frame_equal(rs_freq, rs_periods) + + rs_freq = self.tsframe.pct_change(freq='7B', + fill_method='pad', + limit=1) + rs_periods = self.tsframe.pct_change(7, fill_method='pad', limit=1) + assert_frame_equal(rs_freq, rs_periods) + + rs_freq = self.tsframe.pct_change(freq='7B', + fill_method='bfill', + limit=3) + rs_periods = self.tsframe.pct_change(7, fill_method='bfill', limit=3) + assert_frame_equal(rs_freq, rs_periods) + + empty_ts = DataFrame(index=self.tsframe.index, + columns=self.tsframe.columns) + rs_freq = empty_ts.pct_change(freq='14B') + rs_periods = empty_ts.pct_change(14) + assert_frame_equal(rs_freq, rs_periods) + def test_frame_ctor_datetime64_column(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s') dates = np.asarray(rng) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 7be801629e387..7a1aff1cc223c 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -344,7 +344,9 @@ def test_pct_change(self): rs = self.ts.pct_change(freq='5D') filled = self.ts.fillna(method='pad') - assert_series_equal(rs, filled / filled.shift(freq='5D') - 1) + assert_series_equal(rs, + (filled / filled.shift(freq='5D') - 1) + .reindex_like(filled)) def test_pct_change_shift_over_nas(self): s = Series([1., 1.5, np.nan, 2.5, 3.]) @@ -353,6 +355,33 @@ def test_pct_change_shift_over_nas(self): expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2]) assert_series_equal(chg, expected) + def test_pct_change_periods_freq(self): + # GH 7292 + rs_freq = 
self.ts.pct_change(freq='5B') + rs_periods = self.ts.pct_change(5) + assert_series_equal(rs_freq, rs_periods) + + rs_freq = self.ts.pct_change(freq='3B', fill_method=None) + rs_periods = self.ts.pct_change(3, fill_method=None) + assert_series_equal(rs_freq, rs_periods) + + rs_freq = self.ts.pct_change(freq='3B', fill_method='bfill') + rs_periods = self.ts.pct_change(3, fill_method='bfill') + assert_series_equal(rs_freq, rs_periods) + + rs_freq = self.ts.pct_change(freq='7B', fill_method='pad', limit=1) + rs_periods = self.ts.pct_change(7, fill_method='pad', limit=1) + assert_series_equal(rs_freq, rs_periods) + + rs_freq = self.ts.pct_change(freq='7B', fill_method='bfill', limit=3) + rs_periods = self.ts.pct_change(7, fill_method='bfill', limit=3) + assert_series_equal(rs_freq, rs_periods) + + empty_ts = Series(index=self.ts.index) + rs_freq = empty_ts.pct_change(freq='14B') + rs_periods = empty_ts.pct_change(14) + assert_series_equal(rs_freq, rs_periods) + def test_autocorr(self): # Just run the function corr1 = self.ts.autocorr()
- [x] closes https://github.com/pandas-dev/pandas/issues/7292 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19410
2018-01-26T12:46:18Z
2018-01-31T11:34:13Z
2018-01-31T11:34:12Z
2018-01-31T11:37:19Z
Pin Thrift to 0.10.*
diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index db2d429a2a4ff..3042888763863 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -12,5 +12,6 @@ numexpr pytables matplotlib blosc +thrift=0.10* fastparquet pyarrow
fastparquet compatibility in https://github.com/dask/fastparquet/pull/281, which will be released before long. But let's pin for now.
https://api.github.com/repos/pandas-dev/pandas/pulls/19408
2018-01-26T08:16:35Z
2018-01-26T10:11:58Z
2018-01-26T10:11:58Z
2018-02-16T11:54:28Z
DOC: catch warnings in test_feather & other
diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 4508d5c1e1781..6e1b6e14861c3 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -215,7 +215,10 @@ def read(self, path, columns=None, **kwargs): # We need to retain the original path(str) while also # pass the S3File().open function to fsatparquet impl. s3, _, _ = get_filepath_or_buffer(path) - parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) + try: + parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) + finally: + s3.close() else: path, _, _ = get_filepath_or_buffer(path) parquet_file = self.api.ParquetFile(path) diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index e9909400ce429..9d04111d64125 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -1,5 +1,6 @@ """ test feather-format compat """ from distutils.version import LooseVersion +from warnings import catch_warnings import numpy as np @@ -31,7 +32,9 @@ def check_round_trip(self, df, **kwargs): with ensure_clean() as path: to_feather(df, path) - result = read_feather(path, **kwargs) + + with catch_warnings(record=True): + result = read_feather(path, **kwargs) assert_frame_equal(result, df) def test_error(self): diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 8a6a22abe23fa..6c172c80514e7 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -148,7 +148,8 @@ def check_round_trip(df, engine=None, path=None, def compare(repeat): for _ in range(repeat): df.to_parquet(path, **write_kwargs) - actual = read_parquet(path, **read_kwargs) + with catch_warnings(record=True): + actual = read_parquet(path, **read_kwargs) tm.assert_frame_equal(expected, actual, check_names=check_names) @@ -228,35 +229,20 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=fp, compression=None) - result = read_parquet(path, engine=pa) - 
tm.assert_frame_equal(result, df) - - result = read_parquet(path, engine=pa, columns=['a', 'd']) - tm.assert_frame_equal(result, df[['a', 'd']]) - - -def check_round_trip_equals(df, path, engine, - write_kwargs, read_kwargs, - expected, check_names): - - df.to_parquet(path, engine, **write_kwargs) - actual = read_parquet(path, engine, **read_kwargs) - tm.assert_frame_equal(expected, actual, - check_names=check_names) + with catch_warnings(record=True): + result = read_parquet(path, engine=pa) + tm.assert_frame_equal(result, df) - # repeat - df.to_parquet(path, engine, **write_kwargs) - actual = read_parquet(path, engine, **read_kwargs) - tm.assert_frame_equal(expected, actual, - check_names=check_names) + result = read_parquet(path, engine=pa, columns=['a', 'd']) + tm.assert_frame_equal(result, df[['a', 'd']]) class Base(object): def check_error_on_write(self, df, engine, exc): # check that we are raising the exception on writing - with pytest.raises(exc): - with tm.ensure_clean() as path: + with tm.ensure_clean() as path: + with pytest.raises(exc): to_parquet(df, path, engine, compression=None) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 34e634f56aec6..941bdcbc8b064 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -205,6 +205,7 @@ def decompress_file(path, compression): raise ValueError(msg) yield f + f.close() def assert_almost_equal(left, right, check_exact=False,
https://api.github.com/repos/pandas-dev/pandas/pulls/19407
2018-01-26T03:50:33Z
2018-01-26T11:54:08Z
2018-01-26T11:54:08Z
2018-01-26T12:44:46Z
standardize cimports of numpy as "cnp"
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index 9a7af71e74574..5d17488963b1c 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -1,20 +1,14 @@ # cython: profile=False -cimport numpy as np -import numpy as np - cimport cython from cython cimport Py_ssize_t -np.import_array() - -cdef float64_t FP_ERR = 1e-13 - -cimport util - from libc.stdlib cimport malloc, free from libc.string cimport memmove +from libc.math cimport fabs, sqrt +import numpy as np +cimport numpy as cnp from numpy cimport (ndarray, NPY_INT64, NPY_UINT64, NPY_INT32, NPY_INT16, NPY_INT8, NPY_FLOAT32, NPY_FLOAT64, @@ -22,18 +16,19 @@ from numpy cimport (ndarray, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float32_t, float64_t, double_t) +cnp.import_array() -cdef double NaN = <double> np.NaN -cdef double nan = NaN - -from libc.math cimport fabs, sqrt - -# this is our util.pxd +cimport util from util cimport numeric, get_nat import missing +cdef float64_t FP_ERR = 1e-13 + +cdef double NaN = <double> np.NaN +cdef double nan = NaN + cdef int64_t iNaT = get_nat() cdef: diff --git a/pandas/_libs/algos_rank_helper.pxi.in b/pandas/_libs/algos_rank_helper.pxi.in index 8ccc6e036da80..2f40bd4349a2e 100644 --- a/pandas/_libs/algos_rank_helper.pxi.in +++ b/pandas/_libs/algos_rank_helper.pxi.in @@ -50,7 +50,7 @@ def rank_1d_{{dtype}}(object in_arr, ties_method='average', ascending=True, ndarray[float64_t] ranks ndarray[int64_t] argsorted - ndarray[np.uint8_t, cast=True] sorted_mask + ndarray[uint8_t, cast=True] sorted_mask {{if dtype == 'uint64'}} {{ctype}} val diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 72c2834b0bd57..07b4b80603e03 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -1,6 +1,22 @@ # cython: profile=False -from cpython cimport PyObject, Py_INCREF, PyList_Check, PyTuple_Check +cimport cython + +from cpython cimport (PyObject, Py_INCREF, PyList_Check, PyTuple_Check, + PyMem_Malloc, 
PyMem_Realloc, PyMem_Free, + PyString_Check, PyBytes_Check, + PyUnicode_Check) + +from libc.stdlib cimport malloc, free + +import numpy as np +cimport numpy as cnp +from numpy cimport ndarray, uint8_t, uint32_t +cnp.import_array() + +cdef extern from "numpy/npy_math.h": + double NAN "NPY_NAN" + from khash cimport ( khiter_t, @@ -23,29 +39,13 @@ from khash cimport ( kh_put_pymap, kh_resize_pymap) -from numpy cimport ndarray, uint8_t, uint32_t - -from libc.stdlib cimport malloc, free -from cpython cimport (PyMem_Malloc, PyMem_Realloc, PyMem_Free, - PyString_Check, PyBytes_Check, - PyUnicode_Check) - from util cimport _checknan cimport util -import numpy as np -nan = np.nan - -cdef extern from "numpy/npy_math.h": - double NAN "NPY_NAN" - -cimport cython -cimport numpy as cnp - from missing cimport checknull -cnp.import_array() -cnp.import_ufunc() + +nan = np.nan cdef int64_t iNaT = util.get_nat() _SIZE_HINT_LIMIT = (1 << 20) + 7 diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index 15aef867ba413..996ece063b980 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -1,17 +1,19 @@ # cython: profile=False +from datetime import datetime, timedelta, date -from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t, - NPY_DATETIME, NPY_TIMEDELTA) cimport cython -cimport numpy as cnp +from cpython cimport PyTuple_Check, PyList_Check +from cpython.slice cimport PySlice_Check +import numpy as np +cimport numpy as cnp +from numpy cimport (ndarray, float64_t, int32_t, int64_t, uint8_t, uint64_t, + NPY_DATETIME, NPY_TIMEDELTA) cnp.import_array() -cnp.import_ufunc() -cimport util -import numpy as np +cimport util from tslibs.conversion cimport maybe_datetimelike_to_i8 @@ -20,10 +22,6 @@ from hashtable cimport HashTable from pandas._libs import algos, hashtable as _hash from pandas._libs.tslibs import period as periodlib from pandas._libs.tslib import Timestamp, Timedelta -from datetime import datetime, timedelta, date - -from cpython 
cimport PyTuple_Check, PyList_Check -from cpython.slice cimport PySlice_Check cdef int64_t iNaT = util.get_nat() diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index a5abe324254ce..b46a05a0842c3 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -10,7 +10,6 @@ cdef extern from "Python.h": Py_ssize_t PY_SSIZE_T_MAX import numpy as np -cimport numpy as np from numpy cimport int64_t cdef extern from "compat_helper.h": diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 0718f8bd2b970..c0b2ca66e30a6 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -1,4 +1,4 @@ -cimport numpy as np +cimport numpy as cnp import numpy as np cimport util diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index 344c5d25d0c3d..27d2a639d13e6 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -1,16 +1,15 @@ # cython: profile=False -cimport numpy as np -import numpy as np - cimport cython from cython cimport Py_ssize_t -np.import_array() - +import numpy as np +cimport numpy as cnp from numpy cimport (ndarray, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float32_t, float64_t) +cnp.import_array() + cdef double NaN = <double> np.NaN cdef double nan = NaN diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index db0ff2931d96f..c3a654b01022c 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -5,7 +5,7 @@ cimport cython from cython cimport Py_ssize_t import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM, PyArray_ITER_DATA, PyArray_ITER_NEXT, PyArray_IterNew, flatiter, NPY_OBJECT, @@ -13,9 +13,7 @@ from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM, float32_t, float64_t, uint8_t, uint64_t, complex128_t) -# initialize numpy -np.import_array() -np.import_ufunc() +cnp.import_array() from cpython cimport (Py_INCREF, PyTuple_SET_ITEM, PyList_Check, PyFloat_Check, @@ -95,7 
+93,7 @@ cpdef bint is_scalar(object val): """ - return (np.PyArray_IsAnyScalar(val) + return (cnp.PyArray_IsAnyScalar(val) # As of numpy-1.9, PyArray_IsAnyScalar misses bytearrays on Py3. or PyBytes_Check(val) # We differ from numpy (as of 1.10), which claims that None is @@ -710,7 +708,7 @@ def clean_index_list(list obj): for i in range(n): v = obj[i] - if not (PyList_Check(v) or np.PyArray_Check(v) or hasattr(v, '_data')): + if not (PyList_Check(v) or util.is_array(v) or hasattr(v, '_data')): all_arrays = 0 break diff --git a/pandas/_libs/missing.pyx b/pandas/_libs/missing.pyx index 0b60fc2c5b4d1..dfd044131afb4 100644 --- a/pandas/_libs/missing.pyx +++ b/pandas/_libs/missing.pyx @@ -7,9 +7,9 @@ cimport cython from cython cimport Py_ssize_t import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport ndarray, int64_t, uint8_t -np.import_array() +cnp.import_array() cimport util diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index 4ca87a777e497..3588ac14c87d1 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -8,13 +8,13 @@ from cpython cimport Py_INCREF from libc.stdlib cimport malloc, free import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport (ndarray, int64_t, PyArray_SETITEM, PyArray_ITER_NEXT, PyArray_ITER_DATA, PyArray_IterNew, flatiter) -np.import_array() +cnp.import_array() cimport util from lib import maybe_convert_objects diff --git a/pandas/_libs/reshape.pyx b/pandas/_libs/reshape.pyx index c4104b66e009f..1d7893f69c31d 100644 --- a/pandas/_libs/reshape.pyx +++ b/pandas/_libs/reshape.pyx @@ -1,16 +1,15 @@ # cython: profile=False -cimport numpy as np -import numpy as np - cimport cython from cython cimport Py_ssize_t -np.import_array() - +import numpy as np +cimport numpy as cnp from numpy cimport (ndarray, int8_t, int16_t, int32_t, int64_t, uint8_t, uint16_t, uint32_t, uint64_t, float32_t, float64_t) +cnp.import_array() + cdef double NaN = <double> np.NaN cdef 
double nan = NaN diff --git a/pandas/_libs/skiplist.pyx b/pandas/_libs/skiplist.pyx index c96413edfb0f2..5ede31b24118d 100644 --- a/pandas/_libs/skiplist.pyx +++ b/pandas/_libs/skiplist.pyx @@ -8,20 +8,20 @@ from libc.math cimport log +import numpy as np +cimport numpy as cnp +from numpy cimport double_t +cnp.import_array() + + # MSVC does not have log2! cdef double Log2(double x): return log(x) / log(2.) -cimport numpy as np -import numpy as np -from numpy cimport double_t from random import random -# initialize numpy -np.import_array() - # TODO: optimize this, make less messy cdef class Node: diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx index bb8b0ed14e1d9..2abd270652433 100644 --- a/pandas/_libs/sparse.pyx +++ b/pandas/_libs/sparse.pyx @@ -1,12 +1,15 @@ -from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t, - float64_t, float32_t) -cimport numpy as np +# -*- coding: utf-8 -*- +import operator +import sys cimport cython import numpy as np -import operator -import sys +cimport numpy as cnp +from numpy cimport (ndarray, uint8_t, int64_t, int32_t, int16_t, int8_t, + float64_t, float32_t) +cnp.import_array() + from distutils.version import LooseVersion @@ -15,8 +18,6 @@ _np_version = np.version.short_version _np_version_under1p10 = LooseVersion(_np_version) < LooseVersion('1.10') _np_version_under1p11 = LooseVersion(_np_version) < LooseVersion('1.11') -np.import_array() -np.import_ufunc() # ----------------------------------------------------------------------------- # Preamble stuff diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index 557ca57145f2b..f14d508a625d0 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -609,13 +609,13 @@ cdef class Validator: cdef: Py_ssize_t n - np.dtype dtype + cnp.dtype dtype bint skipna def __cinit__( self, Py_ssize_t n, - np.dtype dtype=np.dtype(np.object_), + cnp.dtype dtype=np.dtype(np.object_), bint skipna=False ): self.n = n @@ 
-823,7 +823,7 @@ cdef class TemporalValidator(Validator): def __cinit__( self, Py_ssize_t n, - np.dtype dtype=np.dtype(np.object_), + cnp.dtype dtype=np.dtype(np.object_), bint skipna=False ): self.n = n diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index c7035df8ac15c..81df7981096ba 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- # cython: profile=False -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t, ndarray, float64_t import numpy as np -np.import_array() +cnp.import_array() from cpython cimport PyFloat_Check diff --git a/pandas/_libs/tslibs/ccalendar.pyx b/pandas/_libs/tslibs/ccalendar.pyx index ebd5fc12775a4..ae52f7dd30165 100644 --- a/pandas/_libs/tslibs/ccalendar.pyx +++ b/pandas/_libs/tslibs/ccalendar.pyx @@ -8,10 +8,9 @@ Cython implementations of functions resembling the stdlib calendar module cimport cython from cython cimport Py_ssize_t -import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t, int32_t -np.import_array() +cnp.import_array() # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 9cfe41172fedc..a32bfc1f6836c 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -5,9 +5,9 @@ cimport cython from cython cimport Py_ssize_t import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t, int32_t, ndarray -np.import_array() +cnp.import_array() import pytz diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 18101c834c737..a8a865eec38dd 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -9,9 +9,9 @@ cimport cython from cython cimport Py_ssize_t import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport ndarray, int64_t, int32_t, int8_t -np.import_array() 
+cnp.import_array() from ccalendar cimport (get_days_in_month, is_leapyear, dayofweek, diff --git a/pandas/_libs/tslibs/frequencies.pyx b/pandas/_libs/tslibs/frequencies.pyx index cce3600371300..abaf8cad09bdb 100644 --- a/pandas/_libs/tslibs/frequencies.pyx +++ b/pandas/_libs/tslibs/frequencies.pyx @@ -4,10 +4,9 @@ import re cimport cython -import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t -np.import_array() +cnp.import_array() from util cimport is_integer_object, is_string_object diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 39f9437f0cecf..9f4ef4e515058 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -13,9 +13,9 @@ from cpython.datetime cimport (datetime, PyDateTime_IMPORT import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t -np.import_array() +cnp.import_array() from util cimport (get_nat, is_integer_object, is_float_object, diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index a0ac6389c0646..e02818dd818df 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -10,9 +10,9 @@ from cpython.datetime cimport datetime, timedelta, time as dt_time from dateutil.relativedelta import relativedelta import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t -np.import_array() +cnp.import_array() from util cimport is_string_object, is_integer_object diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index a9a5500cd7447..09aeff852a0f2 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -18,9 +18,9 @@ from datetime import datetime import time import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t, ndarray -np.import_array() +cnp.import_array() # Avoid import from outside _libs if sys.version_info.major == 2: diff --git 
a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 6eb867377bf54..b166babe5992c 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -4,9 +4,9 @@ from cython cimport Py_ssize_t import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport ndarray, int64_t -np.import_array() +cnp.import_array() from util cimport is_string_object, get_nat diff --git a/pandas/_libs/tslibs/strptime.pyx b/pandas/_libs/tslibs/strptime.pyx index 2921291973373..e7dabb94f8975 100644 --- a/pandas/_libs/tslibs/strptime.pyx +++ b/pandas/_libs/tslibs/strptime.pyx @@ -27,7 +27,6 @@ from cpython cimport PyFloat_Check cimport cython import numpy as np -cimport numpy as np from numpy cimport ndarray, int64_t from datetime import date as datetime_date diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index b2c9c464c7cbf..1e6ea7794dfff 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -10,9 +10,9 @@ from cython cimport Py_ssize_t from cpython cimport PyUnicode_Check, Py_NE, Py_EQ, PyObject_RichCompare import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t, ndarray -np.import_array() +cnp.import_array() from cpython.datetime cimport (datetime, timedelta, PyDateTime_CheckExact, diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 1ddb299598fd0..b9be9c16eb6c3 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -6,9 +6,9 @@ from cpython cimport (PyObject_RichCompareBool, PyObject_RichCompare, Py_GT, Py_GE, Py_EQ, Py_NE, Py_LT, Py_LE) import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport int64_t, int32_t, ndarray -np.import_array() +cnp.import_array() from datetime import time as datetime_time from cpython.datetime cimport (datetime, diff --git a/pandas/_libs/tslibs/timezones.pyx 
b/pandas/_libs/tslibs/timezones.pyx index 242b8262a8721..c22e0b8e555a3 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -18,9 +18,9 @@ UTC = pytz.utc import numpy as np -cimport numpy as np +cimport numpy as cnp from numpy cimport ndarray, int64_t -np.import_array() +cnp.import_array() # ---------------------------------------------------------------------- from util cimport is_string_object, is_integer_object, get_nat diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index e46bf24c36f18..cacb073da581c 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -1,43 +1,40 @@ # cython: profile=False # cython: boundscheck=False, wraparound=False, cdivision=True +cimport cython from cython cimport Py_ssize_t -cimport numpy as np +from libc.stdlib cimport malloc, free + import numpy as np +cimport numpy as cnp +from numpy cimport ndarray, double_t, int64_t, float64_t +cnp.import_array() -cimport cython -np.import_array() +cdef extern from "../src/headers/math.h": + int signbit(double) nogil + double sqrt(double x) nogil cimport util - -from libc.stdlib cimport malloc, free - -from numpy cimport ndarray, double_t, int64_t, float64_t +from util cimport numeric from skiplist cimport (IndexableSkiplist, node_t, skiplist_t, skiplist_init, skiplist_destroy, skiplist_get, skiplist_insert, skiplist_remove) -cdef np.float32_t MINfloat32 = np.NINF -cdef np.float64_t MINfloat64 = np.NINF +cdef cnp.float32_t MINfloat32 = np.NINF +cdef cnp.float64_t MINfloat64 = np.NINF -cdef np.float32_t MAXfloat32 = np.inf -cdef np.float64_t MAXfloat64 = np.inf +cdef cnp.float32_t MAXfloat32 = np.inf +cdef cnp.float64_t MAXfloat64 = np.inf cdef double NaN = <double> np.NaN cdef inline int int_max(int a, int b): return a if a >= b else b cdef inline int int_min(int a, int b): return a if a <= b else b -from util cimport numeric - -cdef extern from "../src/headers/math.h": - int signbit(double) nogil - double sqrt(double x) nogil - # 
Cython implementations of rolling sum, mean, variance, skewness, # other statistical moment functions diff --git a/pandas/io/sas/sas.pyx b/pandas/io/sas/sas.pyx index 41c03cb2799a3..e2a1107969990 100644 --- a/pandas/io/sas/sas.pyx +++ b/pandas/io/sas/sas.pyx @@ -2,16 +2,16 @@ # cython: boundscheck=False, initializedcheck=False import numpy as np -cimport numpy as np -from numpy cimport uint8_t, uint16_t, int8_t, int64_t +cimport numpy as cnp +from numpy cimport uint8_t, uint16_t, int8_t, int64_t, ndarray import sas_constants as const # rle_decompress decompresses data using a Run Length Encoding # algorithm. It is partially documented here: # # https://cran.r-project.org/web/packages/sas7bdat/vignettes/sas7bdat.pdf -cdef np.ndarray[uint8_t, ndim=1] rle_decompress( - int result_length, np.ndarray[uint8_t, ndim=1] inbuff): +cdef ndarray[uint8_t, ndim=1] rle_decompress( + int result_length, ndarray[uint8_t, ndim=1] inbuff): cdef: uint8_t control_byte, x @@ -114,8 +114,8 @@ cdef np.ndarray[uint8_t, ndim=1] rle_decompress( # rdc_decompress decompresses data using the Ross Data Compression algorithm: # # http://collaboration.cmc.ec.gc.ca/science/rpn/biblio/ddj/Website/articles/CUJ/1992/9210/ross/ross.htm -cdef np.ndarray[uint8_t, ndim=1] rdc_decompress( - int result_length, np.ndarray[uint8_t, ndim=1] inbuff): +cdef ndarray[uint8_t, ndim=1] rdc_decompress( + int result_length, ndarray[uint8_t, ndim=1] inbuff): cdef: uint8_t cmd @@ -226,8 +226,8 @@ cdef class Parser(object): int subheader_pointer_length int current_page_type bint is_little_endian - np.ndarray[uint8_t, ndim=1] (*decompress)( - int result_length, np.ndarray[uint8_t, ndim=1] inbuff) + ndarray[uint8_t, ndim=1] (*decompress)( + int result_length, ndarray[uint8_t, ndim=1] inbuff) object parser def __init__(self, object parser): @@ -391,7 +391,7 @@ cdef class Parser(object): Py_ssize_t j int s, k, m, jb, js, current_row int64_t lngt, start, ct - np.ndarray[uint8_t, ndim=1] source + ndarray[uint8_t, ndim=1] 
source int64_t[:] column_types int64_t[:] lengths int64_t[:] offsets
In a few files this collects scattered numpy imports and puts them all in one place. Removes unnecessary `np.import_ufunc()` calls. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19405
2018-01-26T01:19:49Z
2018-01-27T01:09:11Z
2018-01-27T01:09:11Z
2018-01-31T06:49:26Z
Make DateOffset.kwds a property
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 66e88e181ac0f..9cebe09280eba 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -341,6 +341,7 @@ Other API Changes - :func:`DatetimeIndex.shift` and :func:`TimedeltaIndex.shift` will now raise ``NullFrequencyError`` (which subclasses ``ValueError``, which was raised in older versions) when the index object frequency is ``None`` (:issue:`19147`) - Addition and subtraction of ``NaN`` from a :class:`Series` with ``dtype='timedelta64[ns]'`` will raise a ``TypeError` instead of treating the ``NaN`` as ``NaT`` (:issue:`19274`) - Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`) +- :class:`DateOffset` objects render more simply, e.g. "<DateOffset: days=1>" instead of "<DateOffset: kwds={'days': 1}>" (:issue:`19403`) .. _whatsnew_0230.deprecations: diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index e02818dd818df..8caf9ea0e0389 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -302,6 +302,14 @@ class _BaseOffset(object): _normalize_cache = True _cacheable = False _day_opt = None + _attributes = frozenset(['n', 'normalize']) + + @property + def kwds(self): + # for backwards-compatibility + kwds = {name: getattr(self, name, None) for name in self._attributes + if name not in ['n', 'normalize']} + return {name: kwds[name] for name in kwds if kwds[name] is not None} def __call__(self, other): return self.apply(other) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 8dd41c022d163..76219a07f4943 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -71,9 +71,11 @@ def f(self): if field in ['is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']: - month_kw 
= (self.freq.kwds.get('startingMonth', - self.freq.kwds.get('month', 12)) - if self.freq else 12) + freq = self.freq + month_kw = 12 + if freq: + kwds = freq.kwds + month_kw = kwds.get('startingMonth', kwds.get('month', 12)) result = fields.get_start_end_field(values, field, self.freqstr, month_kw) diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index b086884ecd250..d96ebab615d12 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -218,7 +218,7 @@ def test_offset_freqstr(self, offset_types): freqstr = offset.freqstr if freqstr not in ('<Easter>', - "<DateOffset: kwds={'days': 1}>", + "<DateOffset: days=1>", 'LWOM-SAT', ): code = get_offset(freqstr) assert offset.rule_code == code diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index ec206e0997d0b..2e4be7fbdeebf 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -185,6 +185,8 @@ def __add__(date): """ _use_relativedelta = False _adjust_dst = False + _attributes = frozenset(['n', 'normalize'] + + list(liboffsets.relativedelta_kwds)) # default for prior pickles normalize = False @@ -192,9 +194,9 @@ def __add__(date): def __init__(self, n=1, normalize=False, **kwds): self.n = self._validate_n(n) self.normalize = normalize - self.kwds = kwds self._offset, self._use_relativedelta = _determine_offset(kwds) + self.__dict__.update(kwds) @apply_wraps def apply(self, other): @@ -238,30 +240,31 @@ def apply_index(self, i): y : DatetimeIndex """ - if not type(self) is DateOffset: + if type(self) is not DateOffset: raise NotImplementedError("DateOffset subclass {name} " "does not have a vectorized " "implementation".format( name=self.__class__.__name__)) + kwds = self.kwds relativedelta_fast = set(['years', 'months', 'weeks', 'days', 'hours', 'minutes', 'seconds', 'microseconds']) # relativedelta/_offset path only valid for base DateOffset if (self._use_relativedelta and - 
set(self.kwds).issubset(relativedelta_fast)): + set(kwds).issubset(relativedelta_fast)): - months = ((self.kwds.get('years', 0) * 12 + - self.kwds.get('months', 0)) * self.n) + months = ((kwds.get('years', 0) * 12 + + kwds.get('months', 0)) * self.n) if months: shifted = liboffsets.shift_months(i.asi8, months) i = i._shallow_copy(shifted) - weeks = (self.kwds.get('weeks', 0)) * self.n + weeks = (kwds.get('weeks', 0)) * self.n if weeks: i = (i.to_period('W') + weeks).to_timestamp() + \ i.to_perioddelta('W') - timedelta_kwds = {k: v for k, v in self.kwds.items() + timedelta_kwds = {k: v for k, v in kwds.items() if k in ['days', 'hours', 'minutes', 'seconds', 'microseconds']} if timedelta_kwds: @@ -273,7 +276,7 @@ def apply_index(self, i): return i + (self._offset * self.n) else: # relativedelta with other keywords - kwd = set(self.kwds) - relativedelta_fast + kwd = set(kwds) - relativedelta_fast raise NotImplementedError("DateOffset with relativedelta " "keyword(s) {kwd} not able to be " "applied vectorized".format(kwd=kwd)) @@ -284,7 +287,7 @@ def isAnchored(self): return (self.n == 1) def _params(self): - all_paras = dict(list(vars(self).items()) + list(self.kwds.items())) + all_paras = self.__dict__.copy() if 'holidays' in all_paras and not all_paras['holidays']: all_paras.pop('holidays') exclude = ['kwds', 'name', 'normalize', 'calendar'] @@ -301,15 +304,8 @@ def _repr_attrs(self): exclude = set(['n', 'inc', 'normalize']) attrs = [] for attr in sorted(self.__dict__): - if attr.startswith('_'): + if attr.startswith('_') or attr == 'kwds': continue - elif attr == 'kwds': # TODO: get rid of this - kwds_new = {} - for key in self.kwds: - if not hasattr(self, key): - kwds_new[key] = self.kwds[key] - if len(kwds_new) > 0: - attrs.append('kwds={kwds_new}'.format(kwds_new=kwds_new)) elif attr not in exclude: value = getattr(self, attr) attrs.append('{attr}={value}'.format(attr=attr, value=value)) @@ -427,6 +423,30 @@ def _offset_str(self): def nanos(self): raise 
ValueError("{name} is a non-fixed frequency".format(name=self)) + def __setstate__(self, state): + """Reconstruct an instance from a pickled state""" + if 'offset' in state: + # Older (<0.22.0) versions have offset attribute instead of _offset + if '_offset' in state: # pragma: no cover + raise AssertionError('Unexpected key `_offset`') + state['_offset'] = state.pop('offset') + state['kwds']['offset'] = state['_offset'] + + if '_offset' in state and not isinstance(state['_offset'], timedelta): + # relativedelta, we need to populate using its kwds + offset = state['_offset'] + odict = offset.__dict__ + kwds = {key: odict[key] for key in odict if odict[key]} + state.update(kwds) + + self.__dict__ = state + if 'weekmask' in state and 'holidays' in state: + calendar, holidays = _get_calendar(weekmask=self.weekmask, + holidays=self.holidays, + calendar=None) + self.calendar = calendar + self.holidays = holidays + class SingleConstructorOffset(DateOffset): @classmethod @@ -450,10 +470,9 @@ def __init__(self, weekmask, holidays, calendar): # following two attributes. 
See DateOffset._params() # holidays, weekmask - # assumes self.kwds already exists - self.kwds['weekmask'] = self.weekmask = weekmask - self.kwds['holidays'] = self.holidays = holidays - self.kwds['calendar'] = self.calendar = calendar + self.weekmask = weekmask + self.holidays = holidays + self.calendar = calendar class BusinessMixin(object): @@ -490,23 +509,6 @@ def __getstate__(self): return state - def __setstate__(self, state): - """Reconstruct an instance from a pickled state""" - if 'offset' in state: - # Older versions have offset attribute instead of _offset - if '_offset' in state: # pragma: no cover - raise ValueError('Unexpected key `_offset`') - state['_offset'] = state.pop('offset') - state['kwds']['offset'] = state['_offset'] - self.__dict__ = state - if 'weekmask' in state and 'holidays' in state: - calendar, holidays = _get_calendar(weekmask=self.weekmask, - holidays=self.holidays, - calendar=None) - self.kwds['calendar'] = self.calendar = calendar - self.kwds['holidays'] = self.holidays = holidays - self.kwds['weekmask'] = state['weekmask'] - class BusinessDay(BusinessMixin, SingleConstructorOffset): """ @@ -514,11 +516,11 @@ class BusinessDay(BusinessMixin, SingleConstructorOffset): """ _prefix = 'B' _adjust_dst = True + _attributes = frozenset(['n', 'normalize', 'offset']) def __init__(self, n=1, normalize=False, offset=timedelta(0)): self.n = self._validate_n(n) self.normalize = normalize - self.kwds = {'offset': offset} self._offset = offset def _offset_str(self): @@ -615,10 +617,8 @@ class BusinessHourMixin(BusinessMixin): def __init__(self, start='09:00', end='17:00', offset=timedelta(0)): # must be validated here to equality check - kwds = {'offset': offset} - self.start = kwds['start'] = liboffsets._validate_business_time(start) - self.end = kwds['end'] = liboffsets._validate_business_time(end) - self.kwds.update(kwds) + self.start = liboffsets._validate_business_time(start) + self.end = liboffsets._validate_business_time(end) self._offset 
= offset @cache_readonly @@ -843,12 +843,12 @@ class BusinessHour(BusinessHourMixin, SingleConstructorOffset): """ _prefix = 'BH' _anchor = 0 + _attributes = frozenset(['n', 'normalize', 'start', 'end', 'offset']) def __init__(self, n=1, normalize=False, start='09:00', end='17:00', offset=timedelta(0)): self.n = self._validate_n(n) self.normalize = normalize - self.kwds = {} super(BusinessHour, self).__init__(start=start, end=end, offset=offset) @@ -872,13 +872,14 @@ class CustomBusinessDay(_CustomMixin, BusinessDay): """ _cacheable = False _prefix = 'C' + _attributes = frozenset(['n', 'normalize', + 'weekmask', 'holidays', 'calendar', 'offset']) def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, offset=timedelta(0)): self.n = self._validate_n(n) self.normalize = normalize self._offset = offset - self.kwds = {'offset': offset} _CustomMixin.__init__(self, weekmask, holidays, calendar) @@ -930,6 +931,9 @@ class CustomBusinessHour(_CustomMixin, BusinessHourMixin, """ _prefix = 'CBH' _anchor = 0 + _attributes = frozenset(['n', 'normalize', + 'weekmask', 'holidays', 'calendar', + 'start', 'end', 'offset']) def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', holidays=None, calendar=None, @@ -937,7 +941,6 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', self.n = self._validate_n(n) self.normalize = normalize self._offset = offset - self.kwds = {'offset': offset} _CustomMixin.__init__(self, weekmask, holidays, calendar) BusinessHourMixin.__init__(self, start=start, end=end, offset=offset) @@ -949,11 +952,11 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', class MonthOffset(SingleConstructorOffset): _adjust_dst = True + _attributes = frozenset(['n', 'normalize']) def __init__(self, n=1, normalize=False): self.n = self._validate_n(n) self.normalize = normalize - self.kwds = {} @property def name(self): @@ -1024,6 +1027,8 @@ class 
_CustomBusinessMonth(_CustomMixin, BusinessMixin, MonthOffset): calendar : pd.HolidayCalendar or np.busdaycalendar """ _cacheable = False + _attributes = frozenset(['n', 'normalize', + 'weekmask', 'holidays', 'calendar', 'offset']) onOffset = DateOffset.onOffset # override MonthOffset method apply_index = DateOffset.apply_index # override MonthOffset method @@ -1033,7 +1038,6 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', self.n = self._validate_n(n) self.normalize = normalize self._offset = offset - self.kwds = {'offset': offset} _CustomMixin.__init__(self, weekmask, holidays, calendar) @@ -1102,6 +1106,7 @@ class SemiMonthOffset(DateOffset): _adjust_dst = True _default_day_of_month = 15 _min_day_of_month = 2 + _attributes = frozenset(['n', 'normalize', 'day_of_month']) def __init__(self, n=1, normalize=False, day_of_month=None): if day_of_month is None: @@ -1115,7 +1120,6 @@ def __init__(self, n=1, normalize=False, day_of_month=None): self.n = self._validate_n(n) self.normalize = normalize - self.kwds = {'day_of_month': self.day_of_month} @classmethod def _from_name(cls, suffix=None): @@ -1319,6 +1323,7 @@ class Week(DateOffset): _adjust_dst = True _inc = timedelta(weeks=1) _prefix = 'W' + _attributes = frozenset(['n', 'normalize', 'weekday']) def __init__(self, n=1, normalize=False, weekday=None): self.n = self._validate_n(n) @@ -1330,8 +1335,6 @@ def __init__(self, n=1, normalize=False, weekday=None): raise ValueError('Day must be 0<=day<=6, got {day}' .format(day=self.weekday)) - self.kwds = {'weekday': weekday} - def isAnchored(self): return (self.n == 1 and self.weekday is not None) @@ -1450,6 +1453,7 @@ class WeekOfMonth(_WeekOfMonthMixin, DateOffset): """ _prefix = 'WOM' _adjust_dst = True + _attributes = frozenset(['n', 'normalize', 'week', 'weekday']) def __init__(self, n=1, normalize=False, week=0, weekday=0): self.n = self._validate_n(n) @@ -1467,8 +1471,6 @@ def __init__(self, n=1, normalize=False, week=0, weekday=0): 
raise ValueError('Week must be 0<=week<=3, got {week}' .format(week=self.week)) - self.kwds = {'weekday': weekday, 'week': week} - def _get_offset_day(self, other): """ Find the day in the same month as other that has the same @@ -1526,6 +1528,7 @@ class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset): """ _prefix = 'LWOM' _adjust_dst = True + _attributes = frozenset(['n', 'normalize', 'weekday']) def __init__(self, n=1, normalize=False, weekday=0): self.n = self._validate_n(n) @@ -1539,8 +1542,6 @@ def __init__(self, n=1, normalize=False, weekday=0): raise ValueError('Day must be 0<=day<=6, got {day}' .format(day=self.weekday)) - self.kwds = {'weekday': weekday} - def _get_offset_day(self, other): """ Find the day in the same month as other that has the same @@ -1584,6 +1585,7 @@ class QuarterOffset(DateOffset): _default_startingMonth = None _from_name_startingMonth = None _adjust_dst = True + _attributes = frozenset(['n', 'normalize', 'startingMonth']) # TODO: Consider combining QuarterOffset and YearOffset __init__ at some # point. 
Also apply_index, onOffset, rule_code if # startingMonth vs month attr names are resolved @@ -1595,8 +1597,6 @@ def __init__(self, n=1, normalize=False, startingMonth=None): startingMonth = self._default_startingMonth self.startingMonth = startingMonth - self.kwds = {'startingMonth': startingMonth} - def isAnchored(self): return (self.n == 1 and self.startingMonth is not None) @@ -1690,6 +1690,7 @@ class QuarterBegin(QuarterOffset): class YearOffset(DateOffset): """DateOffset that just needs a month""" _adjust_dst = True + _attributes = frozenset(['n', 'normalize', 'month']) def _get_offset_day(self, other): # override BaseOffset method to use self.month instead of other.month @@ -1725,8 +1726,6 @@ def __init__(self, n=1, normalize=False, month=None): if self.month < 1 or self.month > 12: raise ValueError('Month must go from 1 to 12') - self.kwds = {'month': month} - @classmethod def _from_name(cls, suffix=None): kwargs = {} @@ -1811,6 +1810,7 @@ class FY5253(DateOffset): """ _prefix = 'RE' _adjust_dst = True + _attributes = frozenset(['weekday', 'startingMonth', 'variation']) def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, variation="nearest"): @@ -1821,9 +1821,6 @@ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, self.variation = variation - self.kwds = {'weekday': weekday, 'startingMonth': startingMonth, - 'variation': variation} - if self.n == 0: raise ValueError('N cannot be 0') @@ -2012,6 +2009,8 @@ class FY5253Quarter(DateOffset): _prefix = 'REQ' _adjust_dst = True + _attributes = frozenset(['weekday', 'startingMonth', 'qtr_with_extra_week', + 'variation']) def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, qtr_with_extra_week=1, variation="nearest"): @@ -2023,10 +2022,6 @@ def __init__(self, n=1, normalize=False, weekday=0, startingMonth=1, self.qtr_with_extra_week = qtr_with_extra_week self.variation = variation - self.kwds = {'weekday': weekday, 'startingMonth': startingMonth, - 
'qtr_with_extra_week': qtr_with_extra_week, - 'variation': variation} - if self.n == 0: raise ValueError('N cannot be 0') @@ -2170,11 +2165,11 @@ class Easter(DateOffset): 1583-4099. """ _adjust_dst = True + _attributes = frozenset(['n', 'normalize']) def __init__(self, n=1, normalize=False): self.n = self._validate_n(n) self.normalize = normalize - self.kwds = {} @apply_wraps def apply(self, other): @@ -2217,12 +2212,12 @@ def f(self, other): class Tick(SingleConstructorOffset): _inc = Timedelta(microseconds=1000) _prefix = 'undefined' + _attributes = frozenset(['n', 'normalize']) def __init__(self, n=1, normalize=False): # TODO: do Tick classes with normalize=True make sense? self.n = self._validate_n(n) self.normalize = normalize - self.kwds = {} __gt__ = _tick_comp(operator.gt) __ge__ = _tick_comp(operator.ge)
Returning to an older goal of making DateOffset immutable, this PR moves towards getting rid of `DateOffset.kwds` by making it a property instead of regular attribute. This uses the `_get_attributes_dict` pattern, albeit without actually using a `_get_attributes_dict` method. I expect this to entail a small perf penalty since lookups are slower, but that's small potatoes next to the speedups we'll get from caching once these are immutable. ``` asv continuous -E virtualenv -f 1.1 master HEAD -b offset -b timeseries [...] before after ratio [d3f7d2a6] [fe7a7187] + 11.46μs 41.89μs 3.66 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<DateOffset: kwds={'months': 2, 'days': 2}>) + 2.03ms 5.53ms 2.72 timeseries.ResampleSeries.time_resample('datetime', '5min', 'mean') + 2.15ms 4.68ms 2.17 timeseries.ResampleSeries.time_resample('datetime', '1D', 'ohlc') + 14.36μs 29.72μs 2.07 offset.OffestDatetimeArithmetic.time_add_10(<YearEnd: month=12>) + 2.56ms 4.42ms 1.73 timeseries.ToDatetimeCache.time_dup_string_tzoffset_dates(True) + 11.83μs 18.85μs 1.59 offset.OffestDatetimeArithmetic.time_add(<DateOffset: kwds={'months': 2, 'days': 2}>) + 9.60μs 14.96μs 1.56 offset.OffestDatetimeArithmetic.time_apply(<DateOffset: kwds={'months': 2, 'days': 2}>) + 28.55μs 42.49μs 1.49 offset.OffestDatetimeArithmetic.time_subtract(<DateOffset: kwds={'months': 2, 'days': 2}>) + 12.76μs 18.56μs 1.45 offset.OffestDatetimeArithmetic.time_add(<BusinessYearEnd: month=12>) + 7.69μs 10.75μs 1.40 timeseries.AsOf.time_asof_single_early('Series') + 8.71ms 11.63ms 1.34 timeseries.ResampleSeries.time_resample('period', '5min', 'mean') + 15.21μs 20.20μs 1.33 offset.OffestDatetimeArithmetic.time_add_10(<MonthEnd>) + 14.20μs 18.47μs 1.30 offset.OffestDatetimeArithmetic.time_add_10(<BusinessQuarterEnd: startingMonth=3>) + 17.44μs 21.72μs 1.25 offset.OffestDatetimeArithmetic.time_subtract_10(<SemiMonthBegin: day_of_month=15>) + 11.22μs 13.54μs 1.21 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearEnd: 
month=12>) + 13.63μs 16.37μs 1.20 offset.OffestDatetimeArithmetic.time_subtract(<BusinessMonthBegin>) + 10.22μs 12.27μs 1.20 offset.OffestDatetimeArithmetic.time_apply(<BusinessDay>) + 65.62μs 78.64μs 1.20 offset.OffestDatetimeArithmetic.time_add_10(<DateOffset: kwds={'months': 2, 'days': 2}>) + 14.46μs 17.28μs 1.19 offset.OffestDatetimeArithmetic.time_subtract(<QuarterBegin: startingMonth=3>) + 16.11μs 19.14μs 1.19 offset.OffestDatetimeArithmetic.time_subtract(<BusinessDay>) + 16.31μs 19.07μs 1.17 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessYearBegin: month=1>) + 2.35ms 2.73ms 1.16 timeseries.DatetimeIndex.time_unique('tz_naive') + 15.43μs 17.89μs 1.16 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessMonthBegin>) + 10.68μs 12.38μs 1.16 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<MonthEnd>) + 10.57μs 12.24μs 1.16 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessMonthEnd>) + 14.26μs 16.48μs 1.16 offset.OffestDatetimeArithmetic.time_add_10(<MonthBegin>) + 121.98μs 140.64μs 1.15 offset.OffestDatetimeArithmetic.time_subtract_10(<DateOffset: kwds={'months': 2, 'days': 2}>) + 16.60μs 19.11μs 1.15 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessYearEnd: month=12>) + 15.66μs 18.00μs 1.15 offset.OffestDatetimeArithmetic.time_subtract(<YearEnd: month=12>) + 16.78μs 19.25μs 1.15 offset.OffestDatetimeArithmetic.time_subtract(<SemiMonthEnd: day_of_month=15>) + 13.87μs 15.89μs 1.15 offset.OffestDatetimeArithmetic.time_add_10(<BusinessYearEnd: month=12>) + 14.96μs 17.13μs 1.14 offset.OffestDatetimeArithmetic.time_subtract(<BusinessQuarterBegin: startingMonth=3>) + 15.27μs 17.43μs 1.14 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessMonthEnd>) + 12.88μs 14.66μs 1.14 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessDay>) + 251.65μs 285.47μs 1.13 offset.OffsetDatetimeIndexArithmetic.time_add_offset(<BusinessQuarterEnd: startingMonth=3>) + 13.84μs 15.66μs 1.13 
offset.OffestDatetimeArithmetic.time_add_10(<YearBegin: month=1>) + 14.68ms 16.59ms 1.13 timeseries.DatetimeIndex.time_to_time('tz_naive') + 16.66μs 18.79μs 1.13 offset.OffestDatetimeArithmetic.time_subtract_10(<YearEnd: month=12>) + 10.00μs 11.19μs 1.12 offset.OffestDatetimeArithmetic.time_apply(<BusinessYearBegin: month=1>) + 10.30μs 11.50μs 1.12 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessYearEnd: month=12>) + 17.44μs 19.47μs 1.12 offset.OffestDatetimeArithmetic.time_subtract(<SemiMonthBegin: day_of_month=15>) + 18.07μs 20.13μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<SemiMonthEnd: day_of_month=15>) + 18.09μs 20.08μs 1.11 offset.OffestDatetimeArithmetic.time_add(<CustomBusinessDay>) + 14.65μs 16.26μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<BusinessMonthEnd>) + 5.32ms 5.91ms 1.11 offset.OffsetDatetimeIndexArithmetic.time_add_offset(<SemiMonthBegin: day_of_month=15>) + 11.54μs 12.80μs 1.11 offset.OffestDatetimeArithmetic.time_add(<MonthBegin>) + 12.23μs 13.55μs 1.11 offset.OffestDatetimeArithmetic.time_add(<BusinessQuarterEnd: startingMonth=3>) + 15.25μs 16.87μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthEnd>) + 17.78μs 19.66μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessDay>) + 18.13μs 20.05μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessQuarterEnd: startingMonth=3>) + 21.89μs 24.19μs 1.10 offset.OffestDatetimeArithmetic.time_subtract_10(<Day>) + 9.94μs 10.97μs 1.10 offset.OffestDatetimeArithmetic.time_apply(<SemiMonthEnd: day_of_month=15>) + 10.74μs 11.85μs 1.10 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessYearBegin: month=1>) + 18.51μs 20.39μs 1.10 offset.OffestDatetimeArithmetic.time_subtract_10(<SemiMonthEnd: day_of_month=15>) + 39.52ms 43.52ms 1.10 timeseries.Factorize.time_factorize('Asia/Tokyo') - 16.22μs 14.75μs 0.91 offset.OffestDatetimeArithmetic.time_add_10(<QuarterBegin: startingMonth=3>) - 73.13ms 65.36ms 0.89 
timeseries.ToDatetimeCache.time_dup_string_tzoffset_dates(False) - 13.39μs 11.60μs 0.87 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessQuarterBegin: startingMonth=3>) - 25.04μs 21.37μs 0.85 offset.OffestDatetimeArithmetic.time_add_10(<Day>) - 14.25μs 11.66μs 0.82 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<QuarterEnd: startingMonth=3>) - 44.47ms 34.93ms 0.79 timeseries.Factorize.time_factorize(None) - 1.98ms 1.45ms 0.73 timeseries.ToDatetimeCache.time_dup_string_dates(False) - 4.23ms 1.69ms 0.40 timeseries.ResampleSeries.time_resample('datetime', '1D', 'mean') asv continuous -E virtualenv -f 1.1 master HEAD -b offset -b timeseries [...] + 17.01μs 35.30μs 2.08 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessQuarterBegin: startingMonth=3>) + 3.02ms 4.90ms 1.62 timeseries.AsOf.time_asof_nan_single('DataFrame') + 77.45ms 124.97ms 1.61 timeseries.Factorize.time_factorize('Asia/Tokyo') + 12.33μs 19.34μs 1.57 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<DateOffset: days=2, months=2>) + 9.65μs 14.70μs 1.52 offset.OffestDatetimeArithmetic.time_apply(<DateOffset: days=2, months=2>) + 11.65μs 17.30μs 1.48 offset.OffestDatetimeArithmetic.time_add(<DateOffset: days=2, months=2>) + 27.81μs 40.60μs 1.46 offset.OffestDatetimeArithmetic.time_subtract(<DateOffset: days=2, months=2>) + 71.60μs 91.28μs 1.27 offset.OffestDatetimeArithmetic.time_add(<CustomBusinessMonthEnd>) + 92.47μs 113.39μs 1.23 offset.OffestDatetimeArithmetic.time_subtract(<CustomBusinessMonthBegin>) + 18.89μs 23.02μs 1.22 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<Day>) + 10.49μs 12.66μs 1.21 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessMonthBegin>) + 16.88μs 20.14μs 1.19 offset.OffestDatetimeArithmetic.time_subtract(<SemiMonthEnd: day_of_month=15>) + 15.45μs 18.27μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<QuarterEnd: startingMonth=3>) + 8.37μs 9.90μs 1.18 offset.OffestDatetimeArithmetic.time_apply(<MonthBegin>) + 62.24μs 73.22μs 1.18 
offset.OffestDatetimeArithmetic.time_add_10(<DateOffset: days=2, months=2>) + 21.38μs 24.98μs 1.17 offset.OffestDatetimeArithmetic.time_add_10(<Day>) + 16.28μs 18.91μs 1.16 offset.OffestDatetimeArithmetic.time_subtract_10(<QuarterBegin: startingMonth=3>) + 12.20μs 14.11μs 1.16 offset.OffestDatetimeArithmetic.time_add(<BusinessQuarterEnd: startingMonth=3>) + 11.51μs 13.30μs 1.16 offset.OffestDatetimeArithmetic.time_add(<BusinessYearBegin: month=1>) + 12.89μs 14.83μs 1.15 offset.OffestDatetimeArithmetic.time_add(<SemiMonthBegin: day_of_month=15>) + 14.39μs 16.51μs 1.15 offset.OffestDatetimeArithmetic.time_add_10(<QuarterBegin: startingMonth=3>) + 10.50μs 12.03μs 1.15 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearEnd: month=12>) + 2.32ms 2.66ms 1.14 timeseries.DatetimeIndex.time_unique('tz_naive') + 15.92μs 18.08μs 1.14 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessMonthEnd>) + 123.41μs 140.13μs 1.14 offset.OffestDatetimeArithmetic.time_subtract_10(<DateOffset: days=2, months=2>) + 71.20ms 80.52ms 1.13 offset.OffsetSeriesArithmetic.time_add_offset(<CustomBusinessMonthEnd>) + 17.68μs 19.89μs 1.13 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessYearBegin: month=1>) + 15.36μs 17.23μs 1.12 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthBegin>) + 15.98μs 17.82μs 1.12 offset.OffestDatetimeArithmetic.time_add_10(<SemiMonthBegin: day_of_month=15>) + 14.64μs 16.31μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<BusinessMonthEnd>) + 8.96μs 9.94μs 1.11 offset.OffestDatetimeArithmetic.time_apply(<BusinessYearEnd: month=12>) + 17.27μs 19.16μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<YearEnd: month=12>) + 14.44μs 16.01μs 1.11 offset.OffestDatetimeArithmetic.time_subtract(<BusinessYearBegin: month=1>) + 15.26μs 16.90μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<BusinessDay>) + 14.19μs 15.70μs 1.11 offset.OffestDatetimeArithmetic.time_subtract(<MonthBegin>) + 13.72μs 15.16μs 1.11 
offset.OffestDatetimeArithmetic.time_add_10(<MonthEnd>) + 127.38μs 140.62μs 1.10 offset.OffestDatetimeArithmetic.time_add_10(<CustomBusinessMonthBegin>) + 13.77μs 15.20μs 1.10 offset.OffestDatetimeArithmetic.time_subtract(<BusinessMonthEnd>) - 17.66μs 15.89μs 0.90 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthEnd>) - 4.07ms 3.66ms 0.90 timeseries.ToDatetimeISO8601.time_iso8601_nosep - 25.01μs 22.41μs 0.90 offset.OffestDatetimeArithmetic.time_subtract_10(<Day>) - 11.44μs 10.22μs 0.89 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<MonthEnd>) - 18.64μs 16.19μs 0.87 timeseries.AsOf.time_asof_single('Series') - 92.96ms 77.71ms 0.84 timeseries.Factorize.time_factorize(None) - 23.06μs 19.02μs 0.83 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessYearEnd: month=12>) - 13.05μs 10.37μs 0.79 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearBegin: month=1>) - 23.95μs 14.14μs 0.59 offset.OffestDatetimeArithmetic.time_add(<MonthBegin>) - 444.49μs 260.00μs 0.58 offset.OffsetDatetimeIndexArithmetic.time_add_offset(<BusinessYearEnd: month=12>) - 4.31ms 2.46ms 0.57 timeseries.ToDatetimeCache.time_unique_seconds_and_unit(False) - 21.43μs 11.69μs 0.55 offset.OffestDatetimeArithmetic.time_apply(<BusinessDay>) asv continuous -E virtualenv -f 1.1 master HEAD -b offset -b timeseries [...] 
before after ratio [d3f7d2a6] [fe7a7187] + 16.58μs 26.15μs 1.58 offset.OffestDatetimeArithmetic.time_subtract(<BusinessQuarterEnd: startingMonth=3>) + 9.75μs 14.73μs 1.51 offset.OffestDatetimeArithmetic.time_apply(<DateOffset: days=2, months=2>) + 12.16μs 18.11μs 1.49 offset.OffestDatetimeArithmetic.time_add(<DateOffset: days=2, months=2>) + 12.21μs 17.69μs 1.45 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<DateOffset: days=2, months=2>) + 28.97μs 40.89μs 1.41 offset.OffestDatetimeArithmetic.time_subtract(<DateOffset: days=2, months=2>) + 1.63ms 2.25ms 1.38 timeseries.ResampleSeries.time_resample('datetime', '1D', 'mean') + 14.71μs 20.02μs 1.36 offset.OffestDatetimeArithmetic.time_subtract(<BusinessYearBegin: month=1>) + 14.63μs 19.70μs 1.35 offset.OffestDatetimeArithmetic.time_subtract(<BusinessMonthBegin>) + 19.52μs 26.26μs 1.35 offset.OffestDatetimeArithmetic.time_add(<CustomBusinessDay>) + 22.16ms 29.79ms 1.34 offset.OffsetSeriesArithmetic.time_add_offset(<CustomBusinessDay>) + 20.69ms 26.91ms 1.30 timeseries.IrregularOps.time_add + 16.77μs 20.77μs 1.24 offset.OffestDatetimeArithmetic.time_subtract(<SemiMonthEnd: day_of_month=15>) + 15.76μs 19.40μs 1.23 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthBegin>) + 17.31μs 21.15μs 1.22 offset.OffestDatetimeArithmetic.time_subtract_10(<QuarterBegin: startingMonth=3>) + 15.31μs 18.65μs 1.22 offset.OffestDatetimeArithmetic.time_subtract(<BusinessQuarterBegin: startingMonth=3>) + 14.71μs 17.75μs 1.21 offset.OffestDatetimeArithmetic.time_add_10(<YearBegin: month=1>) + 15.88μs 19.10μs 1.20 offset.OffestDatetimeArithmetic.time_subtract(<BusinessDay>) + 6.38ms 7.55ms 1.18 timeseries.AsOf.time_asof('DataFrame') + 16.25μs 18.97μs 1.17 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessQuarterEnd: startingMonth=3>) + 66.50μs 76.79μs 1.15 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<CustomBusinessMonthEnd>) + 19.02μs 21.70μs 1.14 offset.OffestDatetimeArithmetic.time_add(<Day>) + 13.95μs 15.83μs 1.13 
offset.OffestDatetimeArithmetic.time_subtract(<MonthEnd>) + 18.43μs 20.88μs 1.13 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<Day>) + 64.88μs 72.92μs 1.12 offset.OffestDatetimeArithmetic.time_add_10(<DateOffset: days=2, months=2>) + 12.91μs 14.43μs 1.12 offset.OffestDatetimeArithmetic.time_add(<QuarterEnd: startingMonth=3>) + 15.23μs 16.99μs 1.12 offset.OffestDatetimeArithmetic.time_add_10(<BusinessDay>) + 13.70μs 15.24μs 1.11 offset.OffestDatetimeArithmetic.time_add_10(<YearEnd: month=12>) + 131.58μs 145.99μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<DateOffset: days=2, months=2>) + 11.07μs 12.26μs 1.11 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessQuarterBegin: startingMonth=3>) + 11.14μs 12.34μs 1.11 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<MonthBegin>) + 11.60μs 12.84μs 1.11 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessMonthEnd>) + 2.44ms 2.70ms 1.11 timeseries.DatetimeIndex.time_unique('tz_naive') + 15.79μs 17.46μs 1.11 offset.OffestDatetimeArithmetic.time_subtract_10(<BusinessMonthEnd>) + 21.47μs 23.70μs 1.10 offset.OffestDatetimeArithmetic.time_subtract(<Day>) + 10.57μs 11.64μs 1.10 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessMonthBegin>) + 2.98μs 3.28μs 1.10 timeseries.DatetimeIndex.time_get('repeated') - 10.04μs 9.07μs 0.90 offset.OffestDatetimeArithmetic.time_apply(<BusinessMonthBegin>) - 12.54μs 11.29μs 0.90 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearEnd: month=12>) - 10.30μs 9.25μs 0.90 offset.OffestDatetimeArithmetic.time_apply(<QuarterBegin: startingMonth=3>) - 16.50μs 14.67μs 0.89 offset.OffestDatetimeArithmetic.time_add_10(<QuarterEnd: startingMonth=3>) - 8.11ms 6.98ms 0.86 timeseries.Factorize.time_factorize(None) - 7.85ms 6.72ms 0.86 timeseries.Factorize.time_factorize('Asia/Tokyo') - 20.75μs 13.16μs 0.63 offset.OffestDatetimeArithmetic.time_add(<MonthEnd>) - 2.00ms 1.21ms 0.60 timeseries.ResampleDataFrame.time_method('mean') - 3.99ms 2.34ms 0.59 
timeseries.ToDatetimeCache.time_dup_string_dates(True) - 19.81μs 11.38μs 0.57 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<BusinessYearBegin: month=1>) - 30.71μs 12.91μs 0.42 offset.OffestDatetimeArithmetic.time_add(<BusinessQuarterEnd: startingMonth=3>) asv continuous -E virtualenv -f 1.1 master HEAD -b offset -b timeseries [...] before after ratio [d3f7d2a6] [fe7a7187] + 13.89μs 26.44μs 1.90 offset.OffestDatetimeArithmetic.time_add(<BusinessMonthEnd>) + 9.67μs 15.55μs 1.61 offset.OffestDatetimeArithmetic.time_apply(<DateOffset: days=2, months=2>) + 11.64μs 17.55μs 1.51 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<DateOffset: days=2, months=2>) + 12.28μs 17.62μs 1.43 offset.OffestDatetimeArithmetic.time_add(<DateOffset: days=2, months=2>) + 30.04μs 40.47μs 1.35 offset.OffestDatetimeArithmetic.time_subtract(<DateOffset: days=2, months=2>) + 14.31μs 17.84μs 1.25 offset.OffestDatetimeArithmetic.time_subtract(<YearBegin: month=1>) + 14.24μs 17.55μs 1.23 offset.OffestDatetimeArithmetic.time_add_10(<QuarterBegin: startingMonth=3>) + 16.80μs 20.62μs 1.23 offset.OffestDatetimeArithmetic.time_add_10(<SemiMonthEnd: day_of_month=15>) + 15.42μs 18.40μs 1.19 offset.OffestDatetimeArithmetic.time_subtract_10(<MonthBegin>) + 61.16μs 72.26μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<DateOffset: days=2, months=2>) + 14.30μs 16.82μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<BusinessQuarterEnd: startingMonth=3>) + 14.67μs 17.24μs 1.18 offset.OffestDatetimeArithmetic.time_add_10(<QuarterEnd: startingMonth=3>) + 9.54μs 11.21μs 1.17 offset.OffestDatetimeArithmetic.time_apply(<QuarterEnd: startingMonth=3>) + 17.11μs 20.00μs 1.17 offset.OffestDatetimeArithmetic.time_subtract_10(<QuarterBegin: startingMonth=3>) + 15.54μs 18.01μs 1.16 offset.OffestDatetimeArithmetic.time_apply(<CustomBusinessDay>) + 10.34μs 11.95μs 1.16 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<MonthEnd>) + 123.20μs 141.04μs 1.14 
offset.OffestDatetimeArithmetic.time_subtract_10(<DateOffset: days=2, months=2>) + 13.93μs 15.85μs 1.14 offset.OffestDatetimeArithmetic.time_add_10(<YearBegin: month=1>) + 11.39μs 12.85μs 1.13 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<YearEnd: month=12>) + 16.79μs 18.84μs 1.12 offset.OffestDatetimeArithmetic.time_subtract_10(<QuarterEnd: startingMonth=3>) + 2.35ms 2.64ms 1.12 timeseries.DatetimeIndex.time_unique('tz_naive') + 4.16μs 4.67μs 1.12 timeseries.DatetimeIndex.time_get('dst') + 6.03μs 6.73μs 1.12 timeseries.DatetimeIndex.time_get('tz_aware') + 16.77μs 18.74μs 1.12 offset.OffestDatetimeArithmetic.time_add_10(<BusinessDay>) + 10.47μs 11.69μs 1.12 offset.OffestDatetimeArithmetic.time_apply(<BusinessDay>) + 12.36μs 13.67μs 1.11 offset.OffestDatetimeArithmetic.time_add(<YearBegin: month=1>) + 9.49μs 10.49μs 1.11 offset.OffestDatetimeArithmetic.time_apply(<BusinessQuarterEnd: startingMonth=3>) + 105.28μs 116.31μs 1.10 offset.OffestDatetimeArithmetic.time_apply(<CustomBusinessMonthBegin>) + 18.86μs 20.84μs 1.10 offset.OffestDatetimeArithmetic.time_subtract_10(<SemiMonthEnd: day_of_month=15>) + 14.58μs 16.07μs 1.10 offset.OffestDatetimeArithmetic.time_subtract(<MonthBegin>) - 4.33ms 3.93ms 0.91 offset.OnOffset.time_on_offset(<CustomBusinessMonthBegin>) - 2.30ms 2.05ms 0.89 timeseries.ResampleSeries.time_resample('datetime', '1D', 'ohlc') - 148.28ms 131.47ms 0.89 timeseries.DatetimeIndex.time_to_pydatetime('tz_aware') - 10.14μs 8.93μs 0.88 offset.OffestDatetimeArithmetic.time_apply(<MonthEnd>) - 13.64μs 11.91μs 0.87 offset.OffestDatetimeArithmetic.time_apply_np_dt64(<SemiMonthBegin: day_of_month=15>) - 32.12μs 27.72μs 0.86 offset.OffestDatetimeArithmetic.time_add_10(<CustomBusinessDay>) - 16.82μs 13.94μs 0.83 offset.OffestDatetimeArithmetic.time_add_10(<MonthEnd>) - 3.27μs 2.59μs 0.79 timeseries.DatetimeIndex.time_get('repeated') - 313.89μs 235.57μs 0.75 offset.OffsetDatetimeIndexArithmetic.time_add_offset(<QuarterEnd: startingMonth=3>) - 23.48μs 15.20μs 
0.65 offset.OffestDatetimeArithmetic.time_add(<BusinessYearEnd: month=12>) - 3.96ms 2.55ms 0.64 timeseries.ResampleSeries.time_resample('period', '5min', 'ohlc') - 23.80μs 13.81μs 0.58 offset.OffestDatetimeArithmetic.time_add(<BusinessDay>) - 3.13ms 1.45ms 0.46 timeseries.ToDatetimeCache.time_dup_string_dates(False) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19403
2018-01-26T00:07:48Z
2018-02-02T11:32:49Z
2018-02-02T11:32:49Z
2018-02-04T16:42:54Z
DOC: correct merge_ordered example
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 8ee30bf72d313..99ea2c4fe4688 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -194,19 +194,17 @@ def merge_ordered(left, right, on=None, 5 e 3 b >>> merge_ordered(A, B, fill_method='ffill', left_by='group') - key lvalue group rvalue - 0 a 1 a NaN - 1 b 1 a 1 - 2 c 2 a 2 - 3 d 2 a 3 - 4 e 3 a 3 - 5 f 3 a 4 - 6 a 1 b NaN - 7 b 1 b 1 - 8 c 2 b 2 - 9 d 2 b 3 - 10 e 3 b 3 - 11 f 3 b 4 + group key lvalue rvalue + 0 a a 1 NaN + 1 a b 1 1.0 + 2 a c 2 2.0 + 3 a d 2 3.0 + 4 a e 3 3.0 + 5 b a 1 NaN + 6 b b 1 1.0 + 7 b c 2 2.0 + 8 b d 2 3.0 + 9 b e 3 3.0 Returns ------- diff --git a/pandas/tests/reshape/merge/test_merge_ordered.py b/pandas/tests/reshape/merge/test_merge_ordered.py index a4c8793cc0ade..31c484a483d18 100644 --- a/pandas/tests/reshape/merge/test_merge_ordered.py +++ b/pandas/tests/reshape/merge/test_merge_ordered.py @@ -81,3 +81,21 @@ def test_empty_sequence_concat(self): pd.concat([pd.DataFrame()]) pd.concat([None, pd.DataFrame()]) pd.concat([pd.DataFrame(), None]) + + def test_doc_example(self): + left = DataFrame({'key': ['a', 'c', 'e', 'a', 'c', 'e'], + 'lvalue': [1, 2, 3] * 2, + 'group': list('aaabbb')}) + + right = DataFrame({'key': ['b', 'c', 'd'], + 'rvalue': [1, 2, 3]}) + + result = merge_ordered(left, right, fill_method='ffill', + left_by='group') + + expected = DataFrame({'group': list('aaaaabbbbb'), + 'key': ['a', 'b', 'c', 'd', 'e'] * 2, + 'lvalue': [1, 1, 2, 2, 3] * 2, + 'rvalue': [nan, 1, 2, 3, 3] * 2}) + + assert_frame_equal(result, expected)
- update of incorrect documentation example for merge_ordered. - adding a test corresponding to this example. - [ ] closes #19393 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19401
2018-01-25T22:41:27Z
2018-01-26T11:23:45Z
2018-01-26T11:23:45Z
2018-01-26T11:23:46Z
DOC: changes to use code-block declaration
diff --git a/doc/source/developer.rst b/doc/source/developer.rst index 5c3b114ce7299..0ef097da090f2 100644 --- a/doc/source/developer.rst +++ b/doc/source/developer.rst @@ -153,7 +153,7 @@ Libraries can use the decorators pandas objects. All of these follow a similar convention: you decorate a class, providing the name of attribute to add. The class's `__init__` method gets the object being decorated. For example: -.. ipython:: python +.. code-block:: python @pd.api.extensions.register_dataframe_accessor("geo") class GeoAccessor(object):
- [ ] closes #19400 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19400
2018-01-25T21:57:20Z
2018-01-26T11:18:38Z
2018-01-26T11:18:37Z
2018-01-26T11:18:38Z
Fix invalid relativedelta_kwds
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 7509c502f27ed..473a4bb72e6d9 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1245,7 +1245,6 @@ Offsets - Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) - Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operations (:issue:`14774`) - Numeric ^^^^^^^ - Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`) diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index b015495b095b6..59589478f48f5 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -402,7 +402,8 @@ Timezones Offsets ^^^^^^^ -- +- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`) +- Bug in :class:`DateOffset` where keyword arguments ``week`` and ``milliseconds`` were accepted and ignored. 
Passing these will now raise ``ValueError`` (:issue:`19398`) - Numeric diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 7881529f04ed3..3ba2270a851d5 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -254,10 +254,10 @@ def _validate_business_time(t_input): relativedelta_kwds = set([ 'years', 'months', 'weeks', 'days', - 'year', 'month', 'week', 'day', 'weekday', + 'year', 'month', 'day', 'weekday', 'hour', 'minute', 'second', 'microsecond', 'nanosecond', 'nanoseconds', - 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds']) + 'hours', 'minutes', 'seconds', 'microseconds']) def _determine_offset(kwds): diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index db69bfadfcf49..35ee0d37e2b1a 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3085,6 +3085,13 @@ def test_valid_month_attributes(kwd, month_classes): cls(**{kwd: 3}) +@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds))) +def test_valid_relativedelta_kwargs(kwd): + # Check that all the arguments specified in liboffsets.relativedelta_kwds + # are in fact valid relativedelta keyword args + DateOffset(**{kwd: 1}) + + @pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds))) def test_valid_tick_attributes(kwd, tick_classes): # GH#18226 diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 5d076bf33a8ac..dd4356aac1cd5 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -158,6 +158,54 @@ def __add__(date): date + BDay(0) == BDay.rollforward(date) Since 0 is a bit weird, we suggest avoiding its use. + + Parameters + ---------- + n : int, default 1 + The number of time periods the offset represents. + normalize : bool, default False + Whether to round the result of a DateOffset addition down to the + previous midnight. 
+ **kwds + Temporal parameter that add to or replace the offset value. + + Parameters that **add** to the offset (like Timedelta): + + - years + - months + - weeks + - days + - hours + - minutes + - seconds + - microseconds + - nanoseconds + + Parameters that **replace** the offset value: + + - year + - month + - day + - weekday + - hour + - minute + - second + - microsecond + - nanosecond + + See Also + -------- + dateutil.relativedelta.relativedelta + + Examples + -------- + >>> ts = pd.Timestamp('2017-01-01 09:10:11') + >>> ts + DateOffset(months=3) + Timestamp('2017-04-01 09:10:11') + + >>> ts = pd.Timestamp('2017-01-01 09:10:11') + >>> ts + DateOffset(month=3) + Timestamp('2017-03-01 09:10:11') """ _params = cache_readonly(BaseOffset._params.fget) _use_relativedelta = False
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19398
2018-01-25T19:16:02Z
2018-07-20T13:29:26Z
2018-07-20T13:29:26Z
2020-04-05T17:40:50Z
Remove unused Index attributes
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 74c6abeb0ad12..626f3dc86556a 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -141,12 +141,10 @@ class Index(IndexOpsMixin, PandasObject): _join_precedence = 1 # Cython methods - _arrmap = libalgos.arrmap_object _left_indexer_unique = libjoin.left_join_indexer_unique_object _left_indexer = libjoin.left_join_indexer_object _inner_indexer = libjoin.inner_join_indexer_object _outer_indexer = libjoin.outer_join_indexer_object - _box_scalars = False _typ = 'index' _data = None @@ -155,9 +153,6 @@ class Index(IndexOpsMixin, PandasObject): asi8 = None _comparables = ['name'] _attributes = ['name'] - _allow_index_ops = True - _allow_datetime_index_ops = False - _allow_period_index_ops = False _is_numeric_dtype = False _can_hold_na = True diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index afc86a51c02b4..8dd41c022d163 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -282,7 +282,6 @@ def _join_i8_wrapper(joinf, **kwargs): _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64) _left_indexer_unique = _join_i8_wrapper( libjoin.left_join_indexer_unique_int64, with_indexers=False) - _arrmap = None @classmethod def _add_comparison_methods(cls): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 232770e582763..3bf783b5a2faa 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -207,7 +207,6 @@ class IntervalIndex(IntervalMixin, Index): _typ = 'intervalindex' _comparables = ['name'] _attributes = ['name', 'closed'] - _allow_index_ops = True # we would like our indexing holder to defer to us _defer_to_indexing = True diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 5e6ebb7588ab9..b02aee0495d8c 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -1,6 +1,6 @@ import 
numpy as np from pandas._libs import (index as libindex, - algos as libalgos, join as libjoin) + join as libjoin) from pandas.core.dtypes.common import ( is_dtype_equal, pandas_dtype, @@ -158,7 +158,6 @@ class Int64Index(NumericIndex): __doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args _typ = 'int64index' - _arrmap = libalgos.arrmap_int64 _left_indexer_unique = libjoin.left_join_indexer_unique_int64 _left_indexer = libjoin.left_join_indexer_int64 _inner_indexer = libjoin.inner_join_indexer_int64 @@ -217,7 +216,6 @@ class UInt64Index(NumericIndex): __doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args _typ = 'uint64index' - _arrmap = libalgos.arrmap_uint64 _left_indexer_unique = libjoin.left_join_indexer_unique_uint64 _left_indexer = libjoin.left_join_indexer_uint64 _inner_indexer = libjoin.inner_join_indexer_uint64 @@ -296,7 +294,6 @@ class Float64Index(NumericIndex): _typ = 'float64index' _engine_type = libindex.Float64Engine - _arrmap = libalgos.arrmap_float64 _left_indexer_unique = libjoin.left_join_indexer_unique_float64 _left_indexer = libjoin.left_join_indexer_float64 _inner_indexer = libjoin.inner_join_indexer_float64 diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 8b35b1a231551..1f8542ed5ee60 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -204,7 +204,6 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index): DatetimeIndex : Index with datetime64 data TimedeltaIndex : Index of timedelta64 data """ - _box_scalars = True _typ = 'periodindex' _attributes = ['name', 'freq'] diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index b88ee88210cfe..4b543262fc485 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -170,7 +170,6 @@ def _join_i8_wrapper(joinf, **kwargs): _left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64) _left_indexer_unique = _join_i8_wrapper( 
libjoin.left_join_indexer_unique_int64, with_indexers=False) - _arrmap = None # define my properties & methods for delegation _other_ops = [] diff --git a/pandas/core/series.py b/pandas/core/series.py index a14eb69d86377..78b4c3a70a519 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -144,7 +144,6 @@ class Series(base.IndexOpsMixin, generic.NDFrame): _deprecations = generic.NDFrame._deprecations | frozenset( ['asobject', 'sortlevel', 'reshape', 'get_value', 'set_value', 'from_csv', 'valid']) - _allow_index_ops = True def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False): diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py index c468908db5449..df2547fc7b0da 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -265,8 +265,8 @@ class TestIndexOps(Ops): def setup_method(self, method): super(TestIndexOps, self).setup_method(method) - self.is_valid_objs = [o for o in self.objs if o._allow_index_ops] - self.not_valid_objs = [o for o in self.objs if not o._allow_index_ops] + self.is_valid_objs = self.objs + self.not_valid_objs = [] def test_none_comparison(self):
https://api.github.com/repos/pandas-dev/pandas/pulls/19397
2018-01-25T19:11:18Z
2018-01-27T01:12:24Z
2018-01-27T01:12:24Z
2018-01-31T06:49:27Z
Centralize ops kwarg specification
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 3db2dd849ccee..ba8a15b60ba56 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -39,7 +39,8 @@ ABCSeries, ABCDataFrame, ABCIndex, - ABCPeriodIndex) + ABCPeriodIndex, + ABCSparseSeries) def _gen_eval_kwargs(name): @@ -109,6 +110,31 @@ def _gen_fill_zeros(name): return fill_value +def _get_frame_op_default_axis(name): + """ + Only DataFrame cares about default_axis, specifically: + special methods have default_axis=None and flex methods + have default_axis='columns'. + + Parameters + ---------- + name : str + + Returns + ------- + default_axis: str or None + """ + if name.replace('__r', '__') in ['__and__', '__or__', '__xor__']: + # bool methods + return 'columns' + elif name.startswith('__'): + # __add__, __mul__, ... + return None + else: + # add, mul, ... + return 'columns' + + # ----------------------------------------------------------------------------- # Docstring Generation and Templates @@ -281,17 +307,17 @@ def _gen_fill_zeros(name): _agg_doc_PANEL = """ -Wrapper method for {wrp_method} +Wrapper method for {op_name} Parameters ---------- -other : {construct} or {cls_name} -axis : {{{axis_order}}} +other : DataFrame or Panel +axis : {{items, major_axis, minor_axis}} Axis to broadcast over Returns ------- -{cls_name} +Panel """ @@ -337,14 +363,18 @@ def _make_flex_doc(op_name, typ): # methods -def _create_methods(arith_method, comp_method, bool_method, - use_numexpr, special=False, default_axis='columns', - have_divmod=False): +def _create_methods(cls, arith_method, comp_method, bool_method, + special=False): # creates actual methods based upon arithmetic, comp and bool method # constructors. 
- # NOTE: Only frame cares about default_axis, specifically: special methods - # have default axis None, whereas flex methods have default axis 'columns' + # numexpr is available for non-sparse classes + subtyp = getattr(cls, '_subtyp', '') + use_numexpr = 'sparse' not in subtyp + + have_divmod = issubclass(cls, ABCSeries) + # divmod is available for Series and SparseSeries + # if we're not using numexpr, then don't pass a str_rep if use_numexpr: op = lambda x: x @@ -360,44 +390,28 @@ def names(x): else: names = lambda x: x - # Inframe, all special methods have default_axis=None, flex methods have - # default_axis set to the default (columns) # yapf: disable new_methods = dict( - add=arith_method(operator.add, names('add'), op('+'), - default_axis=default_axis), - radd=arith_method(lambda x, y: y + x, names('radd'), op('+'), - default_axis=default_axis), - sub=arith_method(operator.sub, names('sub'), op('-'), - default_axis=default_axis), - mul=arith_method(operator.mul, names('mul'), op('*'), - default_axis=default_axis), - truediv=arith_method(operator.truediv, names('truediv'), op('/'), - default_axis=default_axis), - floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'), - default_axis=default_axis), + add=arith_method(operator.add, names('add'), op('+')), + radd=arith_method(lambda x, y: y + x, names('radd'), op('+')), + sub=arith_method(operator.sub, names('sub'), op('-')), + mul=arith_method(operator.mul, names('mul'), op('*')), + truediv=arith_method(operator.truediv, names('truediv'), op('/')), + floordiv=arith_method(operator.floordiv, names('floordiv'), op('//')), # Causes a floating point exception in the tests when numexpr enabled, # so for now no speedup - mod=arith_method(operator.mod, names('mod'), None, - default_axis=default_axis), - pow=arith_method(operator.pow, names('pow'), op('**'), - default_axis=default_axis), + mod=arith_method(operator.mod, names('mod'), None), + pow=arith_method(operator.pow, names('pow'), op('**')), # not 
entirely sure why this is necessary, but previously was included # so it's here to maintain compatibility - rmul=arith_method(operator.mul, names('rmul'), op('*'), - default_axis=default_axis), - rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'), - default_axis=default_axis), + rmul=arith_method(operator.mul, names('rmul'), op('*')), + rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-')), rtruediv=arith_method(lambda x, y: operator.truediv(y, x), - names('rtruediv'), op('/'), - default_axis=default_axis), + names('rtruediv'), op('/')), rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x), - names('rfloordiv'), op('//'), - default_axis=default_axis), - rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**'), - default_axis=default_axis), - rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'), - default_axis=default_axis)) + names('rfloordiv'), op('//')), + rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**')), + rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'))) # yapf: enable new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] @@ -425,10 +439,7 @@ def names(x): names('rxor'), op('^')))) if have_divmod: # divmod doesn't have an op that is supported by numexpr - new_methods['divmod'] = arith_method(divmod, - names('divmod'), - None, - default_axis=default_axis) + new_methods['divmod'] = arith_method(divmod, names('divmod'), None) new_methods = {names(k): v for k, v in new_methods.items()} return new_methods @@ -444,8 +455,7 @@ def add_methods(cls, new_methods, force): # Arithmetic def add_special_arithmetic_methods(cls, arith_method=None, comp_method=None, bool_method=None, - use_numexpr=True, force=False, - have_divmod=False): + force=False): """ Adds the full suite of special arithmetic methods (``__add__``, ``__sub__``, etc.) to the class. 
@@ -454,27 +464,17 @@ def add_special_arithmetic_methods(cls, arith_method=None, ---------- arith_method : function (optional) factory for special arithmetic methods, with op string: - f(op, name, str_rep, default_axis=None) + f(op, name, str_rep) comp_method : function (optional) factory for rich comparison - signature: f(op, name, str_rep) bool_method : function (optional) factory for boolean methods - signature: f(op, name, str_rep) - use_numexpr : bool, default True - whether to accelerate with numexpr, defaults to True force : bool, default False if False, checks whether function is defined **on ``cls.__dict__``** before defining if True, always defines functions on class base - have_divmod : bool, (optional) - should a divmod method be added? this method is special because it - returns a tuple of cls instead of a single element of type cls """ - - # in frame, special methods have default_axis = None, comp methods use - # 'columns' - - new_methods = _create_methods(arith_method, comp_method, - bool_method, use_numexpr, default_axis=None, - special=True, have_divmod=have_divmod) + new_methods = _create_methods(cls, arith_method, comp_method, bool_method, + special=True) # inplace operators (I feel like these should get passed an `inplace=True` # or just be removed @@ -517,7 +517,7 @@ def f(self, other): def add_flex_arithmetic_methods(cls, flex_arith_method, flex_comp_method=None, flex_bool_method=None, - use_numexpr=True, force=False): + force=False): """ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. 
@@ -525,20 +525,16 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, Parameters ---------- flex_arith_method : function - factory for special arithmetic methods, with op string: - f(op, name, str_rep, default_axis=None) + factory for flex arithmetic methods, with op string: + f(op, name, str_rep) flex_comp_method : function, optional, factory for rich comparison - signature: f(op, name, str_rep) - use_numexpr : bool, default True - whether to accelerate with numexpr, defaults to True force : bool, default False if False, checks whether function is defined **on ``cls.__dict__``** before defining if True, always defines functions on class base """ - # in frame, default axis is 'columns', doesn't matter for series and panel - new_methods = _create_methods(flex_arith_method, + new_methods = _create_methods(cls, flex_arith_method, flex_comp_method, flex_bool_method, - use_numexpr, default_axis='columns', special=False) new_methods.update(dict(multiply=new_methods['mul'], subtract=new_methods['sub'], @@ -597,7 +593,7 @@ def _construct_divmod_result(left, result, index, name, dtype): ) -def _arith_method_SERIES(op, name, str_rep, default_axis=None): +def _arith_method_SERIES(op, name, str_rep): """ Wrapper function for Series arithmetic operations, to avoid code duplication. 
@@ -637,15 +633,9 @@ def safe_na_op(lvalues, rvalues): with np.errstate(all='ignore'): return na_op(lvalues, rvalues) except Exception: - if isinstance(rvalues, ABCSeries): - if is_object_dtype(rvalues): - # if dtype is object, try elementwise op - return libalgos.arrmap_object(rvalues, - lambda x: op(lvalues, x)) - else: - if is_object_dtype(lvalues): - return libalgos.arrmap_object(lvalues, - lambda x: op(x, rvalues)) + if is_object_dtype(lvalues): + return libalgos.arrmap_object(lvalues, + lambda x: op(x, rvalues)) raise def wrapper(left, right, name=name, na_op=na_op): @@ -671,7 +661,7 @@ def wrapper(left, right, name=name, na_op=na_op): lvalues = left.values rvalues = right if isinstance(rvalues, ABCSeries): - rvalues = getattr(rvalues, 'values', rvalues) + rvalues = rvalues.values result = safe_na_op(lvalues, rvalues) return construct_result(left, result, @@ -933,7 +923,7 @@ def wrapper(self, other): return wrapper -def _flex_method_SERIES(op, name, str_rep, default_axis=None): +def _flex_method_SERIES(op, name, str_rep): doc = _make_flex_doc(name, 'series') @Appender(doc) @@ -964,8 +954,7 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): series_special_funcs = dict(arith_method=_arith_method_SERIES, comp_method=_comp_method_SERIES, - bool_method=_bool_method_SERIES, - have_divmod=True) + bool_method=_bool_method_SERIES) # ----------------------------------------------------------------------------- @@ -1015,9 +1004,10 @@ def to_series(right): return right -def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns'): +def _arith_method_FRAME(op, name, str_rep=None): eval_kwargs = _gen_eval_kwargs(name) fill_zeros = _gen_fill_zeros(name) + default_axis = _get_frame_op_default_axis(name) def na_op(x, y): import pandas.core.computation.expressions as expressions @@ -1088,7 +1078,8 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): return f -def _flex_comp_method_FRAME(op, name, str_rep=None, 
default_axis='columns'): +def _flex_comp_method_FRAME(op, name, str_rep=None): + default_axis = _get_frame_op_default_axis(name) def na_op(x, y): try: @@ -1167,8 +1158,7 @@ def f(self, other): # ----------------------------------------------------------------------------- # Panel -def _arith_method_PANEL(op, name, str_rep=None, default_axis=None): - +def _arith_method_PANEL(op, name, str_rep=None): # work only for scalars def f(self, other): if not is_scalar(other): @@ -1228,6 +1218,122 @@ def f(self, other, axis=None): return f +def _flex_method_PANEL(op, name, str_rep=None): + eval_kwargs = _gen_eval_kwargs(name) + fill_zeros = _gen_fill_zeros(name) + + def na_op(x, y): + import pandas.core.computation.expressions as expressions + + try: + result = expressions.evaluate(op, str_rep, x, y, + errors='raise', + **eval_kwargs) + except TypeError: + result = op(x, y) + + # handles discrepancy between numpy and numexpr on division/mod + # by 0 though, given that these are generally (always?) + # non-scalars, I'm not sure whether it's worth it at the moment + result = missing.fill_zeros(result, x, y, name, fill_zeros) + return result + + if name in _op_descriptions: + doc = _make_flex_doc(name, 'panel') + else: + # doc strings substitors + doc = _agg_doc_PANEL.format(op_name=name) + + @Appender(doc) + def f(self, other, axis=0): + return self._combine(other, na_op, axis=axis) + + f.__name__ = name + return f + + panel_special_funcs = dict(arith_method=_arith_method_PANEL, comp_method=_comp_method_PANEL, bool_method=_arith_method_PANEL) + + +# ----------------------------------------------------------------------------- +# Sparse + + +def _arith_method_SPARSE_SERIES(op, name, str_rep=None): + """ + Wrapper function for Series arithmetic operations, to avoid + code duplication. + + str_rep is not used, but is present for compatibility. 
+ """ + + def wrapper(self, other): + if isinstance(other, ABCDataFrame): + return NotImplemented + elif isinstance(other, ABCSeries): + if not isinstance(other, ABCSparseSeries): + other = other.to_sparse(fill_value=self.fill_value) + return _sparse_series_op(self, other, op, name) + elif is_scalar(other): + with np.errstate(all='ignore'): + new_values = op(self.values, other) + return self._constructor(new_values, + index=self.index, + name=self.name) + else: # pragma: no cover + raise TypeError('operation with {other} not supported' + .format(other=type(other))) + + wrapper.__name__ = name + if name.startswith("__"): + # strip special method names, e.g. `__add__` needs to be `add` when + # passed to _sparse_series_op + name = name[2:-2] + return wrapper + + +def _sparse_series_op(left, right, op, name): + left, right = left.align(right, join='outer', copy=False) + new_index = left.index + new_name = com._maybe_match_name(left, right) + + from pandas.core.sparse.array import _sparse_array_op + result = _sparse_array_op(left.values, right.values, op, name, + series=True) + return left._constructor(result, index=new_index, name=new_name) + + +def _arith_method_SPARSE_ARRAY(op, name, str_rep=None): + """ + Wrapper function for Series arithmetic operations, to avoid + code duplication. + """ + + def wrapper(self, other): + from pandas.core.sparse.array import ( + SparseArray, _sparse_array_op, _wrap_result, _get_fill) + if isinstance(other, np.ndarray): + if len(self) != len(other): + raise AssertionError("length mismatch: {self} vs. 
{other}" + .format(self=len(self), other=len(other))) + if not isinstance(other, SparseArray): + dtype = getattr(other, 'dtype', None) + other = SparseArray(other, fill_value=self.fill_value, + dtype=dtype) + return _sparse_array_op(self, other, op, name) + elif is_scalar(other): + with np.errstate(all='ignore'): + fill = op(_get_fill(self), np.asarray(other)) + result = op(self.sp_values, other) + + return _wrap_result(name, result, self.sp_index, fill) + else: # pragma: no cover + raise TypeError('operation with {other} not supported' + .format(other=type(other))) + + if name.startswith("__"): + name = name[2:-2] + wrapper.__name__ = name + return wrapper diff --git a/pandas/core/panel.py b/pandas/core/panel.py index afdd9bae3006f..2cb80e938afb9 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -16,7 +16,6 @@ from pandas.core.dtypes.missing import notna import pandas.core.ops as ops -import pandas.core.missing as missing import pandas.core.common as com from pandas import compat from pandas.compat import (map, zip, range, u, OrderedDict) @@ -1521,52 +1520,6 @@ def _extract_axis(self, data, axis=0, intersect=False): return _ensure_index(index) - @classmethod - def _add_aggregate_operations(cls, use_numexpr=True): - """ add the operations to the cls; evaluate the doc strings again """ - - def _panel_arith_method(op, name, str_rep=None, default_axis=None): - - eval_kwargs = ops._gen_eval_kwargs(name) - fill_zeros = ops._gen_fill_zeros(name) - - def na_op(x, y): - import pandas.core.computation.expressions as expressions - - try: - result = expressions.evaluate(op, str_rep, x, y, - errors='raise', - **eval_kwargs) - except TypeError: - result = op(x, y) - - # handles discrepancy between numpy and numexpr on division/mod - # by 0 though, given that these are generally (always?) 
- # non-scalars, I'm not sure whether it's worth it at the moment - result = missing.fill_zeros(result, x, y, name, fill_zeros) - return result - - if name in ops._op_descriptions: - doc = ops._make_flex_doc(name, 'panel') - else: - # doc strings substitors - doc = ops._agg_doc_PANEL.format( - construct=cls._constructor_sliced.__name__, - cls_name=cls.__name__, wrp_method=name, - axis_order=', '.join(cls._AXIS_ORDERS)) - - @Appender(doc) - def f(self, other, axis=0): - return self._combine(other, na_op, axis=axis) - - f.__name__ = name - return f - - # add `div`, `mul`, `pow`, etc.. - ops.add_flex_arithmetic_methods( - cls, _panel_arith_method, use_numexpr=use_numexpr, - flex_comp_method=ops._comp_method_PANEL) - Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0, stat_axis=1, aliases={'major': 'major_axis', @@ -1575,7 +1528,8 @@ def f(self, other, axis=0): 'minor_axis': 'columns'}) ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs) -Panel._add_aggregate_operations() +ops.add_flex_arithmetic_methods(Panel, ops._flex_method_PANEL, + flex_comp_method=ops._comp_method_PANEL) Panel._add_numeric_operations() diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 059e399593971..fa07400a0706e 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -14,8 +14,7 @@ from pandas.compat import range from pandas.compat.numpy import function as nv -from pandas.core.dtypes.generic import ( - ABCSparseArray, ABCSparseSeries) +from pandas.core.dtypes.generic import ABCSparseSeries from pandas.core.dtypes.common import ( _ensure_platform_int, is_float, is_integer, @@ -43,38 +42,6 @@ _sparray_doc_kwargs = dict(klass='SparseArray') -def _arith_method_SPARSE_ARRAY(op, name, str_rep=None, default_axis=None): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. 
- """ - - def wrapper(self, other): - if isinstance(other, np.ndarray): - if len(self) != len(other): - raise AssertionError("length mismatch: {self} vs. {other}" - .format(self=len(self), other=len(other))) - if not isinstance(other, ABCSparseArray): - dtype = getattr(other, 'dtype', None) - other = SparseArray(other, fill_value=self.fill_value, - dtype=dtype) - return _sparse_array_op(self, other, op, name) - elif is_scalar(other): - with np.errstate(all='ignore'): - fill = op(_get_fill(self), np.asarray(other)) - result = op(self.sp_values, other) - - return _wrap_result(name, result, self.sp_index, fill) - else: # pragma: no cover - raise TypeError('operation with {other} not supported' - .format(other=type(other))) - - if name.startswith("__"): - name = name[2:-2] - wrapper.__name__ = name - return wrapper - - def _get_fill(arr): # coerce fill_value to arr dtype if possible # int64 SparseArray can have NaN as fill_value if there is no missing @@ -864,7 +831,6 @@ def _make_index(length, indices, kind): ops.add_special_arithmetic_methods(SparseArray, - arith_method=_arith_method_SPARSE_ARRAY, - comp_method=_arith_method_SPARSE_ARRAY, - bool_method=_arith_method_SPARSE_ARRAY, - use_numexpr=False) + arith_method=ops._arith_method_SPARSE_ARRAY, + comp_method=ops._arith_method_SPARSE_ARRAY, + bool_method=ops._arith_method_SPARSE_ARRAY) diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index c7f5b0ba67c19..cc08ccf77ad26 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -981,7 +981,5 @@ def homogenize(series_dict): # use unaccelerated ops for sparse objects -ops.add_flex_arithmetic_methods(SparseDataFrame, use_numexpr=False, - **ops.frame_flex_funcs) -ops.add_special_arithmetic_methods(SparseDataFrame, use_numexpr=False, - **ops.frame_special_funcs) +ops.add_flex_arithmetic_methods(SparseDataFrame, **ops.frame_flex_funcs) +ops.add_special_arithmetic_methods(SparseDataFrame, **ops.frame_special_funcs) diff --git 
a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 2c8fd20f8eab1..4e207f9d1838c 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -9,12 +9,10 @@ import warnings from pandas.core.dtypes.missing import isna, notna -from pandas.core.dtypes.common import is_scalar from pandas.compat.numpy import function as nv from pandas.core.index import Index, _ensure_index, InvalidIndexError from pandas.core.series import Series -from pandas.core.frame import DataFrame from pandas.core.internals import SingleBlockManager from pandas.core import generic import pandas.core.common as com @@ -23,7 +21,7 @@ from pandas.util._decorators import Appender from pandas.core.sparse.array import ( - make_sparse, _sparse_array_op, SparseArray, + make_sparse, SparseArray, _make_index) from pandas._libs.sparse import BlockIndex, IntIndex import pandas._libs.sparse as splib @@ -37,53 +35,6 @@ axes_single_arg="{0, 'index'}", optional_labels='', optional_axis='') -# ----------------------------------------------------------------------------- -# Wrapper function for Series arithmetic methods - - -def _arith_method_SPARSE_SERIES(op, name, str_rep=None, default_axis=None): - """ - Wrapper function for Series arithmetic operations, to avoid - code duplication. - - str_rep and default_axis are not used, but are - present for compatibility. 
- """ - - def wrapper(self, other): - if isinstance(other, Series): - if not isinstance(other, SparseSeries): - other = other.to_sparse(fill_value=self.fill_value) - return _sparse_series_op(self, other, op, name) - elif isinstance(other, DataFrame): - return NotImplemented - elif is_scalar(other): - with np.errstate(all='ignore'): - new_values = op(self.values, other) - return self._constructor(new_values, - index=self.index, - name=self.name) - else: # pragma: no cover - raise TypeError('operation with {other} not supported' - .format(other=type(other))) - - wrapper.__name__ = name - if name.startswith("__"): - # strip special method names, e.g. `__add__` needs to be `add` when - # passed to _sparse_series_op - name = name[2:-2] - return wrapper - - -def _sparse_series_op(left, right, op, name): - left, right = left.align(right, join='outer', copy=False) - new_index = left.index - new_name = com._maybe_match_name(left, right) - - result = _sparse_array_op(left.values, right.values, op, name, - series=True) - return left._constructor(result, index=new_index, name=new_name) - class SparseSeries(Series): """Data structure for labeled, sparse floating point data @@ -861,14 +812,11 @@ def from_coo(cls, A, dense_index=False): # overwrite series methods with unaccelerated versions -ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False, - **ops.series_special_funcs) -ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False, - **ops.series_flex_funcs) +ops.add_special_arithmetic_methods(SparseSeries, **ops.series_special_funcs) +ops.add_flex_arithmetic_methods(SparseSeries, **ops.series_flex_funcs) # overwrite basic arithmetic to use SparseSeries version # force methods to overwrite previous definitions. 
ops.add_special_arithmetic_methods(SparseSeries, - arith_method=_arith_method_SPARSE_SERIES, - comp_method=_arith_method_SPARSE_SERIES, - bool_method=None, use_numexpr=False, - force=True) + ops._arith_method_SPARSE_SERIES, + comp_method=ops._arith_method_SPARSE_SERIES, + bool_method=None, force=True)
Follow-up to #19346, making **kwargs more explicit, documenting how they are chosen, moving one method from `Panel` that belongs in `core.ops`. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19396
2018-01-25T17:58:22Z
2018-01-27T01:20:21Z
2018-01-27T01:20:21Z
2018-01-31T06:49:27Z
Separate non-scalar tests from test_timestamps
diff --git a/pandas/tests/frame/test_apply.py b/pandas/tests/frame/test_apply.py index 65dd166e1f6a8..e0fc6c470fe57 100644 --- a/pandas/tests/frame/test_apply.py +++ b/pandas/tests/frame/test_apply.py @@ -428,6 +428,16 @@ def test_applymap(self): result = frame.applymap(func) tm.assert_frame_equal(result, frame) + def test_applymap_box_timestamps(self): + # #2689, #2627 + ser = pd.Series(date_range('1/1/2000', periods=10)) + + def func(x): + return (x.hour, x.day, x.month) + + # it works! + pd.DataFrame(ser).applymap(func) + def test_applymap_box(self): # ufunc will not be boxed. Same test cases as the test_map_box df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'), diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index a91dbd905e12c..fb8dd1a43aa7f 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -144,6 +144,25 @@ def test_numpy_minmax(self): tm.assert_raises_regex( ValueError, errmsg, np.argmax, dr, out=0) + def test_round_daily(self): + dti = pd.date_range('20130101 09:10:11', periods=5) + result = dti.round('D') + expected = pd.date_range('20130101', periods=5) + tm.assert_index_equal(result, expected) + + dti = dti.tz_localize('UTC').tz_convert('US/Eastern') + result = dti.round('D') + expected = pd.date_range('20130101', + periods=5).tz_localize('US/Eastern') + tm.assert_index_equal(result, expected) + + result = dti.round('s') + tm.assert_index_equal(result, dti) + + # invalid + for freq in ['Y', 'M', 'foobar']: + pytest.raises(ValueError, lambda: dti.round(freq)) + def test_round(self): for tz in self.tz: rng = pd.date_range(start='2016-01-01', periods=5, diff --git a/pandas/tests/io/test_html.py b/pandas/tests/io/test_html.py index fd0c2b9d0218c..151a0750b7f6e 100644 --- a/pandas/tests/io/test_html.py +++ b/pandas/tests/io/test_html.py @@ -868,6 +868,13 @@ def test_fallback_success(self): banklist_data = os.path.join(DATA_PATH, 'banklist.html') 
self.read_html(banklist_data, '.*Water.*', flavor=['lxml', 'html5lib']) + def test_to_html_timestamp(self): + rng = date_range('2000-01-01', periods=10) + df = DataFrame(np.random.randn(10, 4), index=rng) + + result = df.to_html() + assert '2000-01-01' in result + def test_parse_dates_list(self): df = DataFrame({'date': date_range('1/1/2001', periods=10)}) expected = df.to_html() diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index a3e9a0442ea0b..2b72eef2c6712 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -19,17 +19,13 @@ from pandas.tseries import offsets -from pandas._libs.tslibs import conversion, period +from pandas._libs.tslibs import conversion from pandas._libs.tslibs.timezones import get_timezone, dateutil_gettz as gettz from pandas._libs.tslibs.frequencies import _INVALID_FREQ_ERROR from pandas.compat import long, PY3 -from pandas.util.testing import assert_series_equal from pandas.compat.numpy import np_datetime64_compat -from pandas import (Timestamp, date_range, Period, Timedelta, compat, - Series, NaT, DataFrame) -from pandas.tseries.frequencies import (RESO_DAY, RESO_HR, RESO_MIN, RESO_US, - RESO_MS, RESO_SEC) +from pandas import Timestamp, Period, Timedelta, NaT class TestTimestampArithmetic(object): @@ -54,6 +50,50 @@ def test_delta_preserve_nanos(self): result = val + timedelta(1) assert result.nanosecond == val.nanosecond + def test_timestamp_sub_datetime(self): + dt = datetime(2013, 10, 12) + ts = Timestamp(datetime(2013, 10, 13)) + assert (ts - dt).days == 1 + assert (dt - ts).days == -1 + + def test_addition_subtraction_types(self): + # Assert on the types resulting from Timestamp +/- various date/time + # objects + dt = datetime(2014, 3, 4) + td = timedelta(seconds=1) + # build a timestamp with a frequency, since then it supports + # addition/subtraction of integers + ts = Timestamp(dt, freq='D') + + assert type(ts + 1) == Timestamp + assert type(ts 
- 1) == Timestamp + + # Timestamp + datetime not supported, though subtraction is supported + # and yields timedelta more tests in tseries/base/tests/test_base.py + assert type(ts - dt) == Timedelta + assert type(ts + td) == Timestamp + assert type(ts - td) == Timestamp + + # Timestamp +/- datetime64 not supported, so not tested (could possibly + # assert error raised?) + td64 = np.timedelta64(1, 'D') + assert type(ts + td64) == Timestamp + assert type(ts - td64) == Timestamp + + def test_addition_subtraction_preserve_frequency(self): + ts = Timestamp('2014-03-05', freq='D') + td = timedelta(days=1) + original_freq = ts.freq + + assert (ts + 1).freq == original_freq + assert (ts - 1).freq == original_freq + assert (ts + td).freq == original_freq + assert (ts - td).freq == original_freq + + td64 = np.timedelta64(1, 'D') + assert (ts + td64).freq == original_freq + assert (ts - td64).freq == original_freq + class TestTimestampProperties(object): @@ -76,6 +116,112 @@ def test_properties_business(self): assert control.is_month_end assert control.is_quarter_end + def test_fields(self): + def check(value, equal): + # that we are int/long like + assert isinstance(value, (int, long)) + assert value == equal + + # GH 10050 + ts = Timestamp('2015-05-10 09:06:03.000100001') + check(ts.year, 2015) + check(ts.month, 5) + check(ts.day, 10) + check(ts.hour, 9) + check(ts.minute, 6) + check(ts.second, 3) + pytest.raises(AttributeError, lambda: ts.millisecond) + check(ts.microsecond, 100) + check(ts.nanosecond, 1) + check(ts.dayofweek, 6) + check(ts.quarter, 2) + check(ts.dayofyear, 130) + check(ts.week, 19) + check(ts.daysinmonth, 31) + check(ts.daysinmonth, 31) + + # GH 13303 + ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern') + check(ts.year, 2014) + check(ts.month, 12) + check(ts.day, 31) + check(ts.hour, 23) + check(ts.minute, 59) + check(ts.second, 0) + pytest.raises(AttributeError, lambda: ts.millisecond) + check(ts.microsecond, 0) + check(ts.nanosecond, 0) + 
check(ts.dayofweek, 2) + check(ts.quarter, 4) + check(ts.dayofyear, 365) + check(ts.week, 1) + check(ts.daysinmonth, 31) + + ts = Timestamp('2014-01-01 00:00:00+01:00') + starts = ['is_month_start', 'is_quarter_start', 'is_year_start'] + for start in starts: + assert getattr(ts, start) + ts = Timestamp('2014-12-31 23:59:59+01:00') + ends = ['is_month_end', 'is_year_end', 'is_quarter_end'] + for end in ends: + assert getattr(ts, end) + + @pytest.mark.parametrize('data, expected', + [(Timestamp('2017-08-28 23:00:00'), 'Monday'), + (Timestamp('2017-08-28 23:00:00', tz='EST'), + 'Monday')]) + def test_weekday_name(self, data, expected): + # GH 17354 + assert data.weekday_name == expected + + @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']) + def test_is_leap_year(self, tz): + # GH 13727 + dt = Timestamp('2000-01-01 00:00:00', tz=tz) + assert dt.is_leap_year + assert isinstance(dt.is_leap_year, bool) + + dt = Timestamp('1999-01-01 00:00:00', tz=tz) + assert not dt.is_leap_year + + dt = Timestamp('2004-01-01 00:00:00', tz=tz) + assert dt.is_leap_year + + dt = Timestamp('2100-01-01 00:00:00', tz=tz) + assert not dt.is_leap_year + + def test_woy_boundary(self): + # make sure weeks at year boundaries are correct + d = datetime(2013, 12, 31) + result = Timestamp(d).week + expected = 1 # ISO standard + assert result == expected + + d = datetime(2008, 12, 28) + result = Timestamp(d).week + expected = 52 # ISO standard + assert result == expected + + d = datetime(2009, 12, 31) + result = Timestamp(d).week + expected = 53 # ISO standard + assert result == expected + + d = datetime(2010, 1, 1) + result = Timestamp(d).week + expected = 53 # ISO standard + assert result == expected + + d = datetime(2010, 1, 3) + result = Timestamp(d).week + expected = 53 # ISO standard + assert result == expected + + result = np.array([Timestamp(datetime(*args)).week + for args in [(2000, 1, 1), (2000, 1, 2), ( + 2005, 1, 1), (2005, 1, 2)]]) + assert (result == [52, 52, 53, 
53]).all() + class TestTimestampConstructors(object): @@ -310,24 +456,60 @@ def test_constructor_fromordinal(self): ts = Timestamp.fromordinal(dt_tz.toordinal(), tz='US/Eastern') assert ts.to_pydatetime() == dt_tz + def test_out_of_bounds_value(self): + one_us = np.timedelta64(1).astype('timedelta64[us]') -class TestTimestamp(object): + # By definition we can't go out of bounds in [ns], so we + # convert the datetime64s to [us] so we can go out of bounds + min_ts_us = np.datetime64(Timestamp.min).astype('M8[us]') + max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]') - def test_conversion(self): - # GH 9255 - ts = Timestamp('2000-01-01') + # No error for the min/max datetimes + Timestamp(min_ts_us) + Timestamp(max_ts_us) - result = ts.to_pydatetime() - expected = datetime(2000, 1, 1) - assert result == expected - assert type(result) == type(expected) + # One us less than the minimum is an error + with pytest.raises(ValueError): + Timestamp(min_ts_us - one_us) - result = ts.to_datetime64() - expected = np.datetime64(ts.value, 'ns') - assert result == expected - assert type(result) == type(expected) - assert result.dtype == expected.dtype + # One us more than the maximum is an error + with pytest.raises(ValueError): + Timestamp(max_ts_us + one_us) + + def test_out_of_bounds_string(self): + with pytest.raises(ValueError): + Timestamp('1676-01-01') + with pytest.raises(ValueError): + Timestamp('2263-01-01') + def test_bounds_with_different_units(self): + out_of_bounds_dates = ('1677-09-21', '2262-04-12') + + time_units = ('D', 'h', 'm', 's', 'ms', 'us') + + for date_string in out_of_bounds_dates: + for unit in time_units: + dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit) + with pytest.raises(ValueError): + Timestamp(dt64) + + in_bounds_dates = ('1677-09-23', '2262-04-11') + + for date_string in in_bounds_dates: + for unit in time_units: + dt64 = np.datetime64(date_string, dtype='M8[%s]' % unit) + Timestamp(dt64) + + def test_min_valid(self): + # Ensure 
that Timestamp.min is a valid Timestamp + Timestamp(Timestamp.min) + + def test_max_valid(self): + # Ensure that Timestamp.max is a valid Timestamp + Timestamp(Timestamp.max) + + +class TestTimestamp(object): @pytest.mark.parametrize('freq', ['D', 'M', 'S', 'N']) @pytest.mark.parametrize('date', ['2014-03-07', '2014-01-01 09:00', '2014-01-01 00:00:00.000000001']) @@ -394,22 +576,6 @@ def test_timestamp_repr_pre1900(self): result = repr(stamp) assert iso8601 in result - def test_bounds_with_different_units(self): - out_of_bounds_dates = ('1677-09-21', '2262-04-12', ) - - time_units = ('D', 'h', 'm', 's', 'ms', 'us') - - for date_string in out_of_bounds_dates: - for unit in time_units: - pytest.raises(ValueError, Timestamp, np.datetime64( - date_string, dtype='M8[%s]' % unit)) - - in_bounds_dates = ('1677-09-23', '2262-04-11', ) - - for date_string in in_bounds_dates: - for unit in time_units: - Timestamp(np.datetime64(date_string, dtype='M8[%s]' % unit)) - def test_tz(self): t = '2014-02-01 09:00' ts = Timestamp(t) @@ -435,11 +601,9 @@ def test_tz_localize_ambiguous(self): ts_dst = ts.tz_localize('US/Eastern', ambiguous=True) ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False) - rng = date_range('2014-11-02', periods=3, freq='H', tz='US/Eastern') - assert rng[1] == ts_dst - assert rng[2] == ts_no_dst - pytest.raises(ValueError, ts.tz_localize, 'US/Eastern', - ambiguous='infer') + assert (ts_no_dst.value - ts_dst.value) / 1e9 == 3600 + with pytest.raises(ValueError): + ts.tz_localize('US/Eastern', ambiguous='infer') # GH 8025 with tm.assert_raises_regex(TypeError, @@ -501,24 +665,6 @@ def test_tz_convert_roundtrip(self, tz): assert reset.tzinfo is None assert reset == converted.tz_convert('UTC').tz_localize(None) - def test_barely_oob_dts(self): - one_us = np.timedelta64(1).astype('timedelta64[us]') - - # By definition we can't go out of bounds in [ns], so we - # convert the datetime64s to [us] so we can go out of bounds - min_ts_us = 
np.datetime64(Timestamp.min).astype('M8[us]') - max_ts_us = np.datetime64(Timestamp.max).astype('M8[us]') - - # No error for the min/max datetimes - Timestamp(min_ts_us) - Timestamp(max_ts_us) - - # One us less than the minimum is an error - pytest.raises(ValueError, Timestamp, min_ts_us - one_us) - - # One us more than the maximum is an error - pytest.raises(ValueError, Timestamp, max_ts_us + one_us) - def test_utc_z_designator(self): assert get_timezone(Timestamp('2014-11-02 01:00Z').tzinfo) == 'UTC' @@ -569,64 +715,6 @@ def test_asm8(self): assert (Timestamp('nat').asm8.view('i8') == np.datetime64('nat', 'ns').view('i8')) - def test_fields(self): - def check(value, equal): - # that we are int/long like - assert isinstance(value, (int, compat.long)) - assert value == equal - - # GH 10050 - ts = Timestamp('2015-05-10 09:06:03.000100001') - check(ts.year, 2015) - check(ts.month, 5) - check(ts.day, 10) - check(ts.hour, 9) - check(ts.minute, 6) - check(ts.second, 3) - pytest.raises(AttributeError, lambda: ts.millisecond) - check(ts.microsecond, 100) - check(ts.nanosecond, 1) - check(ts.dayofweek, 6) - check(ts.quarter, 2) - check(ts.dayofyear, 130) - check(ts.week, 19) - check(ts.daysinmonth, 31) - check(ts.daysinmonth, 31) - - # GH 13303 - ts = Timestamp('2014-12-31 23:59:00-05:00', tz='US/Eastern') - check(ts.year, 2014) - check(ts.month, 12) - check(ts.day, 31) - check(ts.hour, 23) - check(ts.minute, 59) - check(ts.second, 0) - pytest.raises(AttributeError, lambda: ts.millisecond) - check(ts.microsecond, 0) - check(ts.nanosecond, 0) - check(ts.dayofweek, 2) - check(ts.quarter, 4) - check(ts.dayofyear, 365) - check(ts.week, 1) - check(ts.daysinmonth, 31) - - ts = Timestamp('2014-01-01 00:00:00+01:00') - starts = ['is_month_start', 'is_quarter_start', 'is_year_start'] - for start in starts: - assert getattr(ts, start) - ts = Timestamp('2014-12-31 23:59:59+01:00') - ends = ['is_month_end', 'is_year_end', 'is_quarter_end'] - for end in ends: - assert getattr(ts, end) 
- - @pytest.mark.parametrize('data, expected', - [(Timestamp('2017-08-28 23:00:00'), 'Monday'), - (Timestamp('2017-08-28 23:00:00', tz='EST'), - 'Monday')]) - def test_weekday_name(self, data, expected): - # GH 17354 - assert data.weekday_name == expected - def test_pprint(self): # GH12622 import pprint @@ -646,16 +734,6 @@ def test_pprint(self): 'foo': 1}""" assert result == expected - def test_to_pydatetime_nonzero_nano(self): - ts = Timestamp('2011-01-01 9:00:00.123456789') - - # Warn the user of data loss (nanoseconds). - with tm.assert_produces_warning(UserWarning, - check_stacklevel=False): - expected = datetime(2011, 1, 1, 9, 0, 0, 123456) - result = ts.to_pydatetime() - assert result == expected - def test_round(self): # round @@ -684,11 +762,6 @@ def test_round(self): expected = Timestamp('20130104 12:30:00') assert result == expected - dti = date_range('20130101 09:10:11', periods=5) - result = dti.round('D') - expected = date_range('20130101', periods=5) - tm.assert_index_equal(result, expected) - # floor dt = Timestamp('20130101 09:10:11') result = dt.floor('D') @@ -711,19 +784,6 @@ def test_round(self): result = dt.round('s') assert result == dt - dti = date_range('20130101 09:10:11', - periods=5).tz_localize('UTC').tz_convert('US/Eastern') - result = dti.round('D') - expected = date_range('20130101', periods=5).tz_localize('US/Eastern') - tm.assert_index_equal(result, expected) - - result = dti.round('s') - tm.assert_index_equal(result, dti) - - # invalid - for freq in ['Y', 'M', 'foobar']: - pytest.raises(ValueError, lambda: dti.round(freq)) - # GH 14440 & 15578 result = Timestamp('2016-10-17 12:00:00.0015').round('ms') expected = Timestamp('2016-10-17 12:00:00.002000') @@ -845,7 +905,7 @@ def check(val, unit=None, h=1, s=1, us=0): check(days, unit='D', h=0) # using truediv, so these are like floats - if compat.PY3: + if PY3: check((val + 500000) / long(1000000000), unit='s', us=500) check((val + 500000000) / long(1000000000), unit='s', us=500000) 
check((val + 500000) / long(1000000), unit='ms', us=500) @@ -900,22 +960,6 @@ def test_hash_equivalent(self): stamp = Timestamp(datetime(2011, 1, 1)) assert d[stamp] == 5 - @pytest.mark.parametrize('tz', [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']) - def test_is_leap_year(self, tz): - # GH 13727 - dt = Timestamp('2000-01-01 00:00:00', tz=tz) - assert dt.is_leap_year - assert isinstance(dt.is_leap_year, bool) - - dt = Timestamp('1999-01-01 00:00:00', tz=tz) - assert not dt.is_leap_year - - dt = Timestamp('2004-01-01 00:00:00', tz=tz) - assert dt.is_leap_year - - dt = Timestamp('2100-01-01 00:00:00', tz=tz) - assert not dt.is_leap_year - @td.skip_if_windows def test_timestamp(self): # GH#17329 @@ -1017,13 +1061,6 @@ def test_compare_invalid(self): assert val != np.float64(1) assert val != np.int64(1) - # ops testing - df = DataFrame(np.random.randn(5, 2)) - a = df[0] - b = Series(np.random.randn(5)) - b.name = Timestamp('2000-01-01') - tm.assert_series_equal(a / b, 1 / (b / a)) - def test_cant_compare_tz_naive_w_aware(self): # see gh-1404 a = Timestamp('3/12/2012') @@ -1112,41 +1149,6 @@ def test_timestamp_compare_scalars(self): result = right_f(nat, rhs) assert result == expected - def test_timestamp_compare_series(self): - # make sure we can compare Timestamps on the right AND left hand side - # GH4982 - s = Series(date_range('20010101', periods=10), name='dates') - s_nat = s.copy(deep=True) - - s[0] = Timestamp('nat') - s[3] = Timestamp('nat') - - ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'} - - for left, right in ops.items(): - left_f = getattr(operator, left) - right_f = getattr(operator, right) - - # no nats - expected = left_f(s, Timestamp('20010109')) - result = right_f(Timestamp('20010109'), s) - tm.assert_series_equal(result, expected) - - # nats - expected = left_f(s, Timestamp('nat')) - result = right_f(Timestamp('nat'), s) - tm.assert_series_equal(result, expected) - - # compare to timestamp with series containing nats - expected = left_f(s_nat, 
Timestamp('20010109')) - result = right_f(Timestamp('20010109'), s_nat) - tm.assert_series_equal(result, expected) - - # compare to nat with series containing nats - expected = left_f(s_nat, Timestamp('nat')) - result = right_f(Timestamp('nat'), s_nat) - tm.assert_series_equal(result, expected) - def test_timestamp_compare_with_early_datetime(self): # e.g. datetime.min stamp = Timestamp('2012-01-01') @@ -1250,79 +1252,6 @@ def test_nanosecond_timestamp(self): assert t.nanosecond == 10 -class TestTimestampOps(object): - - def test_timestamp_and_datetime(self): - assert ((Timestamp(datetime(2013, 10, 13)) - - datetime(2013, 10, 12)).days == 1) - assert ((datetime(2013, 10, 12) - - Timestamp(datetime(2013, 10, 13))).days == -1) - - def test_timestamp_and_series(self): - timestamp_series = Series(date_range('2014-03-17', periods=2, freq='D', - tz='US/Eastern')) - first_timestamp = timestamp_series[0] - - delta_series = Series([np.timedelta64(0, 'D'), np.timedelta64(1, 'D')]) - assert_series_equal(timestamp_series - first_timestamp, delta_series) - assert_series_equal(first_timestamp - timestamp_series, -delta_series) - - def test_addition_subtraction_types(self): - # Assert on the types resulting from Timestamp +/- various date/time - # objects - datetime_instance = datetime(2014, 3, 4) - timedelta_instance = timedelta(seconds=1) - # build a timestamp with a frequency, since then it supports - # addition/subtraction of integers - timestamp_instance = Timestamp(datetime_instance, freq='D') - - assert type(timestamp_instance + 1) == Timestamp - assert type(timestamp_instance - 1) == Timestamp - - # Timestamp + datetime not supported, though subtraction is supported - # and yields timedelta more tests in tseries/base/tests/test_base.py - assert type(timestamp_instance - datetime_instance) == Timedelta - assert type(timestamp_instance + timedelta_instance) == Timestamp - assert type(timestamp_instance - timedelta_instance) == Timestamp - - # Timestamp +/- datetime64 not 
supported, so not tested (could possibly - # assert error raised?) - timedelta64_instance = np.timedelta64(1, 'D') - assert type(timestamp_instance + timedelta64_instance) == Timestamp - assert type(timestamp_instance - timedelta64_instance) == Timestamp - - def test_addition_subtraction_preserve_frequency(self): - timestamp_instance = Timestamp('2014-03-05', freq='D') - timedelta_instance = timedelta(days=1) - original_freq = timestamp_instance.freq - - assert (timestamp_instance + 1).freq == original_freq - assert (timestamp_instance - 1).freq == original_freq - assert (timestamp_instance + timedelta_instance).freq == original_freq - assert (timestamp_instance - timedelta_instance).freq == original_freq - - timedelta64_instance = np.timedelta64(1, 'D') - assert (timestamp_instance + - timedelta64_instance).freq == original_freq - assert (timestamp_instance - - timedelta64_instance).freq == original_freq - - @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo', 'US/Eastern', - 'dateutil/US/Eastern']) - def test_resolution(self, tz): - - for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', - 'S', 'L', 'U'], - [RESO_DAY, RESO_DAY, - RESO_DAY, RESO_DAY, - RESO_HR, RESO_MIN, - RESO_SEC, RESO_MS, - RESO_US]): - idx = date_range(start='2013-04-01', periods=30, freq=freq, tz=tz) - result = period.resolution(idx.asi8, idx.tz) - assert result == expected - - class TestTimestampToJulianDate(object): def test_compare_1700(self): @@ -1347,6 +1276,31 @@ def test_compare_hour13(self): class TestTimestampConversion(object): + def test_conversion(self): + # GH#9255 + ts = Timestamp('2000-01-01') + + result = ts.to_pydatetime() + expected = datetime(2000, 1, 1) + assert result == expected + assert type(result) == type(expected) + + result = ts.to_datetime64() + expected = np.datetime64(ts.value, 'ns') + assert result == expected + assert type(result) == type(expected) + assert result.dtype == expected.dtype + + def test_to_pydatetime_nonzero_nano(self): + ts = 
Timestamp('2011-01-01 9:00:00.123456789') + + # Warn the user of data loss (nanoseconds). + with tm.assert_produces_warning(UserWarning, + check_stacklevel=False): + expected = datetime(2011, 1, 1, 9, 0, 0, 123456) + result = ts.to_pydatetime() + assert result == expected + def test_timestamp_to_datetime(self): stamp = Timestamp('20090415', tz='US/Eastern', freq='D') dtval = stamp.to_pydatetime() @@ -1384,102 +1338,3 @@ def test_to_datetime_bijective(self): with tm.assert_produces_warning(exp_warning, check_stacklevel=False): assert (Timestamp(Timestamp.min.to_pydatetime()).value / 1000 == Timestamp.min.value / 1000) - - -class TestTimeSeries(object): - - def test_timestamp_date_out_of_range(self): - pytest.raises(ValueError, Timestamp, '1676-01-01') - pytest.raises(ValueError, Timestamp, '2263-01-01') - - def test_timestamp_equality(self): - - # GH 11034 - s = Series([Timestamp('2000-01-29 01:59:00'), 'NaT']) - result = s != s - assert_series_equal(result, Series([False, True])) - result = s != s[0] - assert_series_equal(result, Series([False, True])) - result = s != s[1] - assert_series_equal(result, Series([True, True])) - - result = s == s - assert_series_equal(result, Series([True, False])) - result = s == s[0] - assert_series_equal(result, Series([True, False])) - result = s == s[1] - assert_series_equal(result, Series([False, False])) - - def test_series_box_timestamp(self): - rng = date_range('20090415', '20090519', freq='B') - s = Series(rng) - - assert isinstance(s[5], Timestamp) - - rng = date_range('20090415', '20090519', freq='B') - s = Series(rng, index=rng) - assert isinstance(s[5], Timestamp) - - assert isinstance(s.iat[5], Timestamp) - - def test_to_html_timestamp(self): - rng = date_range('2000-01-01', periods=10) - df = DataFrame(np.random.randn(10, 4), index=rng) - - result = df.to_html() - assert '2000-01-01' in result - - def test_series_map_box_timestamps(self): - # #2689, #2627 - s = Series(date_range('1/1/2000', periods=10)) - - def f(x): - 
return (x.hour, x.day, x.month) - - # it works! - s.map(f) - s.apply(f) - DataFrame(s).applymap(f) - - def test_woy_boundary(self): - # make sure weeks at year boundaries are correct - d = datetime(2013, 12, 31) - result = Timestamp(d).week - expected = 1 # ISO standard - assert result == expected - - d = datetime(2008, 12, 28) - result = Timestamp(d).week - expected = 52 # ISO standard - assert result == expected - - d = datetime(2009, 12, 31) - result = Timestamp(d).week - expected = 53 # ISO standard - assert result == expected - - d = datetime(2010, 1, 1) - result = Timestamp(d).week - expected = 53 # ISO standard - assert result == expected - - d = datetime(2010, 1, 3) - result = Timestamp(d).week - expected = 53 # ISO standard - assert result == expected - - result = np.array([Timestamp(datetime(*args)).week - for args in [(2000, 1, 1), (2000, 1, 2), ( - 2005, 1, 1), (2005, 1, 2)]]) - assert (result == [52, 52, 53, 53]).all() - - -class TestTsUtil(object): - - def test_min_valid(self): - # Ensure that Timestamp.min is a valid Timestamp - Timestamp(Timestamp.min) - - def test_max_valid(self): - # Ensure that Timestamp.max is a valid Timestamp - Timestamp(Timestamp.max) diff --git a/pandas/tests/series/test_apply.py b/pandas/tests/series/test_apply.py index 8899ab585d6cb..3822ecd0a1b0e 100644 --- a/pandas/tests/series/test_apply.py +++ b/pandas/tests/series/test_apply.py @@ -77,6 +77,17 @@ def test_apply_args(self): assert result[0] == ['foo', 'bar'] assert isinstance(result[0], list) + def test_series_map_box_timestamps(self): + # GH#2689, GH#2627 + ser = Series(pd.date_range('1/1/2000', periods=10)) + + def func(x): + return (x.hour, x.day, x.month) + + # it works! + ser.map(func) + ser.apply(func) + def test_apply_box(self): # ufunc will not be boxed. 
Same test cases as the test_map_box vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')] diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 9db05ff590fed..ca558dd6b7cd5 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -1,10 +1,77 @@ # -*- coding: utf-8 -*- from datetime import timedelta +import operator + +import numpy as np import pandas as pd import pandas.util.testing as tm +class TestSeriesComparison(object): + def test_compare_invalid(self): + # GH#8058 + # ops testing + a = pd.Series(np.random.randn(5), name=0) + b = pd.Series(np.random.randn(5)) + b.name = pd.Timestamp('2000-01-01') + tm.assert_series_equal(a / b, 1 / (b / a)) + + +class TestTimestampSeriesComparison(object): + def test_timestamp_compare_series(self): + # make sure we can compare Timestamps on the right AND left hand side + # GH#4982 + ser = pd.Series(pd.date_range('20010101', periods=10), name='dates') + s_nat = ser.copy(deep=True) + + ser[0] = pd.Timestamp('nat') + ser[3] = pd.Timestamp('nat') + + ops = {'lt': 'gt', 'le': 'ge', 'eq': 'eq', 'ne': 'ne'} + + for left, right in ops.items(): + left_f = getattr(operator, left) + right_f = getattr(operator, right) + + # no nats + expected = left_f(ser, pd.Timestamp('20010109')) + result = right_f(pd.Timestamp('20010109'), ser) + tm.assert_series_equal(result, expected) + + # nats + expected = left_f(ser, pd.Timestamp('nat')) + result = right_f(pd.Timestamp('nat'), ser) + tm.assert_series_equal(result, expected) + + # compare to timestamp with series containing nats + expected = left_f(s_nat, pd.Timestamp('20010109')) + result = right_f(pd.Timestamp('20010109'), s_nat) + tm.assert_series_equal(result, expected) + + # compare to nat with series containing nats + expected = left_f(s_nat, pd.Timestamp('nat')) + result = right_f(pd.Timestamp('nat'), s_nat) + tm.assert_series_equal(result, expected) + + def test_timestamp_equality(self): + # 
GH#11034 + ser = pd.Series([pd.Timestamp('2000-01-29 01:59:00'), 'NaT']) + result = ser != ser + tm.assert_series_equal(result, pd.Series([False, True])) + result = ser != ser[0] + tm.assert_series_equal(result, pd.Series([False, True])) + result = ser != ser[1] + tm.assert_series_equal(result, pd.Series([True, True])) + + result = ser == ser + tm.assert_series_equal(result, pd.Series([True, False])) + result = ser == ser[0] + tm.assert_series_equal(result, pd.Series([True, False])) + result = ser == ser[1] + tm.assert_series_equal(result, pd.Series([False, False])) + + class TestTimedeltaSeriesComparisons(object): def test_compare_timedelta_series(self): # regresssion test for GH5963 @@ -55,3 +122,15 @@ def test_ops_series_period(self): expected = pd.Series([4, 2], name='xxx', dtype=object) tm.assert_series_equal(s2 - ser, expected) tm.assert_series_equal(ser - s2, -expected) + + +class TestTimestampSeriesArithmetic(object): + def test_timestamp_sub_series(self): + ser = pd.Series(pd.date_range('2014-03-17', periods=2, freq='D', + tz='US/Eastern')) + ts = ser[0] + + delta_series = pd.Series([np.timedelta64(0, 'D'), + np.timedelta64(1, 'D')]) + tm.assert_series_equal(ser - ts, delta_series) + tm.assert_series_equal(ts - ser, -delta_series) diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index bafc6d268c266..fbfbad547ce1b 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -610,6 +610,18 @@ def test_getitem_box_float64(self): value = self.ts[5] assert isinstance(value, np.float64) + def test_series_box_timestamp(self): + rng = pd.date_range('20090415', '20090519', freq='B') + ser = Series(rng) + + assert isinstance(ser[5], pd.Timestamp) + + rng = pd.date_range('20090415', '20090519', freq='B') + ser = Series(rng, index=rng) + assert isinstance(ser[5], pd.Timestamp) + + assert isinstance(ser.iat[5], pd.Timestamp) + def test_getitem_ambiguous_keyerror(self): s = Series(lrange(10), 
index=lrange(0, 20, 2)) pytest.raises(KeyError, s.__getitem__, 1)
Getting DataFrame, Series, Index tests out of tests.scalars and organizing them better, should make it easier to go and parametrize some older tests, in particular arithmetic and comparisons.
https://api.github.com/repos/pandas-dev/pandas/pulls/19385
2018-01-24T23:18:41Z
2018-01-25T12:01:56Z
2018-01-25T12:01:56Z
2018-01-31T06:49:34Z
Remove unused from _libs.parsers
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 932ae8b1a33d0..efe61716d0831 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -7,9 +7,8 @@ import warnings from csv import QUOTE_MINIMAL, QUOTE_NONNUMERIC, QUOTE_NONE -from libc.stdio cimport fopen, fclose -from libc.stdlib cimport malloc, free -from libc.string cimport strncpy, strlen, strcmp, strcasecmp +from libc.stdlib cimport free +from libc.string cimport strncpy, strlen, strcasecmp cimport cython from cython cimport Py_ssize_t @@ -27,9 +26,6 @@ cdef extern from "Python.h": object PyUnicode_Decode(char *v, Py_ssize_t size, char *encoding, char *errors) -cdef extern from "stdlib.h": - void memcpy(void *dst, void *src, size_t n) - import numpy as np cimport numpy as cnp @@ -50,7 +46,7 @@ from khash cimport ( import pandas.compat as compat from pandas.core.dtypes.common import ( - is_categorical_dtype, CategoricalDtype, + is_categorical_dtype, is_integer_dtype, is_float_dtype, is_bool_dtype, is_object_dtype, is_datetime64_dtype, @@ -90,9 +86,6 @@ try: except NameError: basestring = str -cdef extern from "src/numpy_helper.h": - void transfer_object_column(char *dst, char *src, size_t stride, - size_t length) cdef extern from "parser/tokenizer.h": @@ -232,8 +225,6 @@ cdef extern from "parser/tokenizer.h": int parser_trim_buffers(parser_t *self) - void debug_print_parser(parser_t *self) - int tokenize_all_rows(parser_t *self) nogil int tokenize_nrows(parser_t *self, size_t nrows) nogil @@ -249,7 +240,6 @@ cdef extern from "parser/tokenizer.h": double round_trip(const char *p, char **q, char decimal, char sci, char tsep, int skip_trailing) nogil - int to_longlong(char *item, long long *p_value) nogil int to_boolean(const char *item, uint8_t *val) nogil @@ -875,9 +865,6 @@ cdef class TextReader: return header, field_count - cdef _implicit_index_count(self): - pass - def read(self, rows=None): """ rows=None --> read all rows @@ -997,9 +984,6 @@ cdef class TextReader: return 
columns - def debug_print(self): - debug_print_parser(self.parser) - cdef _start_clock(self): self.clocks.append(time.time()) @@ -1346,6 +1330,7 @@ cdef class TextReader: else: return None + cdef object _true_values = [b'True', b'TRUE', b'true'] cdef object _false_values = [b'False', b'FALSE', b'false'] @@ -1375,21 +1360,6 @@ cdef asbytes(object o): _NA_VALUES = _ensure_encoded(list(com._NA_VALUES)) -def _is_file_like(obj): - if PY3: - import io - if isinstance(obj, io.TextIOWrapper): - raise ParserError('Cannot handle open unicode files (yet)') - - # BufferedReader is a byte reader for Python 3 - file = io.BufferedReader - else: - import __builtin__ - file = __builtin__.file - - return isinstance(obj, (basestring, file)) - - def _maybe_upcast(arr): """ @@ -1479,6 +1449,7 @@ cdef _string_box_factorize(parser_t *parser, int64_t col, return result, na_count + cdef _string_box_utf8(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, kh_str_t *na_hashset): @@ -1532,6 +1503,7 @@ cdef _string_box_utf8(parser_t *parser, int64_t col, return result, na_count + cdef _string_box_decode(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, kh_str_t *na_hashset, @@ -1662,6 +1634,7 @@ cdef _categorical_convert(parser_t *parser, int64_t col, kh_destroy_str(table) return np.asarray(codes), result, na_count + cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, int64_t width): cdef: @@ -1679,6 +1652,7 @@ cdef _to_fw_string(parser_t *parser, int64_t col, int64_t line_start, return result + cdef inline void _to_fw_string_nogil(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, size_t width, char *data) nogil: @@ -1694,10 +1668,12 @@ cdef inline void _to_fw_string_nogil(parser_t *parser, int64_t col, strncpy(data, word, width) data += width + cdef char* cinf = b'inf' cdef char* cposinf = b'+inf' cdef char* cneginf = b'-inf' + cdef _try_double(parser_t *parser, int64_t 
col, int64_t line_start, int64_t line_end, bint na_filter, kh_str_t *na_hashset, object na_flist): @@ -1738,6 +1714,7 @@ cdef _try_double(parser_t *parser, int64_t col, return None, None return result, na_count + cdef inline int _try_double_nogil(parser_t *parser, double (*double_converter)( const char *, char **, char, @@ -1808,6 +1785,7 @@ cdef inline int _try_double_nogil(parser_t *parser, return 0 + cdef _try_uint64(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, kh_str_t *na_hashset): @@ -1843,6 +1821,7 @@ cdef _try_uint64(parser_t *parser, int64_t col, return result + cdef inline int _try_uint64_nogil(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, @@ -1881,6 +1860,7 @@ cdef inline int _try_uint64_nogil(parser_t *parser, int64_t col, return 0 + cdef _try_int64(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, kh_str_t *na_hashset): @@ -1909,6 +1889,7 @@ cdef _try_int64(parser_t *parser, int64_t col, return result, na_count + cdef inline int _try_int64_nogil(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, @@ -1948,69 +1929,6 @@ cdef inline int _try_int64_nogil(parser_t *parser, int64_t col, return 0 -cdef _try_bool(parser_t *parser, int64_t col, - int64_t line_start, int64_t line_end, - bint na_filter, kh_str_t *na_hashset): - cdef: - int na_count - Py_ssize_t lines = line_end - line_start - uint8_t *data - cnp.ndarray[cnp.uint8_t, ndim=1] result - - uint8_t NA = na_values[np.bool_] - - result = np.empty(lines) - data = <uint8_t *> result.data - - with nogil: - error = _try_bool_nogil(parser, col, line_start, - line_end, na_filter, - na_hashset, NA, data, - &na_count) - if error != 0: - return None, None - return result.view(np.bool_), na_count - -cdef inline int _try_bool_nogil(parser_t *parser, int64_t col, - int64_t line_start, - int64_t line_end, bint na_filter, - const kh_str_t *na_hashset, uint8_t NA, - uint8_t 
*data, int *na_count) nogil: - cdef: - int error - Py_ssize_t i, lines = line_end - line_start - coliter_t it - const char *word = NULL - khiter_t k - na_count[0] = 0 - - coliter_setup(&it, parser, col, line_start) - - if na_filter: - for i in range(lines): - COLITER_NEXT(it, word) - - k = kh_get_str(na_hashset, word) - # in the hash table - if k != na_hashset.n_buckets: - na_count[0] += 1 - data[0] = NA - data += 1 - continue - - error = to_boolean(word, data) - if error != 0: - return error - data += 1 - else: - for i in range(lines): - COLITER_NEXT(it, word) - - error = to_boolean(word, data) - if error != 0: - return error - data += 1 - return 0 cdef _try_bool_flex(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, @@ -2039,6 +1957,7 @@ cdef _try_bool_flex(parser_t *parser, int64_t col, return None, None return result.view(np.bool_), na_count + cdef inline int _try_bool_flex_nogil(parser_t *parser, int64_t col, int64_t line_start, int64_t line_end, bint na_filter, @@ -2131,6 +2050,7 @@ cdef kh_str_t* kset_from_list(list values) except NULL: return table + cdef kh_float64_t* kset_float64_from_list(values) except NULL: # caller takes responsibility for freeing the hash table cdef: diff --git a/pandas/_libs/src/numpy_helper.h b/pandas/_libs/src/numpy_helper.h index de3486eca3e9b..6c2029fff8a1a 100644 --- a/pandas/_libs/src/numpy_helper.h +++ b/pandas/_libs/src/numpy_helper.h @@ -75,22 +75,6 @@ PANDAS_INLINE PyObject* char_to_string(char* data) { #endif } -void transfer_object_column(char* dst, char* src, size_t stride, - size_t length) { - size_t i; - size_t sz = sizeof(PyObject*); - - for (i = 0; i < length; ++i) { - // uninitialized data - - // Py_XDECREF(*((PyObject**) dst)); - - memcpy(dst, src, sz); - Py_INCREF(*((PyObject**)dst)); - src += sz; - dst += stride; - } -} void set_array_not_contiguous(PyArrayObject* ao) { ao->flags &= ~(NPY_C_CONTIGUOUS | NPY_F_CONTIGUOUS); diff --git a/pandas/_libs/src/parser/.gitignore 
b/pandas/_libs/src/parser/.gitignore deleted file mode 100644 index f07e771a35eec..0000000000000 --- a/pandas/_libs/src/parser/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -!*.c -test* \ No newline at end of file diff --git a/pandas/_libs/src/parser/Makefile b/pandas/_libs/src/parser/Makefile deleted file mode 100644 index ec88eaf44ba15..0000000000000 --- a/pandas/_libs/src/parser/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -PYTHONBASE = /Library/Frameworks/EPD64.framework/Versions/Current -NUMPY_INC = /Library/Frameworks/EPD64.framework/Versions/7.1/lib/python2.7/site-packages/numpy/core/include -PYTHON_INC = -I$(PYTHONBASE)/include/python2.7 -I$(NUMPY_INC) -PYTHON_LINK = -L$(PYTHONBASE)/lib -lpython - -SOURCES = conversions.c parser.c str_to.c - -check-syntax: - gcc -g $(PYTHON_INC) -o /dev/null -S ${CHK_SOURCES} - -test: $(SOURCES) - gcc $(PYTHON_INC) -o test $(SOURCES) - ./test \ No newline at end of file diff --git a/pandas/_libs/src/parser/tokenizer.c b/pandas/_libs/src/parser/tokenizer.c index 2e4ade209fa38..6e8c220eab6b8 100644 --- a/pandas/_libs/src/parser/tokenizer.c +++ b/pandas/_libs/src/parser/tokenizer.c @@ -1317,21 +1317,6 @@ int parser_trim_buffers(parser_t *self) { return 0; } -void debug_print_parser(parser_t *self) { - int64_t j, line; - char *token; - - for (line = 0; line < self->lines; ++line) { - printf("(Parsed) Line %lld: ", (long long)line); - - for (j = 0; j < self->line_fields[j]; ++j) { - token = self->words[j + self->line_start[line]]; - printf("%s ", token); - } - printf("\n"); - } -} - /* nrows : number of rows to tokenize (or until reach EOF) all : tokenize all the data vs. 
certain number of rows diff --git a/pandas/_libs/src/parser/tokenizer.h b/pandas/_libs/src/parser/tokenizer.h index 9462608a26814..63baf91e3c136 100644 --- a/pandas/_libs/src/parser/tokenizer.h +++ b/pandas/_libs/src/parser/tokenizer.h @@ -247,8 +247,6 @@ void parser_del(parser_t *self); void parser_set_default_options(parser_t *self); -void debug_print_parser(parser_t *self); - int tokenize_nrows(parser_t *self, size_t nrows); int tokenize_all_rows(parser_t *self);
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19380
2018-01-24T19:26:23Z
2018-01-25T11:54:45Z
2018-01-25T11:54:45Z
2018-01-31T06:49:29Z
BUG SparseDataFrame with dense Series (#19374)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4dde76dee46a5..246eab386b2ab 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -512,7 +512,7 @@ Groupby/Resample/Rolling Sparse ^^^^^^ -- +- Bug in which creating a ``SparseDataFrame`` from a dense ``Series`` or an unsupported type raised an uncontrolled exception (:issue:`19374`) - - diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index c7f5b0ba67c19..eb3184f371a0c 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -95,6 +95,9 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None, dtype=dtype, copy=copy) elif isinstance(data, DataFrame): mgr = self._init_dict(data, data.index, data.columns, dtype=dtype) + elif isinstance(data, Series): + mgr = self._init_dict(data.to_frame(), data.index, + columns=None, dtype=dtype) elif isinstance(data, BlockManager): mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) @@ -116,6 +119,10 @@ def __init__(self, data=None, index=None, columns=None, default_kind=None, mgr = to_manager(data, columns, index) if dtype is not None: mgr = mgr.astype(dtype) + else: + msg = ('SparseDataFrame called with unkown type "{data_type}" ' + 'for data argument') + raise TypeError(msg.format(data_type=type(data).__name__)) generic.NDFrame.__init__(self, mgr) diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 058892e3b85ff..2b589ebd4735e 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -199,6 +199,29 @@ def test_constructor_from_series(self): # without sparse value raises error # df2 = SparseDataFrame([x2_sparse, y]) + def test_constructor_from_dense_series(self): + # GH 19393 + # series with name + x = Series(np.random.randn(10000), name='a') + result = SparseDataFrame(x) + expected = x.to_frame().to_sparse() + 
tm.assert_sp_frame_equal(result, expected) + + # series with no name + x = Series(np.random.randn(10000)) + result = SparseDataFrame(x) + expected = x.to_frame().to_sparse() + tm.assert_sp_frame_equal(result, expected) + + def test_constructor_from_unknown_type(self): + # GH 19393 + class Unknown: + pass + with pytest.raises(TypeError, + message='SparseDataFrame called with unkown type ' + '"Unknown" for data argument'): + SparseDataFrame(Unknown()) + def test_constructor_preserve_attr(self): # GH 13866 arr = pd.SparseArray([1, 0, 3, 0], dtype=np.int64, fill_value=0)
- [X] closes #19374 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19377
2018-01-24T18:04:21Z
2018-01-27T01:10:16Z
2018-01-27T01:10:16Z
2018-01-27T01:10:47Z
Add CategoricalDtype to deprecated core.categorical shim
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 17435dfc48bde..530a3ecb5f378 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -5,3 +5,4 @@ FutureWarning, stacklevel=2) from pandas.core.arrays import Categorical # noqa +from pandas.core.dtypes.dtypes import CategoricalDtype # noqa diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 4a10ed6e7402c..c20767b09178c 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -259,3 +259,8 @@ def test_categorical_move(self): with tm.assert_produces_warning(FutureWarning): from pandas.core.categorical import Categorical # noqa + + sys.modules.pop("pandas.core.categorical", None) + + with tm.assert_produces_warning(FutureWarning): + from pandas.core.categorical import CategoricalDtype # noqa
At least fastparquet was using it
https://api.github.com/repos/pandas-dev/pandas/pulls/19373
2018-01-24T16:08:24Z
2018-01-25T01:07:53Z
2018-01-25T01:07:53Z
2018-02-13T21:58:38Z
implement ABCInterval
diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index e1ffd450c9a68..0718f8bd2b970 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -109,6 +109,7 @@ cdef class Interval(IntervalMixin): cut, qcut : Convert arrays of continuous data into Categoricals/Series of Interval. """ + _typ = "interval" cdef readonly object left """Left bound for the interval""" diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1632f5d016439..042b319d51abf 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -49,7 +49,6 @@ PyDateTime_IMPORT from tslibs.np_datetime cimport get_timedelta64_value, get_datetime64_value from tslib import NaT, Timestamp, Timedelta, array_to_datetime -from interval import Interval from missing cimport checknull diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index e15f276b39bf8..46d3c2a9c04b2 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -38,7 +38,7 @@ cpdef bint is_decimal(object obj): cpdef bint is_interval(object obj): - return isinstance(obj, Interval) + return getattr(obj, '_typ', '_typ') == 'interval' cpdef bint is_period(object val): diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index 629d88aa7f086..6fae09c43d2be 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -54,6 +54,7 @@ def _check(cls, inst): ABCPeriod = create_pandas_abc_type("ABCPeriod", "_typ", ("period", )) ABCDateOffset = create_pandas_abc_type("ABCDateOffset", "_typ", ("dateoffset",)) +ABCInterval = create_pandas_abc_type("ABCInterval", "_typ", ("interval", )) class _ABCGeneric(type): diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py index bd365f9c3281f..58cb182e7d403 100644 --- a/pandas/tests/dtypes/test_generic.py +++ b/pandas/tests/dtypes/test_generic.py @@ -45,6 +45,8 @@ def test_abc_types(self): gt.ABCDateOffset) assert not isinstance(pd.Period('2012', 
freq='A-DEC'), gt.ABCDateOffset) + assert isinstance(pd.Interval(0, 1.5), gt.ABCInterval) + assert not isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCInterval) def test_setattr_warnings():
Avoid python-space import in lib - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19367
2018-01-24T03:48:45Z
2018-01-25T01:10:11Z
2018-01-25T01:10:11Z
2018-02-11T21:58:30Z
updated hist documentation
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 8b03d6ddde4ec..88b899ad60313 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2156,10 +2156,18 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, The size of the figure to create in inches by default layout : tuple, optional Tuple of (rows, columns) for the layout of the histograms - bins : integer, default 10 - Number of histogram bins to be used + bins : integer or sequence, default 10 + Number of histogram bins to be used. If an integer is given, bins + 1 + bin edges are calculated and returned. If bins is a sequence, gives + bin edges, including left edge of first bin and right edge of last + bin. In this case, bins is returned unmodified. `**kwds` : other plotting keyword arguments To be passed to hist function + + See Also + -------- + matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. + """ _converter._WARN = False if by is not None: @@ -2219,14 +2227,19 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, rotation of y axis labels figsize : tuple, default None figure size in inches by default + bins : integer or sequence, default 10 + Number of histogram bins to be used. If an integer is given, bins + 1 + bin edges are calculated and returned. If bins is a sequence, gives + bin edges, including left edge of first bin and right edge of last + bin. In this case, bins is returned unmodified. bins: integer, default 10 Number of histogram bins to be used `**kwds` : keywords To be passed to the actual plotting function - Notes - ----- - See matplotlib documentation online for more on this + See Also + -------- + matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. """ import matplotlib.pyplot as plt
pandas.DataFrame.hist()/pandas.Series.hist() currently say that the `bins` keyword argument must be an integer, with a default value of 10. Since these methods are built off matplotlib, these actually seem to be more flexible (allowing sequences as well as integers). I don't know if there was a reason for this discrepancy, but if not, I've adjusted the pandas docstrings to reflect the matplotlib documentation. [Existing pandas documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.hist.html) [Existing matplotlib documentation](https://matplotlib.org/api/_as_gen/matplotlib.axes.Axes.hist.html) - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19366
2018-01-24T03:45:40Z
2018-02-01T13:15:43Z
2018-02-01T13:15:43Z
2018-06-22T06:48:58Z
implement Timedelta mod, divmod, rmod, rdivmod, fix and test scalar methods
diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 50cff4c7bbdfb..9890c976c4e5a 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -283,6 +283,18 @@ Rounded division (floor-division) of a ``timedelta64[ns]`` Series by a scalar td // pd.Timedelta(days=3, hours=4) pd.Timedelta(days=3, hours=4) // td +The mod (%) and divmod operations are defined for ``Timedelta`` when operating with another timedelta-like or with a numeric argument. (:issue:`19365`) + +.. ipython:: python + + pd.Timedelta(hours=37) % datetime.timedelta(hours=2) + + # divmod against a timedelta-like returns a pair (int, Timedelta) + divmod(datetime.timedelta(hours=2), pd.Timedelta(minutes=11)) + + # divmod against a numeric returns a pair (Timedelta, Timedelta) + divmod(pd.Timedelta(hours=25), 86400000000000) + Attributes ---------- diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a2198d9103528..70ac4393da2b3 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -117,6 +117,18 @@ resetting indexes. See the :ref:`Sorting by Indexes and Values # Sort by 'second' (index) and 'A' (column) df_multi.sort_values(by=['second', 'A']) +.. _whatsnew_0230.enhancements.timedelta_mod + +Timedelta mod method +^^^^^^^^^^^^^^^^^^^^ + +``mod`` (%) and ``divmod`` operations are now defined on ``Timedelta`` objects when operating with either timedelta-like or with numeric arguments. (:issue:`19365`) + +.. ipython:: python + + td = pd.Timedelta(hours=37) + td + .. _whatsnew_0230.enhancements.ran_inf: ``.rank()`` handles ``inf`` values when ``NaN`` are present @@ -571,6 +583,7 @@ Other API Changes - Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`) - :class:`DateOffset` objects render more simply, e.g. 
"<DateOffset: days=1>" instead of "<DateOffset: kwds={'days': 1}>" (:issue:`19403`) - :func:`pandas.merge` provides a more informative error message when trying to merge on timezone-aware and timezone-naive columns (:issue:`15800`) +- :func:`Timedelta.__mod__`, :func:`Timedelta.__divmod__` now accept timedelta-like and numeric arguments instead of raising ``TypeError`` (:issue:`19365`) .. _whatsnew_0230.deprecations: diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 37693068e0974..5a044dc7e33c1 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -482,11 +482,15 @@ def _binary_op_method_timedeltalike(op, name): # the PyDateTime_CheckExact case is for a datetime object that # is specifically *not* a Timestamp, as the Timestamp case will be # handled after `_validate_ops_compat` returns False below - from ..tslib import Timestamp + from timestamps import Timestamp return op(self, Timestamp(other)) # We are implicitly requiring the canonical behavior to be # defined by Timestamp methods. + elif is_timedelta64_object(other): + # other coerced to Timedelta below + pass + elif hasattr(other, 'dtype'): # nd-array like if other.dtype.kind not in ['m', 'M']: @@ -503,6 +507,9 @@ def _binary_op_method_timedeltalike(op, name): # failed to parse as timedelta return NotImplemented + if other is NaT: + # e.g. 
if original other was np.timedelta64('NaT') + return NaT return Timedelta(op(self.value, other.value), unit='ns') f.__name__ = name @@ -1044,8 +1051,10 @@ class Timedelta(_Timedelta): __rsub__ = _binary_op_method_timedeltalike(lambda x, y: y - x, '__rsub__') def __mul__(self, other): - if hasattr(other, 'dtype'): - # ndarray-like + if (hasattr(other, 'dtype') and + not (is_integer_object(other) or is_float_object(other))): + # ndarray-like; the integer/float object checks exclude + # numpy scalars return other * self.to_timedelta64() elif other is NaT: @@ -1060,7 +1069,10 @@ class Timedelta(_Timedelta): __rmul__ = __mul__ def __truediv__(self, other): - if hasattr(other, 'dtype'): + if is_timedelta64_object(other): + return self / Timedelta(other) + + elif hasattr(other, 'dtype'): return self.to_timedelta64() / other elif is_integer_object(other) or is_float_object(other): @@ -1076,7 +1088,10 @@ class Timedelta(_Timedelta): return self.value / float(other.value) def __rtruediv__(self, other): - if hasattr(other, 'dtype'): + if is_timedelta64_object(other): + return Timedelta(other) / self + + elif hasattr(other, 'dtype'): return other / self.to_timedelta64() elif not _validate_ops_compat(other): @@ -1096,9 +1111,20 @@ class Timedelta(_Timedelta): # just defer if hasattr(other, '_typ'): # Series, DataFrame, ... 
+ if other._typ == 'dateoffset' and hasattr(other, 'delta'): + # Tick offset + return self // other.delta return NotImplemented - if hasattr(other, 'dtype'): + elif is_timedelta64_object(other): + return self // Timedelta(other) + + elif is_integer_object(other) or is_float_object(other): + return Timedelta(self.value // other, unit='ns') + + elif hasattr(other, 'dtype'): + # ndarray-like; the integer/float object checks exclude + # numpy scalars if other.dtype.kind == 'm': # also timedelta-like return _broadcast_floordiv_td64(self.value, other, _floordiv) @@ -1107,14 +1133,10 @@ class Timedelta(_Timedelta): return Timedelta(self.value // other) else: return self.to_timedelta64() // other - raise TypeError('Invalid dtype {dtype} for ' '{op}'.format(dtype=other.dtype, op='__floordiv__')) - elif is_integer_object(other) or is_float_object(other): - return Timedelta(self.value // other, unit='ns') - elif not _validate_ops_compat(other): return NotImplemented @@ -1128,8 +1150,14 @@ class Timedelta(_Timedelta): # just defer if hasattr(other, '_typ'): # Series, DataFrame, ... 
+ if other._typ == 'dateoffset' and hasattr(other, 'delta'): + # Tick offset + return other.delta // self return NotImplemented + elif is_timedelta64_object(other): + return Timedelta(other) // self + if hasattr(other, 'dtype'): if other.dtype.kind == 'm': # also timedelta-like @@ -1149,6 +1177,24 @@ class Timedelta(_Timedelta): return np.nan return other.value // self.value + def __mod__(self, other): + # Naive implementation, room for optimization + return self.__divmod__(other)[1] + + def __rmod__(self, other): + # Naive implementation, room for optimization + return self.__rdivmod__(other)[1] + + def __divmod__(self, other): + # Naive implementation, room for optimization + div = self // other + return div, self - div * other + + def __rdivmod__(self, other): + # Naive implementation, room for optimization + div = other // self + return div, other - div * self + cdef _floordiv(int64_t value, right): return value // right diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 667266be2a89b..c245aaa8a5021 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -2,7 +2,7 @@ import pytest import numpy as np -from datetime import timedelta +from datetime import datetime, timedelta import pandas as pd import pandas.util.testing as tm @@ -105,15 +105,6 @@ def test_timedelta_ops_scalar(self): result = base - offset assert result == expected_sub - def test_ops_offsets(self): - td = Timedelta(10, unit='d') - assert Timedelta(241, unit='h') == td + pd.offsets.Hour(1) - assert Timedelta(241, unit='h') == pd.offsets.Hour(1) + td - assert 240 == td / pd.offsets.Hour(1) - assert 1 / 240.0 == pd.offsets.Hour(1) / td - assert Timedelta(239, unit='h') == td - pd.offsets.Hour(1) - assert Timedelta(-239, unit='h') == pd.offsets.Hour(1) - td - def test_unary_ops(self): td = Timedelta(10, unit='d') @@ -127,6 +118,73 @@ def test_unary_ops(self): assert abs(-td) == td assert abs(-td) == 
Timedelta('10d') + def test_mul(self): + # GH#19365 + td = Timedelta(minutes=3) + + result = td * 2 + assert result == Timedelta(minutes=6) + + result = td * np.int64(1) + assert isinstance(result, Timedelta) + assert result == td + + result = td * 1.5 + assert result == Timedelta(minutes=4, seconds=30) + + result = td * np.array([3, 4], dtype='int64') + expected = np.array([9, 12], dtype='m8[m]').astype('m8[ns]') + tm.assert_numpy_array_equal(result, expected) + + with pytest.raises(TypeError): + # timedelta * datetime is gibberish + td * pd.Timestamp(2016, 1, 2) + + def test_add_datetimelike(self): + # GH#19365 + td = Timedelta(10, unit='d') + + result = td + datetime(2016, 1, 1) + assert result == pd.Timestamp(2016, 1, 11) + + result = td + pd.Timestamp('2018-01-12 18:09') + assert result == pd.Timestamp('2018-01-22 18:09') + + result = td + np.datetime64('2018-01-12') + assert result == pd.Timestamp('2018-01-22') + + @pytest.mark.parametrize('op', [lambda x, y: x + y, + lambda x, y: y + x]) + def test_add_timedeltalike(self, op): + td = Timedelta(10, unit='d') + + result = op(td, Timedelta(days=10)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=20) + + result = op(td, timedelta(days=9)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=19) + + result = op(td, pd.offsets.Hour(6)) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=10, hours=6) + + result = op(td, np.timedelta64(-4, 'D')) + assert isinstance(result, Timedelta) + assert result == Timedelta(days=6) + + def test_sub_timedeltalike(self): + td = Timedelta(10, unit='d') + + result = td - pd.offsets.Hour(1) + assert isinstance(result, Timedelta) + assert result == Timedelta(239, unit='h') + + result = pd.offsets.Hour(1) - td + assert isinstance(result, Timedelta) + assert result == Timedelta(-239, unit='h') + def test_binary_ops_nat(self): td = Timedelta(10, unit='d') @@ -137,6 +195,12 @@ def test_binary_ops_nat(self): assert (td // 
pd.NaT) is np.nan assert (td // np.timedelta64('NaT')) is np.nan + # GH#19365 + assert td - np.timedelta64('NaT', 'ns') is pd.NaT + assert td + np.timedelta64('NaT', 'ns') is pd.NaT + assert np.timedelta64('NaT', 'ns') - td is pd.NaT + assert np.timedelta64('NaT', 'ns') + td is pd.NaT + def test_binary_ops_integers(self): td = Timedelta(10, unit='d') @@ -162,6 +226,16 @@ def test_binary_ops_with_timedelta(self): # invalid multiply with another timedelta pytest.raises(TypeError, lambda: td * td) + def test_div(self): + td = Timedelta(10, unit='d') + result = td / pd.offsets.Hour(1) + assert result == 240 + + def test_rdiv(self): + td = Timedelta(10, unit='d') + result = pd.offsets.Hour(1) / td + assert result == 1 / 240.0 + def test_floordiv(self): # GH#18846 td = Timedelta(hours=3, minutes=4) @@ -172,6 +246,10 @@ def test_floordiv(self): assert -td // scalar.to_pytimedelta() == -2 assert (2 * td) // scalar.to_timedelta64() == 2 + # GH#19365 + assert td // pd.offsets.Hour(1) == 3 + assert td // pd.offsets.Minute(2) == 92 + assert td // np.nan is pd.NaT assert np.isnan(td // pd.NaT) assert np.isnan(td // np.timedelta64('NaT')) @@ -217,6 +295,8 @@ def test_rfloordiv(self): assert (-td).__rfloordiv__(scalar.to_pytimedelta()) == -2 assert (2 * td).__rfloordiv__(scalar.to_timedelta64()) == 0 + assert pd.offsets.Hour(1) // Timedelta(minutes=25) == 2 + assert np.isnan(td.__rfloordiv__(pd.NaT)) assert np.isnan(td.__rfloordiv__(np.timedelta64('NaT'))) @@ -254,6 +334,178 @@ def test_rfloordiv(self): with pytest.raises(TypeError): ser // td + def test_mod_timedeltalike(self): + # GH#19365 + td = Timedelta(hours=37) + + # Timedelta-like others + result = td % Timedelta(hours=6) + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=1) + + result = td % timedelta(minutes=60) + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = td % pd.offsets.Hour(5) + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=2) + + 
result = td % np.timedelta64(2, 'h') + assert isinstance(result, Timedelta) + assert result == Timedelta(hours=1) + + result = td % NaT + assert result is NaT + + result = td % np.timedelta64('NaT', 'ns') + assert result is NaT + + def test_mod_numeric(self): + # GH#19365 + td = Timedelta(hours=37) + + # Numeric Others + result = td % 2 + assert isinstance(result, Timedelta) + assert result == Timedelta(0) + + result = td % 1e12 + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=3, seconds=20) + + result = td % int(1e12) + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=3, seconds=20) + + def test_mod_arraylike(self): + # GH#19365 + td = Timedelta(hours=37) + + # Array-like others + result = td % np.array([6, 5], dtype='timedelta64[h]') + expected = np.array([1, 2], dtype='timedelta64[h]').astype('m8[ns]') + tm.assert_numpy_array_equal(result, expected) + + result = td % pd.TimedeltaIndex(['6H', '5H']) + expected = pd.TimedeltaIndex(['1H', '2H']) + tm.assert_index_equal(result, expected) + + result = td % np.array([2, int(1e12)], dtype='i8') + expected = np.array([0, Timedelta(minutes=3, seconds=20).value], + dtype='m8[ns]') + tm.assert_numpy_array_equal(result, expected) + + def test_mod_invalid(self): + # GH#19365 + td = Timedelta(hours=37) + + with pytest.raises(TypeError): + td % pd.Timestamp('2018-01-22') + + with pytest.raises(TypeError): + td % [] + + def test_rmod(self): + # GH#19365 + td = Timedelta(minutes=3) + + result = timedelta(minutes=4) % td + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=1) + + result = np.timedelta64(5, 'm') % td + assert isinstance(result, Timedelta) + assert result == Timedelta(minutes=2) + + result = np.array([5, 6], dtype='m8[m]') % td + expected = np.array([2, 0], dtype='m8[m]').astype('m8[ns]') + tm.assert_numpy_array_equal(result, expected) + + def test_rmod_invalid(self): + # GH#19365 + td = Timedelta(minutes=3) + + with 
pytest.raises(TypeError): + pd.Timestamp('2018-01-22') % td + + with pytest.raises(TypeError): + 15 % td + + with pytest.raises(TypeError): + 16.0 % td + + with pytest.raises(TypeError): + np.array([22, 24]) % td + + def test_divmod(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + result = divmod(td, timedelta(days=1)) + assert result[0] == 2 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=6) + + result = divmod(td, pd.offsets.Hour(-4)) + assert result[0] == -14 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=-2) + + result = divmod(td, 54) + assert result[0] == Timedelta(hours=1) + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(0) + + result = divmod(td, 53 * 3600 * 1e9) + assert result[0] == Timedelta(1, unit='ns') + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=1) + + assert result + result = divmod(td, np.nan) + assert result[0] is pd.NaT + assert result[1] is pd.NaT + + result = divmod(td, pd.NaT) + assert np.isnan(result[0]) + assert result[1] is pd.NaT + + def test_divmod_invalid(self): + # GH#19365 + td = Timedelta(days=2, hours=6) + + with pytest.raises(TypeError): + divmod(td, pd.Timestamp('2018-01-22')) + + def test_rdivmod(self): + # GH#19365 + result = divmod(timedelta(days=2, hours=6), Timedelta(days=1)) + assert result[0] == 2 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=6) + + result = divmod(pd.offsets.Hour(54), Timedelta(hours=-4)) + assert result[0] == -14 + assert isinstance(result[1], Timedelta) + assert result[1] == Timedelta(hours=-2) + + def test_rdivmod_invalid(self): + # GH#19365 + td = Timedelta(minutes=3) + + with pytest.raises(TypeError): + divmod(pd.Timestamp('2018-01-22'), td) + + with pytest.raises(TypeError): + divmod(15, td) + + with pytest.raises(TypeError): + divmod(16.0, td) + + with pytest.raises(TypeError): + divmod(np.array([22, 24]), td) + class 
TestTimedeltaComparison(object): def test_comparison_object_array(self):
Implemented mod, divmod, rmod, rdivmod for Timedelta, along with tests. In the process turned up a few bugs. Conditional probability that there are more bugs is pretty good. - fixed offsets.Tick // Timedelta, Timedelta // offsets.Tick - fixed Timedelta [+-] timedelta64 returning timedelta64 instead of Timedelta - fixed Timedelta [*/] int64 returning timedelta64 instead of Timedelta - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19365
2018-01-24T03:32:19Z
2018-02-19T07:38:20Z
null
2023-05-11T01:17:14Z
Separate io helpers from _libs.lib
diff --git a/pandas/_libs/io_helper.pyx b/pandas/_libs/io_helper.pyx new file mode 100644 index 0000000000000..aa9af96c1bd6c --- /dev/null +++ b/pandas/_libs/io_helper.pyx @@ -0,0 +1,233 @@ +# -*- coding: utf-8 -*- + +cimport cython +from cython cimport Py_ssize_t + +from cpython cimport (PyString_Check, PyBytes_Check, PyUnicode_Check, + PyBytes_GET_SIZE, PyUnicode_GET_SIZE) + +try: + from cpython cimport PyString_GET_SIZE +except ImportError: + from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE + +import numpy as np +cimport numpy as cnp +from numpy cimport ndarray, uint8_t +cnp.import_array() + +cimport util + + +ctypedef fused pandas_string: + str + unicode + bytes + + +def sanitize_objects(ndarray[object] values, set na_values, + convert_empty=True): + cdef: + Py_ssize_t i, n + object val, onan + Py_ssize_t na_count = 0 + dict memo = {} + + n = len(values) + onan = np.nan + + for i from 0 <= i < n: + val = values[i] + if (convert_empty and val == '') or (val in na_values): + values[i] = onan + na_count += 1 + elif val in memo: + values[i] = memo[val] + else: + memo[val] = val + + return na_count + + +@cython.boundscheck(False) +@cython.wraparound(False) +def write_csv_rows(list data, ndarray data_index, + int nlevels, ndarray cols, object writer): + + cdef int N, j, i, ncols + cdef list rows + cdef object val + + # In crude testing, N>100 yields little marginal improvement + N=100 + + # pre-allocate rows + ncols = len(cols) + rows = [[None] * (nlevels + ncols) for x in range(N)] + + j = -1 + if nlevels == 1: + for j in range(len(data_index)): + row = rows[j % N] + row[0] = data_index[j] + for i in range(ncols): + row[1 + i] = data[i][j] + + if j >= N - 1 and j % N == N - 1: + writer.writerows(rows) + elif nlevels > 1: + for j in range(len(data_index)): + row = rows[j % N] + row[:nlevels] = list(data_index[j]) + for i in range(ncols): + row[nlevels + i] = data[i][j] + + if j >= N - 1 and j % N == N - 1: + writer.writerows(rows) + else: + for j in 
range(len(data_index)): + row = rows[j % N] + for i in range(ncols): + row[i] = data[i][j] + + if j >= N - 1 and j % N == N - 1: + writer.writerows(rows) + + if j >= 0 and (j < N - 1 or (j % N) != N - 1): + writer.writerows(rows[:((j + 1) % N)]) + + +@cython.boundscheck(False) +@cython.wraparound(False) +def convert_json_to_lines(object arr): + """ + replace comma separated json with line feeds, paying special attention + to quotes & brackets + """ + cdef: + Py_ssize_t i = 0, num_open_brackets_seen = 0, length + bint in_quotes = 0, is_escaping = 0 + ndarray[uint8_t] narr + unsigned char v, comma, left_bracket, right_brack, newline + + newline = ord('\n') + comma = ord(',') + left_bracket = ord('{') + right_bracket = ord('}') + quote = ord('"') + backslash = ord('\\') + + narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy() + length = narr.shape[0] + for i in range(length): + v = narr[i] + if v == quote and i > 0 and not is_escaping: + in_quotes = ~in_quotes + if v == backslash or is_escaping: + is_escaping = ~is_escaping + if v == comma: # commas that should be \n + if num_open_brackets_seen == 0 and not in_quotes: + narr[i] = newline + elif v == left_bracket: + if not in_quotes: + num_open_brackets_seen += 1 + elif v == right_bracket: + if not in_quotes: + num_open_brackets_seen -= 1 + + return narr.tostring().decode('utf-8') + + +# stata, pytables +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef Py_ssize_t max_len_string_array(pandas_string[:] arr): + """ return the maximum size of elements in a 1-dim string array """ + cdef: + Py_ssize_t i, m = 0, l = 0, length = arr.shape[0] + pandas_string v + + for i in range(length): + v = arr[i] + if PyString_Check(v): + l = PyString_GET_SIZE(v) + elif PyBytes_Check(v): + l = PyBytes_GET_SIZE(v) + elif PyUnicode_Check(v): + l = PyUnicode_GET_SIZE(v) + + if l > m: + m = l + + return m + + +# ------------------------------------------------------------------ +# PyTables Helpers + + 
+@cython.boundscheck(False) +@cython.wraparound(False) +def string_array_replace_from_nan_rep( + ndarray[object, ndim=1] arr, object nan_rep, + object replace=None): + """ + Replace the values in the array with 'replacement' if + they are 'nan_rep'. Return the same array. + """ + + cdef int length = arr.shape[0], i = 0 + if replace is None: + replace = np.nan + + for i from 0 <= i < length: + if arr[i] == nan_rep: + arr[i] = replace + + return arr + + +def convert_timestamps(ndarray values): + cdef: + object val, f, result + dict cache = {} + Py_ssize_t i, n = len(values) + ndarray[object] out + + # for HDFStore, a bit temporary but... + + from datetime import datetime + f = datetime.fromtimestamp + + out = np.empty(n, dtype='O') + + for i in range(n): + val = util.get_value_1d(values, i) + if val in cache: + out[i] = cache[val] + else: + cache[val] = out[i] = f(val) + + return out + + +@cython.wraparound(False) +@cython.boundscheck(False) +def fast_unique(ndarray[object] values): + cdef: + Py_ssize_t i, n = len(values) + list uniques = [] + dict table = {} + object val, stub = 0 + + for i from 0 <= i < n: + val = values[i] + if val not in table: + table[val] = stub + uniques.append(val) + try: + uniques.sort() + except Exception: + pass + + return uniques diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1632f5d016439..c634ebfb68b7a 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -20,20 +20,12 @@ np.import_ufunc() from libc.stdlib cimport malloc, free from cpython cimport (Py_INCREF, PyTuple_SET_ITEM, - PyList_Check, PyFloat_Check, + PyList_Check, PyFloat_Check, PyBool_Check, PyString_Check, PyBytes_Check, PyUnicode_Check, PyTuple_New, - PyObject_RichCompareBool, - PyBytes_GET_SIZE, - PyUnicode_GET_SIZE, - PyObject) - -try: - from cpython cimport PyString_GET_SIZE -except ImportError: - from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE + PyObject_RichCompareBool) cimport cpython @@ -138,28 +130,6 @@ def 
item_from_zerodim(object val): return util.unbox_if_zerodim(val) -@cython.wraparound(False) -@cython.boundscheck(False) -def fast_unique(ndarray[object] values): - cdef: - Py_ssize_t i, n = len(values) - list uniques = [] - dict table = {} - object val, stub = 0 - - for i from 0 <= i < n: - val = values[i] - if val not in table: - table[val] = stub - uniques.append(val) - try: - uniques.sort() - except Exception: - pass - - return uniques - - @cython.wraparound(False) @cython.boundscheck(False) def fast_unique_multiple(list arrays): @@ -379,30 +349,6 @@ def has_infs_f8(ndarray[float64_t] arr): return False -def convert_timestamps(ndarray values): - cdef: - object val, f, result - dict cache = {} - Py_ssize_t i, n = len(values) - ndarray[object] out - - # for HDFStore, a bit temporary but... - - from datetime import datetime - f = datetime.fromtimestamp - - out = np.empty(n, dtype='O') - - for i in range(n): - val = util.get_value_1d(values, i) - if val in cache: - out[i] = cache[val] - else: - cache[val] = out[i] = f(val) - - return out - - def maybe_indices_to_slice(ndarray[int64_t] indices, int max_len): cdef: Py_ssize_t i, n = len(indices) @@ -742,145 +688,6 @@ def clean_index_list(list obj): return np.asarray(obj), 0 -ctypedef fused pandas_string: - str - unicode - bytes - - -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef Py_ssize_t max_len_string_array(pandas_string[:] arr): - """ return the maximum size of elements in a 1-dim string array """ - cdef: - Py_ssize_t i, m = 0, l = 0, length = arr.shape[0] - pandas_string v - - for i in range(length): - v = arr[i] - if PyString_Check(v): - l = PyString_GET_SIZE(v) - elif PyBytes_Check(v): - l = PyBytes_GET_SIZE(v) - elif PyUnicode_Check(v): - l = PyUnicode_GET_SIZE(v) - - if l > m: - m = l - - return m - - -@cython.boundscheck(False) -@cython.wraparound(False) -def string_array_replace_from_nan_rep( - ndarray[object, ndim=1] arr, object nan_rep, - object replace=None): - """ - Replace the values in 
the array with 'replacement' if - they are 'nan_rep'. Return the same array. - """ - - cdef int length = arr.shape[0], i = 0 - if replace is None: - replace = np.nan - - for i from 0 <= i < length: - if arr[i] == nan_rep: - arr[i] = replace - - return arr - - -@cython.boundscheck(False) -@cython.wraparound(False) -def convert_json_to_lines(object arr): - """ - replace comma separated json with line feeds, paying special attention - to quotes & brackets - """ - cdef: - Py_ssize_t i = 0, num_open_brackets_seen = 0, length - bint in_quotes = 0, is_escaping = 0 - ndarray[uint8_t] narr - unsigned char v, comma, left_bracket, right_brack, newline - - newline = ord('\n') - comma = ord(',') - left_bracket = ord('{') - right_bracket = ord('}') - quote = ord('"') - backslash = ord('\\') - - narr = np.frombuffer(arr.encode('utf-8'), dtype='u1').copy() - length = narr.shape[0] - for i in range(length): - v = narr[i] - if v == quote and i > 0 and not is_escaping: - in_quotes = ~in_quotes - if v == backslash or is_escaping: - is_escaping = ~is_escaping - if v == comma: # commas that should be \n - if num_open_brackets_seen == 0 and not in_quotes: - narr[i] = newline - elif v == left_bracket: - if not in_quotes: - num_open_brackets_seen += 1 - elif v == right_bracket: - if not in_quotes: - num_open_brackets_seen -= 1 - - return narr.tostring().decode('utf-8') - - -@cython.boundscheck(False) -@cython.wraparound(False) -def write_csv_rows(list data, ndarray data_index, - int nlevels, ndarray cols, object writer): - - cdef int N, j, i, ncols - cdef list rows - cdef object val - - # In crude testing, N>100 yields little marginal improvement - N=100 - - # pre-allocate rows - ncols = len(cols) - rows = [[None] * (nlevels + ncols) for x in range(N)] - - j = -1 - if nlevels == 1: - for j in range(len(data_index)): - row = rows[j % N] - row[0] = data_index[j] - for i in range(ncols): - row[1 + i] = data[i][j] - - if j >= N - 1 and j % N == N - 1: - writer.writerows(rows) - elif nlevels > 
1: - for j in range(len(data_index)): - row = rows[j % N] - row[:nlevels] = list(data_index[j]) - for i in range(ncols): - row[nlevels + i] = data[i][j] - - if j >= N - 1 and j % N == N - 1: - writer.writerows(rows) - else: - for j in range(len(data_index)): - row = rows[j % N] - for i in range(ncols): - row[i] = data[i][j] - - if j >= N - 1 and j % N == N - 1: - writer.writerows(rows) - - if j >= 0 and (j < N - 1 or (j % N) != N - 1): - writer.writerows(rows[:((j + 1) % N)]) - - # ------------------------------------------------------------------------------ # Groupby-related functions diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index e15f276b39bf8..0015a58d9de34 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -6,7 +6,7 @@ from tslibs.nattype import NaT from tslibs.conversion cimport convert_to_tsobject from tslibs.timedeltas cimport convert_to_timedelta64 from tslibs.timezones cimport get_timezone, tz_compare -from datetime import datetime, timedelta + iNaT = util.get_nat() cdef bint PY2 = sys.version_info[0] == 2 @@ -1394,30 +1394,6 @@ def convert_sql_column(x): return maybe_convert_objects(x, try_float=1) -def sanitize_objects(ndarray[object] values, set na_values, - convert_empty=True): - cdef: - Py_ssize_t i, n - object val, onan - Py_ssize_t na_count = 0 - dict memo = {} - - n = len(values) - onan = np.nan - - for i from 0 <= i < n: - val = values[i] - if (convert_empty and val == '') or (val in na_values): - values[i] = onan - na_count += 1 - elif val in memo: - values[i] = memo[val] - else: - memo[val] = val - - return na_count - - def maybe_convert_bool(ndarray[object] arr, true_values=None, false_values=None): cdef: @@ -1443,7 +1419,7 @@ def maybe_convert_bool(ndarray[object] arr, for i from 0 <= i < n: val = arr[i] - if cpython.PyBool_Check(val): + if PyBool_Check(val): if val is True: result[i] = 1 else: diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 
2293032ebb8a1..04d0a048bfdd3 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -38,7 +38,7 @@ _stringify_path) from pandas.io.formats.printing import adjoin, justify, pprint_thing from pandas.io.formats.common import get_level_lengths -from pandas._libs import lib +from pandas._libs import lib, io_helper as libio from pandas._libs.tslib import (iNaT, Timestamp, Timedelta, format_array_from_datetime) from pandas.core.indexes.datetimes import DatetimeIndex @@ -1789,7 +1789,8 @@ def _save_chunk(self, start_i, end_i): date_format=self.date_format, quoting=self.quoting) - lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer) + libio.write_csv_rows(self.data, ix, self.nlevels, + self.cols, self.writer) # ---------------------------------------------------------------------- diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index 595031b04e367..fa03e7bb5caa2 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -5,7 +5,7 @@ from collections import defaultdict import numpy as np -from pandas._libs.lib import convert_json_to_lines +from pandas._libs.io_helper import convert_json_to_lines from pandas import compat, DataFrame diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 5135bb01fb378..545b40f2cc96b 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -43,6 +43,7 @@ import pandas._libs.lib as lib import pandas._libs.parsers as parsers +from pandas._libs import io_helper as libio from pandas._libs.tslibs import parsing # BOM character (byte order mark) @@ -1596,11 +1597,11 @@ def _infer_types(self, values, na_values, try_num_bool=True): except Exception: result = values if values.dtype == np.object_: - na_count = lib.sanitize_objects(result, na_values, False) + na_count = libio.sanitize_objects(result, na_values, False) else: result = values if values.dtype == np.object_: - na_count = lib.sanitize_objects(values, na_values, False) + na_count = 
libio.sanitize_objects(values, na_values, False) if result.dtype == np.object_ and try_num_bool: result = lib.maybe_convert_bool(values, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 106823199ee93..0558b4d340a17 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -47,7 +47,7 @@ from pandas.core.config import get_option from pandas.core.computation.pytables import Expr, maybe_expression -from pandas._libs import algos, lib +from pandas._libs import algos, lib, io_helper as libio from pandas._libs.tslibs import timezones from distutils.version import LooseVersion @@ -3843,7 +3843,7 @@ def read(self, where=None, columns=None, **kwargs): # need a better algorithm tuple_index = long_index.values - unique_tuples = lib.fast_unique(tuple_index) + unique_tuples = libio.fast_unique(tuple_index) unique_tuples = com._asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) @@ -4561,7 +4561,7 @@ def _convert_string_array(data, encoding, itemsize=None): # create the sized dtype if itemsize is None: - itemsize = lib.max_len_string_array(_ensure_object(data.ravel())) + itemsize = libio.max_len_string_array(_ensure_object(data.ravel())) data = np.asarray(data, dtype="S%d" % itemsize) return data @@ -4590,7 +4590,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None): encoding = _ensure_encoding(encoding) if encoding is not None and len(data): - itemsize = lib.max_len_string_array(_ensure_object(data)) + itemsize = libio.max_len_string_array(_ensure_object(data)) if compat.PY3: dtype = "U{0}".format(itemsize) else: @@ -4604,7 +4604,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None): if nan_rep is None: nan_rep = 'nan' - data = lib.string_array_replace_from_nan_rep(data, nan_rep) + data = libio.string_array_replace_from_nan_rep(data, nan_rep) return data.reshape(shape) @@ -4621,7 +4621,7 @@ def _get_converter(kind, encoding): if kind == 'datetime64': return lambda x: np.asarray(x, dtype='M8[ns]') elif 
kind == 'datetime': - return lib.convert_timestamps + return libio.convert_timestamps elif kind == 'string': return lambda x: _unconvert_string_array(x, encoding=encoding) else: # pragma: no cover diff --git a/pandas/io/stata.py b/pandas/io/stata.py index b409cf20e9a09..60af6242ee56d 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -16,7 +16,8 @@ import numpy as np from dateutil.relativedelta import relativedelta -from pandas._libs.lib import max_len_string_array, infer_dtype +from pandas._libs.lib import infer_dtype +from pandas._libs.io_helper import max_len_string_array from pandas._libs.tslib import NaT, Timestamp import pandas as pd diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index 10061204df42a..66e9886a3d998 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -3,7 +3,7 @@ import pytest import numpy as np -from pandas._libs import lib +from pandas._libs import lib, io_helper as libio import pandas.util.testing as tm @@ -12,19 +12,19 @@ class TestMisc(object): def test_max_len_string_array(self): arr = a = np.array(['foo', 'b', np.nan], dtype='object') - assert lib.max_len_string_array(arr) == 3 + assert libio.max_len_string_array(arr) == 3 # unicode arr = a.astype('U').astype(object) - assert lib.max_len_string_array(arr) == 3 + assert libio.max_len_string_array(arr) == 3 # bytes for python3 arr = a.astype('S').astype(object) - assert lib.max_len_string_array(arr) == 3 + assert libio.max_len_string_array(arr) == 3 # raises pytest.raises(TypeError, - lambda: lib.max_len_string_array(arr.astype('U'))) + lambda: libio.max_len_string_array(arr.astype('U'))) def test_fast_unique_multiple_list_gen_sort(self): keys = [['p', 'a'], ['n', 'd'], ['a', 's']] diff --git a/setup.py b/setup.py index 7ade1544ec5cd..c889bac898e33 100755 --- a/setup.py +++ b/setup.py @@ -307,6 +307,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/join.pyx', 'pandas/_libs/indexing.pyx', 'pandas/_libs/interval.pyx', + 
'pandas/_libs/io_helper.pyx', 'pandas/_libs/hashing.pyx', 'pandas/_libs/missing.pyx', 'pandas/_libs/reduction.pyx', @@ -486,6 +487,9 @@ def pxd(name): 'pyxfile': '_libs/interval', 'pxdfiles': ['_libs/hashtable'], 'depends': _pxi_dep['interval']}, + '_libs.io_helper': { + 'pyxfile': '_libs/io_helper', + 'pxdfiles': ['_libs/src/util']}, '_libs.join': { 'pyxfile': '_libs/join', 'pxdfiles': ['_libs/src/util', '_libs/hashtable'],
Avoids overlap with #19360 where possible. Moved functions are cut/paste verbatim. I'm open to a better name than io_helpers, just wanted to avoid overlap with the stdlib name "io". Besides refactoring for its own sake, part of the goal is to get lib down the point where we can merge src/inference back into it; the split file makes it tough to track dependencies, and AFAICT it makes the move towards using cythonize easier.
https://api.github.com/repos/pandas-dev/pandas/pulls/19364
2018-01-23T17:06:24Z
2018-01-26T17:24:40Z
null
2018-02-04T16:41:33Z
remove unused convert_sql_column
diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index e15f276b39bf8..39656239aae76 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -1390,10 +1390,6 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, return objects -def convert_sql_column(x): - return maybe_convert_objects(x, try_float=1) - - def sanitize_objects(ndarray[object] values, set na_values, convert_empty=True): cdef: diff --git a/pandas/tests/dtypes/test_io.py b/pandas/tests/dtypes/test_io.py deleted file mode 100644 index 06b61371c9a0b..0000000000000 --- a/pandas/tests/dtypes/test_io.py +++ /dev/null @@ -1,73 +0,0 @@ -# -*- coding: utf-8 -*- - -import numpy as np -import pandas._libs.lib as lib -import pandas.util.testing as tm - -from pandas.compat import long, u - - -class TestParseSQL(object): - - def test_convert_sql_column_floats(self): - arr = np.array([1.5, None, 3, 4.2], dtype=object) - result = lib.convert_sql_column(arr) - expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8') - tm.assert_numpy_array_equal(result, expected) - - def test_convert_sql_column_strings(self): - arr = np.array(['1.5', None, '3', '4.2'], dtype=object) - result = lib.convert_sql_column(arr) - expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - def test_convert_sql_column_unicode(self): - arr = np.array([u('1.5'), None, u('3'), u('4.2')], - dtype=object) - result = lib.convert_sql_column(arr) - expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')], - dtype=object) - tm.assert_numpy_array_equal(result, expected) - - def test_convert_sql_column_ints(self): - arr = np.array([1, 2, 3, 4], dtype='O') - arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O') - result = lib.convert_sql_column(arr) - result2 = lib.convert_sql_column(arr2) - expected = np.array([1, 2, 3, 4], dtype='i8') - tm.assert_numpy_array_equal(result, expected) - tm.assert_numpy_array_equal(result2, expected) 
- - arr = np.array([1, 2, 3, None, 4], dtype='O') - result = lib.convert_sql_column(arr) - expected = np.array([1, 2, 3, np.nan, 4], dtype='f8') - tm.assert_numpy_array_equal(result, expected) - - def test_convert_sql_column_longs(self): - arr = np.array([long(1), long(2), long(3), long(4)], dtype='O') - result = lib.convert_sql_column(arr) - expected = np.array([1, 2, 3, 4], dtype='i8') - tm.assert_numpy_array_equal(result, expected) - - arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O') - result = lib.convert_sql_column(arr) - expected = np.array([1, 2, 3, np.nan, 4], dtype='f8') - tm.assert_numpy_array_equal(result, expected) - - def test_convert_sql_column_bools(self): - arr = np.array([True, False, True, False], dtype='O') - result = lib.convert_sql_column(arr) - expected = np.array([True, False, True, False], dtype=bool) - tm.assert_numpy_array_equal(result, expected) - - arr = np.array([True, False, None, False], dtype='O') - result = lib.convert_sql_column(arr) - expected = np.array([True, False, np.nan, False], dtype=object) - tm.assert_numpy_array_equal(result, expected) - - def test_convert_sql_column_decimals(self): - from decimal import Decimal - arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')]) - result = lib.convert_sql_column(arr) - expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8') - tm.assert_numpy_array_equal(result, expected)
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19363
2018-01-23T16:41:44Z
2018-01-24T11:10:06Z
2018-01-24T11:10:06Z
2018-02-11T21:59:32Z
Remove unused functions, cimports
diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index f8371d4855803..15aef867ba413 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -73,10 +73,6 @@ cpdef object get_value_box(ndarray arr, object loc): return util.get_value_1d(arr, i) -def set_value_at(ndarray arr, object loc, object val): - return util.set_value_at(arr, loc, val) - - # Don't populate hash tables in monotonic indexes larger than this _SIZE_CUTOFF = 1000000 @@ -404,18 +400,6 @@ cdef Py_ssize_t _bin_search(ndarray values, object val) except -1: else: return mid + 1 -_pad_functions = { - 'object': algos.pad_object, - 'int64': algos.pad_int64, - 'float64': algos.pad_float64 -} - -_backfill_functions = { - 'object': algos.backfill_object, - 'int64': algos.backfill_int64, - 'float64': algos.backfill_float64 -} - cdef class DatetimeEngine(Int64Engine): @@ -566,7 +550,7 @@ cpdef convert_scalar(ndarray arr, object value): # we don't turn bools into int/float/complex if arr.descr.type_num == NPY_DATETIME: - if isinstance(value, np.ndarray): + if util.is_array(value): pass elif isinstance(value, (datetime, np.datetime64, date)): return Timestamp(value).value @@ -577,7 +561,7 @@ cpdef convert_scalar(ndarray arr, object value): raise ValueError("cannot set a Timestamp with a non-timestamp") elif arr.descr.type_num == NPY_TIMEDELTA: - if isinstance(value, np.ndarray): + if util.is_array(value): pass elif isinstance(value, timedelta): return Timedelta(value).value diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 93a45335efc9c..a5abe324254ce 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -4,6 +4,7 @@ cimport cython from cython cimport Py_ssize_t from cpython cimport PyObject +from cpython.slice cimport PySlice_Check cdef extern from "Python.h": Py_ssize_t PY_SSIZE_T_MAX @@ -32,7 +33,7 @@ cdef class BlockPlacement: self._has_slice = False self._has_array = False - if isinstance(val, slice): + if PySlice_Check(val): slc = 
slice_canonize(val) if slc.start != slc.stop: @@ -118,7 +119,7 @@ cdef class BlockPlacement: else: val = self._as_array[loc] - if not isinstance(val, slice) and val.ndim == 0: + if not PySlice_Check(val) and val.ndim == 0: return val return BlockPlacement(val) @@ -288,7 +289,7 @@ def slice_getitem(slice slc not None, ind): s_start, s_stop, s_step, s_len = slice_get_indices_ex(slc) - if isinstance(ind, slice): + if PySlice_Check(ind): ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind, s_len) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1632f5d016439..e337c2b25b887 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -17,8 +17,6 @@ from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM, np.import_array() np.import_ufunc() -from libc.stdlib cimport malloc, free - from cpython cimport (Py_INCREF, PyTuple_SET_ITEM, PyList_Check, PyFloat_Check, PyString_Check, @@ -27,8 +25,7 @@ from cpython cimport (Py_INCREF, PyTuple_SET_ITEM, PyTuple_New, PyObject_RichCompareBool, PyBytes_GET_SIZE, - PyUnicode_GET_SIZE, - PyObject) + PyUnicode_GET_SIZE) try: from cpython cimport PyString_GET_SIZE @@ -37,17 +34,12 @@ except ImportError: cimport cpython -isnan = np.isnan -cdef double NaN = <double> np.NaN -cdef double nan = NaN from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, PyTime_Check, PyDelta_Check, PyDateTime_IMPORT) PyDateTime_IMPORT -from tslibs.np_datetime cimport get_timedelta64_value, get_datetime64_value - from tslib import NaT, Timestamp, Timedelta, array_to_datetime from interval import Interval from missing cimport checknull diff --git a/pandas/_libs/reduction.pyx b/pandas/_libs/reduction.pyx index d51583c7aa473..4ca87a777e497 100644 --- a/pandas/_libs/reduction.pyx +++ b/pandas/_libs/reduction.pyx @@ -24,9 +24,9 @@ is_numpy_prior_1_6_2 = LooseVersion(np.__version__) < '1.6.2' cdef _get_result_array(object obj, Py_ssize_t size, Py_ssize_t cnt): - if isinstance(obj, np.ndarray) \ - or isinstance(obj, list) and 
len(obj) == cnt \ - or getattr(obj, 'shape', None) == (cnt,): + if (util.is_array(obj) or + isinstance(obj, list) and len(obj) == cnt or + getattr(obj, 'shape', None) == (cnt,)): raise ValueError('function does not reduce') return np.empty(size, dtype='O') @@ -150,8 +150,7 @@ cdef class Reducer: else: res = self.f(chunk) - if hasattr(res, 'values') and isinstance( - res.values, np.ndarray): + if hasattr(res, 'values') and util.is_array(res.values): res = res.values if i == 0: result = _get_result_array(res, @@ -433,10 +432,10 @@ cdef class SeriesGrouper: cdef inline _extract_result(object res): """ extract the result object, it might be a 0-dim ndarray or a len-1 0-dim, or a scalar """ - if hasattr(res, 'values') and isinstance(res.values, np.ndarray): + if hasattr(res, 'values') and util.is_array(res.values): res = res.values if not np.isscalar(res): - if isinstance(res, np.ndarray): + if util.is_array(res): if res.ndim == 0: res = res.item() elif res.ndim == 1 and len(res) == 1: diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index e15f276b39bf8..52ae32023e2b4 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -10,10 +10,9 @@ from datetime import datetime, timedelta iNaT = util.get_nat() cdef bint PY2 = sys.version_info[0] == 2 +cdef double nan = <double> np.NaN -from util cimport (UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX, - INT8_MIN, INT8_MAX, INT16_MIN, INT16_MAX, - INT32_MAX, INT32_MIN, INT64_MAX, INT64_MIN) +from util cimport UINT8_MAX, UINT64_MAX, INT64_MAX, INT64_MIN # core.common import for fast inference checks @@ -331,7 +330,7 @@ def infer_dtype(object value, bint skipna=False): bint seen_pdnat = False bint seen_val = False - if isinstance(value, np.ndarray): + if util.is_array(value): values = value elif hasattr(value, 'dtype'): @@ -349,7 +348,7 @@ def infer_dtype(object value, bint skipna=False): raise ValueError("cannot infer type for {0}".format(type(value))) else: - if not 
isinstance(value, list): + if not PyList_Check(value): value = list(value) from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 700ba5b6e48f7..a0ac6389c0646 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -306,8 +306,8 @@ class _BaseOffset(object): def __call__(self, other): return self.apply(other) - def __mul__(self, someInt): - return self.__class__(n=someInt * self.n, normalize=self.normalize, + def __mul__(self, other): + return self.__class__(n=other * self.n, normalize=self.normalize, **self.kwds) def __neg__(self): @@ -374,8 +374,8 @@ class _BaseOffset(object): class BaseOffset(_BaseOffset): # Here we add __rfoo__ methods that don't play well with cdef classes - def __rmul__(self, someInt): - return self.__mul__(someInt) + def __rmul__(self, other): + return self.__mul__(other) def __radd__(self, other): return self.__add__(other) @@ -840,6 +840,8 @@ cpdef int roll_qtrday(datetime other, int n, int month, object day_opt, ------- n : int number of periods to increment """ + cdef: + int months_since # TODO: Merge this with roll_yearday by setting modby=12 there? # code de-duplication versus perf hit? 
# TODO: with small adjustments this could be used in shift_quarters diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 3ca150cda83c7..9463512ac11de 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1936,10 +1936,6 @@ def _convert_key(self, key, is_setter=False): return key -# 32-bit floating point machine epsilon -_eps = 1.1920929e-07 - - def length_of_indexer(indexer, target=None): """return the length of a single non-tuple indexer which could be a slice """ @@ -1992,19 +1988,6 @@ def convert_to_index_sliceable(obj, key): return None -def is_index_slice(obj): - def _is_valid_index(x): - return (is_integer(x) or is_float(x) and - np.allclose(x, int(x), rtol=_eps, atol=0)) - - def _crit(v): - return v is None or _is_valid_index(v) - - both_none = obj.start is None and obj.stop is None - - return not both_none and (_crit(obj.start) and _crit(obj.stop)) - - def check_bool_indexer(ax, key): # boolean indexing, need to check that the data are aligned, otherwise # disallowed diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 4b649927f8f72..257b0791e4841 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -19,7 +19,7 @@ from pandas.core import generic import pandas.core.common as com import pandas.core.ops as ops -import pandas._libs.index as _index +import pandas._libs.index as libindex from pandas.util._decorators import Appender from pandas.core.sparse.array import ( @@ -560,7 +560,7 @@ def _set_values(self, key, value): key = key.values values = self.values.to_dense() - values[key] = _index.convert_scalar(values, value) + values[key] = libindex.convert_scalar(values, value) values = SparseArray(values, fill_value=self.fill_value, kind=self.kind) self._data = SingleBlockManager(values, self.index)
Miscellaneous cleanup, use `util.is_array` instead of `isinstance`, that kind of thing. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19360
2018-01-23T04:42:09Z
2018-01-24T01:03:40Z
2018-01-24T01:03:40Z
2018-02-11T21:59:35Z
Added cast blacklist for certain transform agg funcs
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 7f697003f44b9..71492154419fb 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -506,6 +506,7 @@ Groupby/Resample/Rolling - Fixed regression in :func:`DataFrame.groupby` which would not emit an error when called with a tuple key not in the index (:issue:`18798`) - Bug in :func:`DataFrame.resample` which silently ignored unsupported (or mistyped) options for ``label``, ``closed`` and ``convention`` (:issue:`19303`) - Bug in :func:`DataFrame.groupby` where tuples were interpreted as lists of keys rather than as keys (:issue:`17979`, :issue:`18249`) +- Bug in ``transform`` where particular aggregation functions were being incorrectly cast to match the dtype(s) of the grouped data (:issue:`19200`) - Sparse diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index fc7a0faef0cf6..2c1deb9db7bba 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -345,6 +345,8 @@ _cython_transforms = frozenset(['cumprod', 'cumsum', 'shift', 'cummin', 'cummax']) +_cython_cast_blacklist = frozenset(['rank', 'count', 'size']) + class Grouper(object): """ @@ -965,6 +967,21 @@ def _try_cast(self, result, obj, numeric_only=False): return result + def _transform_should_cast(self, func_nm): + """ + Parameters: + ----------- + func_nm: str + The name of the aggregation function being performed + + Returns: + -------- + bool + Whether transform should attempt to cast the result of aggregation + """ + return (self.size().fillna(0) > 0).any() and (func_nm not in + _cython_cast_blacklist) + def _cython_transform(self, how, numeric_only=True): output = collections.OrderedDict() for name, obj in self._iterate_slices(): @@ -3333,7 +3350,7 @@ def transform(self, func, *args, **kwargs): else: # cythonized aggregation and merge return self._transform_fast( - lambda: getattr(self, func)(*args, **kwargs)) + lambda: getattr(self, func)(*args, **kwargs), func) # reg 
transform klass = self._selected_obj.__class__ @@ -3364,7 +3381,7 @@ def transform(self, func, *args, **kwargs): result.index = self._selected_obj.index return result - def _transform_fast(self, func): + def _transform_fast(self, func, func_nm): """ fast version of transform, only applicable to builtin/cythonizable functions @@ -3373,7 +3390,7 @@ def _transform_fast(self, func): func = getattr(self, func) ids, _, ngroup = self.grouper.group_info - cast = (self.size().fillna(0) > 0).any() + cast = self._transform_should_cast(func_nm) out = algorithms.take_1d(func().values, ids) if cast: out = self._try_cast(out, self.obj) @@ -4127,15 +4144,15 @@ def transform(self, func, *args, **kwargs): if not result.columns.equals(obj.columns): return self._transform_general(func, *args, **kwargs) - return self._transform_fast(result, obj) + return self._transform_fast(result, obj, func) - def _transform_fast(self, result, obj): + def _transform_fast(self, result, obj, func_nm): """ Fast transform path for aggregations """ # if there were groups with no observations (Categorical only?) 
# try casting data to original dtype - cast = (self.size().fillna(0) > 0).any() + cast = self._transform_should_cast(func_nm) # for each col, reshape to to size of original frame # by take operation diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 8f72da293a50c..4159d0f709a13 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -582,3 +582,28 @@ def test_transform_with_non_scalar_group(self): 'group.*', df.groupby(axis=1, level=1).transform, lambda z: z.div(z.sum(axis=1), axis=0)) + + @pytest.mark.parametrize('cols,exp,comp_func', [ + ('a', pd.Series([1, 1, 1], name='a'), tm.assert_series_equal), + (['a', 'c'], pd.DataFrame({'a': [1, 1, 1], 'c': [1, 1, 1]}), + tm.assert_frame_equal) + ]) + @pytest.mark.parametrize('agg_func', [ + 'count', 'rank', 'size']) + def test_transform_numeric_ret(self, cols, exp, comp_func, agg_func): + if agg_func == 'size' and isinstance(cols, list): + pytest.xfail("'size' transformation not supported with " + "NDFrameGroupy") + + # GH 19200 + df = pd.DataFrame( + {'a': pd.date_range('2018-01-01', periods=3), + 'b': range(3), + 'c': range(7, 10)}) + + result = df.groupby('b')[cols].transform(agg_func) + + if agg_func == 'rank': + exp = exp.astype('float') + + comp_func(result, exp)
- [X] closes #19200 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19355
2018-01-23T01:25:58Z
2018-01-23T11:00:56Z
2018-01-23T11:00:56Z
2018-01-23T17:09:45Z
ENH: add IntervalIndex.get_loc_exact to look for exact matches only
diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 77e013e1e4fb0..3070765b3db22 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -229,6 +229,10 @@ def time_getitem_list(self, monotonic): def time_loc_list(self, monotonic): monotonic.loc[80000:] + def time_get_loc_exact(self, monotonic): + interval = monotonic.index[80000] + monotonic.index.get_loc_exact(interval) + class PanelIndexing(object): diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index acab9d0bbebf8..bc0d92ad0a385 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -323,6 +323,7 @@ Other Enhancements - ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`) - :class:`IntervalIndex` and its associated constructor methods (``from_arrays``, ``from_breaks``, ``from_tuples``) have gained a ``dtype`` parameter (:issue:`19262`) +- New :meth:`IntervalIndex.get_loc_exact` has been added to find exact Interval matches only (:issue:`19349`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 15df77bf772dc..2a762ef4317d0 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2536,6 +2536,21 @@ def get_loc(self, key, method=None, tolerance=None): raise KeyError(key) return loc + def get_loc_exact(self, key, method=None): + """Get integer location, slice or boolean mask for exact + matches only. + + This method dispatches to :meth:`get_loc`. The use for + ``get_loc_exact`` is mainly in :class:`IntervalIndex`, + when a exact match is needed. + + See Also + -------- + get_loc + pandas.IntervalIndex.get_loc_exact + """ + return self.get_loc(key, method=method) + def get_value(self, series, key): """ Fast lookup of value from 1-dimensional ndarray. 
Only use this if you diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 3bf783b5a2faa..2118afa374f90 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -899,15 +899,11 @@ def _searchsorted_monotonic(self, label, side, exclude_label=False): def _get_loc_only_exact_matches(self, key): if isinstance(key, Interval): - if not self.is_unique: raise ValueError("cannot index with a slice Interval" " and a non-unique index") - - # TODO: this expands to a tuple index, see if we can - # do better - return Index(self._multiindex.values).get_loc(key) - raise KeyError + return self.get_loc_exact(key) + raise KeyError(key) def _find_non_overlapping_monotonic_bounds(self, key): if isinstance(key, IntervalMixin): @@ -970,6 +966,10 @@ def get_loc(self, key, method=None): >>> overlapping_index = pd.IntervalIndex([i2, i3]) >>> overlapping_index.get_loc(1.5) array([0, 1], dtype=int64) + + See Also + -------- + get_loc_exact : Exact matches only """ self._check_method(method) @@ -1003,6 +1003,59 @@ def get_loc(self, key, method=None): else: return self._engine.get_loc(key) + def get_loc_exact(self, key, method=None): + """Get integer location, slice or boolean mask for exact + Interval matches only. + + Parameters + ---------- + key : Interval + The label we want to find locations for. Must have type + :class:`Interval` + method : {None}, optional + * default: matches where the label exactly matches a given + :class:`Interval`. 
+ + Returns + ------- + loc : int if unique index, slice if monotonic index, else mask + + Raises + ------ + KeyError if key is not found + + Examples + --------- + >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2) + >>> index = pd.IntervalIndex([i1, i2]) + >>> index.get_loc_exact(i1) + 0 + + If an exact match is not found, a KeyError is raised + + >>> index.get_loc_exact(pd.Interval(0.5, 1.5)) + KeyError: Interval(0.5, 1.5, closed='right') + + If a label is in several locations, you get all the relevant + locations. + + >>> index = pd.IntervalIndex([i1, i2, i1]) + >>> index.get_loc_exact(i1) + array([0, 2], dtype=int64) + + See Also + -------- + get_loc + """ + + all_matches = self.get_loc(key, method=method) + exact_matches = self[all_matches] == key + if np.all(exact_matches): + return all_matches + elif np.any(exact_matches): + return all_matches[exact_matches] + raise KeyError(key) + def get_value(self, series, key): if com.is_bool_indexer(key): loc = key diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 71a6f78125004..77181c39aa665 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -497,6 +497,24 @@ def test_get_loc_interval(self): pytest.raises(KeyError, self.index.get_loc, Interval(-1, 0, 'left')) + def test_get_loc_exact(self): + # GH19353 + assert self.index.get_loc_exact(Interval(0, 1)) == 0 + with pytest.raises(KeyError): + self.index.get_loc_exact(1) + with pytest.raises(KeyError): + self.index.get_loc_exact(Interval(0, 1, 'left')) + with pytest.raises(KeyError): + self.index.get_loc_exact(Interval(0, 0.5)) + with pytest.raises(KeyError): + self.index.get_loc_exact(Interval(2, 3)) + with pytest.raises(KeyError): + self.index.get_loc_exact(Interval(-1, 0, 'left')) + + # The below tests if get_loc_exact interferes with caching + # used for index.get_loc. 
See #19353#issuecomment-364295029 + assert self.index.get_loc(Interval(0, 1)) == 0 + # To be removed, replaced by test_interval_new.py (see #16316, #16386) def test_get_indexer(self): actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
- [x] closes #19349 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR add a 'exact' option to the parameter ``method`` on ``IntervalIndex.get_loc`` for accepting exact matches only. In addition to it's direct advantage, it will later make it possible to make a PR to allow ``IntervalIndex.get_indexer`` calls with the other index having non-Interval values, and hence allow mixing mix Interval with non-intervals in ``pd.Index(..., dtype=object)`` instances. This would fix the remaining issue with #19021.
https://api.github.com/repos/pandas-dev/pandas/pulls/19353
2018-01-22T21:18:53Z
2018-05-18T18:09:19Z
null
2018-05-18T18:09:19Z
TST: Clean up pickle compression tests
diff --git a/pandas/conftest.py b/pandas/conftest.py index 4cf5c9da44697..4fe66d4cf7e1f 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -4,6 +4,7 @@ import numpy import pandas import dateutil +import pandas.util._test_decorators as td def pytest_addoption(parser): @@ -73,3 +74,22 @@ def ip(): is_dateutil_gt_261 = pytest.mark.skipif( LooseVersion(dateutil.__version__) <= LooseVersion('2.6.1'), reason="dateutil stable version") + + +@pytest.fixture(params=[None, 'gzip', 'bz2', 'zip', + pytest.param('xz', marks=td.skip_if_no_lzma)]) +def compression(request): + """ + Fixture for trying common compression types in compression tests + """ + return request.param + + +@pytest.fixture(params=[None, 'gzip', 'bz2', + pytest.param('xz', marks=td.skip_if_no_lzma)]) +def compression_no_zip(request): + """ + Fixture for trying common compression types in compression tests + except zip + """ + return request.param diff --git a/pandas/tests/conftest.py b/pandas/tests/conftest.py deleted file mode 100644 index 8f5d963927f60..0000000000000 --- a/pandas/tests/conftest.py +++ /dev/null @@ -1,11 +0,0 @@ -import pytest -import pandas.util._test_decorators as td - - -@pytest.fixture(params=[None, 'gzip', 'bz2', - pytest.param('xz', marks=td.skip_if_no_lzma)]) -def compression(request): - """ - Fixture for trying common compression types in compression tests - """ - return request.param diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index d89d57947bde2..a3ba34ae92283 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -919,7 +919,7 @@ def test_to_csv_path_is_none(self): recons = pd.read_csv(StringIO(csv_str), index_col=0) assert_frame_equal(self.frame, recons) - def test_to_csv_compression(self, compression): + def test_to_csv_compression(self, compression_no_zip): df = DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], @@ -927,19 +927,20 @@ def test_to_csv_compression(self, 
compression): with ensure_clean() as filename: - df.to_csv(filename, compression=compression) + df.to_csv(filename, compression=compression_no_zip) # test the round trip - to_csv -> read_csv - rs = read_csv(filename, compression=compression, index_col=0) + rs = read_csv(filename, compression=compression_no_zip, + index_col=0) assert_frame_equal(df, rs) # explicitly make sure file is compressed - with tm.decompress_file(filename, compression) as fh: + with tm.decompress_file(filename, compression_no_zip) as fh: text = fh.read().decode('utf8') for col in df.columns: assert col in text - with tm.decompress_file(filename, compression) as fh: + with tm.decompress_file(filename, compression_no_zip) as fh: assert_frame_equal(df, read_csv(fh, index_col=0)) def test_to_csv_compression_value_error(self): diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index 2cf4c435bdc12..08335293f9292 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -5,17 +5,18 @@ from pandas.util.testing import assert_frame_equal, assert_raises_regex -def test_compression_roundtrip(compression): +def test_compression_roundtrip(compression_no_zip): df = pd.DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) with tm.ensure_clean() as path: - df.to_json(path, compression=compression) - assert_frame_equal(df, pd.read_json(path, compression=compression)) + df.to_json(path, compression=compression_no_zip) + assert_frame_equal(df, pd.read_json(path, + compression=compression_no_zip)) # explicitly ensure file was compressed. 
- with tm.decompress_file(path, compression) as fh: + with tm.decompress_file(path, compression_no_zip) as fh: result = fh.read().decode('utf8') assert_frame_equal(df, pd.read_json(result)) @@ -40,7 +41,7 @@ def test_read_zipped_json(): assert_frame_equal(uncompressed_df, compressed_df) -def test_with_s3_url(compression): +def test_with_s3_url(compression_no_zip): boto3 = pytest.importorskip('boto3') pytest.importorskip('s3fs') moto = pytest.importorskip('moto') @@ -51,31 +52,36 @@ def test_with_s3_url(compression): bucket = conn.create_bucket(Bucket="pandas-test") with tm.ensure_clean() as path: - df.to_json(path, compression=compression) + df.to_json(path, compression=compression_no_zip) with open(path, 'rb') as f: bucket.put_object(Key='test-1', Body=f) roundtripped_df = pd.read_json('s3://pandas-test/test-1', - compression=compression) + compression=compression_no_zip) assert_frame_equal(df, roundtripped_df) -def test_lines_with_compression(compression): +def test_lines_with_compression(compression_no_zip): + with tm.ensure_clean() as path: df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') - df.to_json(path, orient='records', lines=True, compression=compression) + df.to_json(path, orient='records', lines=True, + compression=compression_no_zip) roundtripped_df = pd.read_json(path, lines=True, - compression=compression) + compression=compression_no_zip) assert_frame_equal(df, roundtripped_df) -def test_chunksize_with_compression(compression): +def test_chunksize_with_compression(compression_no_zip): + with tm.ensure_clean() as path: df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}') - df.to_json(path, orient='records', lines=True, compression=compression) + df.to_json(path, orient='records', lines=True, + compression=compression_no_zip) - roundtripped_df = pd.concat(pd.read_json(path, lines=True, chunksize=1, - compression=compression)) + res = pd.read_json(path, lines=True, chunksize=1, + compression=compression_no_zip) + roundtripped_df = 
pd.concat(res) assert_frame_equal(df, roundtripped_df) diff --git a/pandas/tests/io/test_pickle.py b/pandas/tests/io/test_pickle.py index 5d2ba8e4fa712..2ba3e174404c7 100644 --- a/pandas/tests/io/test_pickle.py +++ b/pandas/tests/io/test_pickle.py @@ -352,42 +352,7 @@ def compress_file(self, src_path, dest_path, compression): f.write(fh.read()) f.close() - def decompress_file(self, src_path, dest_path, compression): - if compression is None: - shutil.copyfile(src_path, dest_path) - return - - if compression == 'gzip': - import gzip - f = gzip.open(src_path, "r") - elif compression == 'bz2': - import bz2 - f = bz2.BZ2File(src_path, "r") - elif compression == 'zip': - import zipfile - zip_file = zipfile.ZipFile(src_path) - zip_names = zip_file.namelist() - if len(zip_names) == 1: - f = zip_file.open(zip_names.pop()) - else: - raise ValueError('ZIP file {} error. Only one file per ZIP.' - .format(src_path)) - elif compression == 'xz': - lzma = pandas.compat.import_lzma() - f = lzma.LZMAFile(src_path, "r") - else: - msg = 'Unrecognized compression type: {}'.format(compression) - raise ValueError(msg) - - with open(dest_path, "wb") as fh: - fh.write(f.read()) - f.close() - - @pytest.mark.parametrize('compression', [ - None, 'gzip', 'bz2', - pytest.param('xz', marks=td.skip_if_no_lzma) # issue 11666 - ]) - def test_write_explicit(self, compression, get_random_path): + def test_write_explicit(self, compression_no_zip, get_random_path): base = get_random_path path1 = base + ".compressed" path2 = base + ".raw" @@ -396,10 +361,12 @@ def test_write_explicit(self, compression, get_random_path): df = tm.makeDataFrame() # write to compressed file - df.to_pickle(p1, compression=compression) + df.to_pickle(p1, compression=compression_no_zip) # decompress - self.decompress_file(p1, p2, compression=compression) + with tm.decompress_file(p1, compression=compression_no_zip) as f: + with open(p2, "wb") as fh: + fh.write(f.read()) # read decompressed file df2 = pd.read_pickle(p2, 
compression=None) @@ -435,17 +402,15 @@ def test_write_infer(self, ext, get_random_path): df.to_pickle(p1) # decompress - self.decompress_file(p1, p2, compression=compression) + with tm.decompress_file(p1, compression=compression) as f: + with open(p2, "wb") as fh: + fh.write(f.read()) # read decompressed file df2 = pd.read_pickle(p2, compression=None) tm.assert_frame_equal(df, df2) - @pytest.mark.parametrize('compression', [ - None, 'gzip', 'bz2', "zip", - pytest.param('xz', marks=td.skip_if_no_lzma) - ]) def test_read_explicit(self, compression, get_random_path): base = get_random_path path1 = base + ".raw" diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index ec26716f79446..62d1372525cc8 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -138,28 +138,29 @@ def test_to_csv_path_is_none(self): csv_str = s.to_csv(path=None) assert isinstance(csv_str, str) - def test_to_csv_compression(self, compression): + def test_to_csv_compression(self, compression_no_zip): s = Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'], name='X') with ensure_clean() as filename: - s.to_csv(filename, compression=compression, header=True) + s.to_csv(filename, compression=compression_no_zip, header=True) # test the round trip - to_csv -> read_csv - rs = pd.read_csv(filename, compression=compression, index_col=0, - squeeze=True) + rs = pd.read_csv(filename, compression=compression_no_zip, + index_col=0, squeeze=True) assert_series_equal(s, rs) # explicitly ensure file was compressed - with tm.decompress_file(filename, compression=compression) as fh: + with tm.decompress_file(filename, compression_no_zip) as fh: text = fh.read().decode('utf8') assert s.name in text - with tm.decompress_file(filename, compression=compression) as fh: + with tm.decompress_file(filename, compression_no_zip) as fh: assert_series_equal(s, pd.read_csv(fh, - index_col=0, squeeze=True)) + index_col=0, + squeeze=True)) class 
TestSeriesIO(TestData): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 3a06f6244da14..34e634f56aec6 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -191,6 +191,15 @@ def decompress_file(path, compression): elif compression == 'xz': lzma = compat.import_lzma() f = lzma.LZMAFile(path, 'rb') + elif compression == 'zip': + import zipfile + zip_file = zipfile.ZipFile(path) + zip_names = zip_file.namelist() + if len(zip_names) == 1: + f = zip_file.open(zip_names.pop()) + else: + raise ValueError('ZIP file {} error. Only one file per ZIP.' + .format(path)) else: msg = 'Unrecognized compression type: {}'.format(compression) raise ValueError(msg)
xref #19226 Move compression fixture to top-level ``conftest.py`` Clean-up compression tests in ``pandas/tests/io/test_pickle.py`` and add zip to the ``decompress_file`` testing utility Make some minor adjustments to the json and csv compression tests to skip zip compression when it's not valid
https://api.github.com/repos/pandas-dev/pandas/pulls/19350
2018-01-22T20:37:09Z
2018-01-24T10:06:44Z
2018-01-24T10:06:44Z
2018-01-24T10:06:52Z
fix and test index division by zero
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 26a7a78bb5c55..2b3dec3efaa64 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -204,6 +204,50 @@ Please note that the string `index` is not supported with the round trip format, new_df print(new_df.index.name) +.. _whatsnew_0230.enhancements.index_division_by_zero + +Index Division By Zero Fills Correctly +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Division operations on ``Index`` and subclasses will now fill division of positive numbers by zero with ``np.inf``, division of negative numbers by zero with ``-np.inf`` and `0 / 0` with ``np.nan``. This matches existing ``Series`` behavior. (:issue:`19322`, :issue:`19347`) + +Previous Behavior: + +.. code-block:: ipython + + In [6]: index = pd.Int64Index([-1, 0, 1]) + + In [7]: index / 0 + Out[7]: Int64Index([0, 0, 0], dtype='int64') + + # Previous behavior yielded different results depending on the type of zero in the divisor + In [8]: index / 0.0 + Out[8]: Float64Index([-inf, nan, inf], dtype='float64') + + In [9]: index = pd.UInt64Index([0, 1]) + + In [10]: index / np.array([0, 0], dtype=np.uint64) + Out[10]: UInt64Index([0, 0], dtype='uint64') + + In [11]: pd.RangeIndex(1, 5) / 0 + ZeroDivisionError: integer division or modulo by zero + +Current Behavior: + +.. ipython:: python + + index = pd.Int64Index([-1, 0, 1]) + # division by zero gives -infinity where negative, +infinity where positive, and NaN for 0 / 0 + index / 0 + + # The result of division by zero should not depend on whether the zero is int or float + index / 0.0 + + index = pd.UInt64Index([0, 1]) + index / np.array([0, 0], dtype=np.uint64) + + pd.RangeIndex(1, 5) / 0 + .. 
_whatsnew_0230.enhancements.other: Other Enhancements diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 626f3dc86556a..1e1bb0d49b3df 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4040,6 +4040,8 @@ def _evaluate_numeric_binop(self, other): attrs = self._maybe_update_attributes(attrs) with np.errstate(all='ignore'): result = op(values, other) + + result = missing.dispatch_missing(op, values, other, result) return constructor(result, **attrs) return _evaluate_numeric_binop diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index a82ee6b2b44af..0ed92a67c7e14 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -550,7 +550,7 @@ def __getitem__(self, key): return super_getitem(key) def __floordiv__(self, other): - if is_integer(other): + if is_integer(other) and other != 0: if (len(self) == 0 or self._start % other == 0 and self._step % other == 0): @@ -592,14 +592,15 @@ def _evaluate_numeric_binop(self, other): attrs = self._get_attributes_dict() attrs = self._maybe_update_attributes(attrs) + left, right = self, other if reversed: - self, other = other, self + left, right = right, left try: # apply if we have an override if step: with np.errstate(all='ignore'): - rstep = step(self._step, other) + rstep = step(left._step, right) # we don't have a representable op # so return a base index @@ -607,11 +608,11 @@ def _evaluate_numeric_binop(self, other): raise ValueError else: - rstep = self._step + rstep = left._step with np.errstate(all='ignore'): - rstart = op(self._start, other) - rstop = op(self._stop, other) + rstart = op(left._start, right) + rstop = op(left._stop, right) result = RangeIndex(rstart, rstop, @@ -627,18 +628,12 @@ def _evaluate_numeric_binop(self, other): return result - except (ValueError, TypeError, AttributeError): - pass - - # convert to Int64Index ops - if isinstance(self, RangeIndex): - self = self.values - if isinstance(other, RangeIndex): 
- other = other.values - - with np.errstate(all='ignore'): - results = op(self, other) - return Index(results, **attrs) + except (ValueError, TypeError, AttributeError, + ZeroDivisionError): + # Defer to Int64Index implementation + if reversed: + return op(other, self._int64index) + return op(self._int64index, other) return _evaluate_numeric_binop diff --git a/pandas/core/missing.py b/pandas/core/missing.py index 2eccc5777bca6..31c489e2f8941 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -1,6 +1,7 @@ """ Routines for filling missing data """ +import operator import numpy as np from distutils.version import LooseVersion @@ -650,6 +651,87 @@ def fill_zeros(result, x, y, name, fill): return result +def mask_zero_div_zero(x, y, result, copy=False): + """ + Set results of 0 / 0 or 0 // 0 to np.nan, regardless of the dtypes + of the numerator or the denominator. + + Parameters + ---------- + x : ndarray + y : ndarray + result : ndarray + copy : bool (default False) + Whether to always create a new array or try to fill in the existing + array if possible. 
+ + Returns + ------- + filled_result : ndarray + + Examples + -------- + >>> x = np.array([1, 0, -1], dtype=np.int64) + >>> y = 0 # int 0; numpy behavior is different with float + >>> result = x / y + >>> result # raw numpy result does not fill division by zero + array([0, 0, 0]) + >>> mask_zero_div_zero(x, y, result) + array([ inf, nan, -inf]) + """ + if is_scalar(y): + y = np.array(y) + + zmask = y == 0 + if zmask.any(): + shape = result.shape + + nan_mask = (zmask & (x == 0)).ravel() + neginf_mask = (zmask & (x < 0)).ravel() + posinf_mask = (zmask & (x > 0)).ravel() + + if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): + # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN + result = result.astype('float64', copy=copy).ravel() + + np.putmask(result, nan_mask, np.nan) + np.putmask(result, posinf_mask, np.inf) + np.putmask(result, neginf_mask, -np.inf) + + result = result.reshape(shape) + + return result + + +def dispatch_missing(op, left, right, result): + """ + Fill nulls caused by division by zero, casting to a diffferent dtype + if necessary. + + Parameters + ---------- + op : function (operator.add, operator.div, ...) 
+ left : object (Index for non-reversed ops) + right : object (Index fof reversed ops) + result : ndarray + + Returns + ------- + result : ndarray + """ + opstr = '__{opname}__'.format(opname=op.__name__).replace('____', '__') + if op in [operator.truediv, operator.floordiv, + getattr(operator, 'div', None)]: + result = mask_zero_div_zero(left, right, result) + elif op is operator.mod: + result = fill_zeros(result, left, right, opstr, np.nan) + elif op is divmod: + res0 = mask_zero_div_zero(left, right, result[0]) + res1 = fill_zeros(result[1], left, right, opstr, np.nan) + result = (res0, res1) + return result + + def _interp_limit(invalid, fw_limit, bw_limit): """ Get indexers of values that won't be filled diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py index 217ee07affa84..6d88ef0cfa6c5 100644 --- a/pandas/tests/indexes/conftest.py +++ b/pandas/tests/indexes/conftest.py @@ -1,9 +1,10 @@ import pytest import numpy as np +import pandas as pd import pandas.util.testing as tm from pandas.core.indexes.api import Index, MultiIndex -from pandas.compat import lzip +from pandas.compat import lzip, long @pytest.fixture(params=[tm.makeUnicodeIndex(100), @@ -29,3 +30,18 @@ def indices(request): def one(request): # zero-dim integer array behaves like an integer return request.param + + +zeros = [box([0] * 5, dtype=dtype) + for box in [pd.Index, np.array] + for dtype in [np.int64, np.uint64, np.float64]] +zeros.extend([np.array(0, dtype=dtype) + for dtype in [np.int64, np.uint64, np.float64]]) +zeros.extend([0, 0.0, long(0)]) + + +@pytest.fixture(params=zeros) +def zero(request): + # For testing division by (or of) zero for Index with length 5, this + # gives several scalar-zeros and length-5 vector-zeros + return request.param diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 0c1bec7a6f1a9..c6883df7ee91a 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py 
@@ -157,6 +157,48 @@ def test_divmod_series(self): for r, e in zip(result, expected): tm.assert_series_equal(r, e) + def test_div_zero(self, zero): + idx = self.create_index() + + expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf], + dtype=np.float64) + result = idx / zero + tm.assert_index_equal(result, expected) + ser_compat = Series(idx).astype('i8') / np.array(zero).astype('i8') + tm.assert_series_equal(ser_compat, Series(result)) + + def test_floordiv_zero(self, zero): + idx = self.create_index() + expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf], + dtype=np.float64) + + result = idx // zero + tm.assert_index_equal(result, expected) + ser_compat = Series(idx).astype('i8') // np.array(zero).astype('i8') + tm.assert_series_equal(ser_compat, Series(result)) + + def test_mod_zero(self, zero): + idx = self.create_index() + + expected = Index([np.nan, np.nan, np.nan, np.nan, np.nan], + dtype=np.float64) + result = idx % zero + tm.assert_index_equal(result, expected) + ser_compat = Series(idx).astype('i8') % np.array(zero).astype('i8') + tm.assert_series_equal(ser_compat, Series(result)) + + def test_divmod_zero(self, zero): + idx = self.create_index() + + exleft = Index([np.nan, np.inf, np.inf, np.inf, np.inf], + dtype=np.float64) + exright = Index([np.nan, np.nan, np.nan, np.nan, np.nan], + dtype=np.float64) + + result = divmod(idx, zero) + tm.assert_index_equal(result[0], exleft) + tm.assert_index_equal(result[1], exright) + def test_explicit_conversions(self): # GH 8608
Related: #19336, this implements the most important parts of the parametrization requested there. - [ ] closes #xxxx - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19347
2018-01-22T17:42:33Z
2018-02-06T01:30:30Z
null
2018-02-06T04:44:39Z
clarify redirection in ops
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index e0aa0a4a415e1..3db2dd849ccee 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -41,6 +41,297 @@ ABCIndex, ABCPeriodIndex) + +def _gen_eval_kwargs(name): + """ + Find the keyword arguments to pass to numexpr for the given operation. + + Parameters + ---------- + name : str + + Returns + ------- + eval_kwargs : dict + + Examples + -------- + >>> _gen_eval_kwargs("__add__") + {} + + >>> _gen_eval_kwargs("rtruediv") + {"reversed": True, "truediv": True} + """ + kwargs = {} + + # Series and Panel appear to only pass __add__, __radd__, ... + # but DataFrame gets both these dunder names _and_ non-dunder names + # add, radd, ... + name = name.replace('__', '') + + if name.startswith('r'): + if name not in ['radd', 'rand', 'ror', 'rxor']: + # Exclude commutative operations + kwargs['reversed'] = True + + if name in ['truediv', 'rtruediv']: + kwargs['truediv'] = True + + if name in ['ne']: + kwargs['masker'] = True + + return kwargs + + +def _gen_fill_zeros(name): + """ + Find the appropriate fill value to use when filling in undefined values + in the results of the given operation caused by operating on + (generally dividing by) zero. 
+ + Parameters + ---------- + name : str + + Returns + ------- + fill_value : {None, np.nan, np.inf} + """ + name = name.strip('__') + if 'div' in name: + # truediv, floordiv, div, and reversed variants + fill_value = np.inf + elif 'mod' in name: + # mod, rmod + fill_value = np.nan + else: + fill_value = None + return fill_value + + +# ----------------------------------------------------------------------------- +# Docstring Generation and Templates + +_op_descriptions = { + 'add': {'op': '+', + 'desc': 'Addition', + 'reversed': False, + 'reverse': 'radd'}, + 'sub': {'op': '-', + 'desc': 'Subtraction', + 'reversed': False, + 'reverse': 'rsub'}, + 'mul': {'op': '*', + 'desc': 'Multiplication', + 'reversed': False, + 'reverse': 'rmul'}, + 'mod': {'op': '%', + 'desc': 'Modulo', + 'reversed': False, + 'reverse': 'rmod'}, + 'pow': {'op': '**', + 'desc': 'Exponential power', + 'reversed': False, + 'reverse': 'rpow'}, + 'truediv': {'op': '/', + 'desc': 'Floating division', + 'reversed': False, + 'reverse': 'rtruediv'}, + 'floordiv': {'op': '//', + 'desc': 'Integer division', + 'reversed': False, + 'reverse': 'rfloordiv'}, + 'divmod': {'op': 'divmod', + 'desc': 'Integer division and modulo', + 'reversed': False, + 'reverse': None}, + + 'eq': {'op': '==', + 'desc': 'Equal to', + 'reversed': False, + 'reverse': None}, + 'ne': {'op': '!=', + 'desc': 'Not equal to', + 'reversed': False, + 'reverse': None}, + 'lt': {'op': '<', + 'desc': 'Less than', + 'reversed': False, + 'reverse': None}, + 'le': {'op': '<=', + 'desc': 'Less than or equal to', + 'reversed': False, + 'reverse': None}, + 'gt': {'op': '>', + 'desc': 'Greater than', + 'reversed': False, + 'reverse': None}, + 'ge': {'op': '>=', + 'desc': 'Greater than or equal to', + 'reversed': False, + 'reverse': None}} + +_op_names = list(_op_descriptions.keys()) +for key in _op_names: + reverse_op = _op_descriptions[key]['reverse'] + if reverse_op is not None: + _op_descriptions[reverse_op] = _op_descriptions[key].copy() + 
_op_descriptions[reverse_op]['reversed'] = True + _op_descriptions[reverse_op]['reverse'] = key + +_flex_doc_SERIES = """ +{desc} of series and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value for +missing data in one of the inputs. + +Parameters +---------- +other : Series or scalar value +fill_value : None or float value, default None (NaN) + Fill missing (NaN) values with this value. If both Series are + missing, the result will be missing +level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + +Returns +------- +result : Series + +See also +-------- +Series.{reverse} +""" + +_arith_doc_FRAME = """ +Binary operator %s with support to substitute a fill_value for missing data in +one of the inputs + +Parameters +---------- +other : Series, DataFrame, or constant +axis : {0, 1, 'index', 'columns'} + For Series input, axis to match Series index on +fill_value : None or float value, default None + Fill missing (NaN) values with this value. If both DataFrame locations are + missing, the result will be missing +level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + +Notes +----- +Mismatched indices will be unioned together + +Returns +------- +result : DataFrame +""" + +_flex_doc_FRAME = """ +{desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value for +missing data in one of the inputs. + +Parameters +---------- +other : Series, DataFrame, or constant +axis : {{0, 1, 'index', 'columns'}} + For Series input, axis to match Series index on +fill_value : None or float value, default None + Fill missing (NaN) values with this value. 
If both DataFrame + locations are missing, the result will be missing +level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level + +Notes +----- +Mismatched indices will be unioned together + +Returns +------- +result : DataFrame + +See also +-------- +DataFrame.{reverse} +""" + +_flex_doc_PANEL = """ +{desc} of series and other, element-wise (binary operator `{op_name}`). +Equivalent to ``{equiv}``. + +Parameters +---------- +other : DataFrame or Panel +axis : {{items, major_axis, minor_axis}} + Axis to broadcast over + +Returns +------- +Panel + +See also +-------- +Panel.{reverse} +""" + + +_agg_doc_PANEL = """ +Wrapper method for {wrp_method} + +Parameters +---------- +other : {construct} or {cls_name} +axis : {{{axis_order}}} + Axis to broadcast over + +Returns +------- +{cls_name} +""" + + +def _make_flex_doc(op_name, typ): + """ + Make the appropriate substitutions for the given operation and class-typ + into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring + to attach to a generated method. + + Parameters + ---------- + op_name : str {'__add__', '__sub__', ... 
'__eq__', '__ne__', ...} + typ : str {series, 'dataframe']} + + Returns + ------- + doc : str + """ + op_name = op_name.replace('__', '') + op_desc = _op_descriptions[op_name] + + if op_desc['reversed']: + equiv = 'other ' + op_desc['op'] + ' ' + typ + else: + equiv = typ + ' ' + op_desc['op'] + ' other' + + if typ == 'series': + base_doc = _flex_doc_SERIES + elif typ == 'dataframe': + base_doc = _flex_doc_FRAME + elif typ == 'panel': + base_doc = _flex_doc_PANEL + else: + raise AssertionError('Invalid typ argument.') + + doc = base_doc.format(desc=op_desc['desc'], op_name=op_name, + equiv=equiv, reverse=op_desc['reverse']) + return doc + + # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory # methods @@ -82,35 +373,31 @@ def names(x): mul=arith_method(operator.mul, names('mul'), op('*'), default_axis=default_axis), truediv=arith_method(operator.truediv, names('truediv'), op('/'), - truediv=True, fill_zeros=np.inf, default_axis=default_axis), floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'), - default_axis=default_axis, fill_zeros=np.inf), + default_axis=default_axis), # Causes a floating point exception in the tests when numexpr enabled, # so for now no speedup mod=arith_method(operator.mod, names('mod'), None, - default_axis=default_axis, fill_zeros=np.nan), + default_axis=default_axis), pow=arith_method(operator.pow, names('pow'), op('**'), default_axis=default_axis), # not entirely sure why this is necessary, but previously was included # so it's here to maintain compatibility rmul=arith_method(operator.mul, names('rmul'), op('*'), - default_axis=default_axis, reversed=True), + default_axis=default_axis), rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'), - default_axis=default_axis, reversed=True), + default_axis=default_axis), rtruediv=arith_method(lambda x, y: operator.truediv(y, x), - names('rtruediv'), op('/'), 
truediv=True, - fill_zeros=np.inf, default_axis=default_axis, - reversed=True), + names('rtruediv'), op('/'), + default_axis=default_axis), rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x), names('rfloordiv'), op('//'), - default_axis=default_axis, fill_zeros=np.inf, - reversed=True), + default_axis=default_axis), rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**'), - default_axis=default_axis, reversed=True), + default_axis=default_axis), rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'), - default_axis=default_axis, fill_zeros=np.nan, - reversed=True),) + default_axis=default_axis)) # yapf: enable new_methods['div'] = new_methods['truediv'] new_methods['rdiv'] = new_methods['rtruediv'] @@ -119,11 +406,11 @@ def names(x): if comp_method: new_methods.update(dict( eq=comp_method(operator.eq, names('eq'), op('==')), - ne=comp_method(operator.ne, names('ne'), op('!='), masker=True), + ne=comp_method(operator.ne, names('ne'), op('!=')), lt=comp_method(operator.lt, names('lt'), op('<')), gt=comp_method(operator.gt, names('gt'), op('>')), le=comp_method(operator.le, names('le'), op('<=')), - ge=comp_method(operator.ge, names('ge'), op('>=')), )) + ge=comp_method(operator.ge, names('ge'), op('>=')))) if bool_method: new_methods.update( dict(and_=bool_method(operator.and_, names('and_'), op('&')), @@ -138,13 +425,10 @@ def names(x): names('rxor'), op('^')))) if have_divmod: # divmod doesn't have an op that is supported by numexpr - new_methods['divmod'] = arith_method( - divmod, - names('divmod'), - None, - default_axis=default_axis, - construct_result=_construct_divmod_result, - ) + new_methods['divmod'] = arith_method(divmod, + names('divmod'), + None, + default_axis=default_axis) new_methods = {names(k): v for k, v in new_methods.items()} return new_methods @@ -170,7 +454,7 @@ def add_special_arithmetic_methods(cls, arith_method=None, ---------- arith_method : function (optional) factory for special arithmetic methods, with op string: - 
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs) + f(op, name, str_rep, default_axis=None) comp_method : function (optional) factory for rich comparison - signature: f(op, name, str_rep) bool_method : function (optional) @@ -242,7 +526,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, ---------- flex_arith_method : function factory for special arithmetic methods, with op string: - f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs) + f(op, name, str_rep, default_axis=None) flex_comp_method : function, optional, factory for rich comparison - signature: f(op, name, str_rep) use_numexpr : bool, default True @@ -267,6 +551,9 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, add_methods(cls, new_methods=new_methods, force=force) +# ----------------------------------------------------------------------------- +# Series + def _align_method_SERIES(left, right, align_asobject=False): """ align lhs and rhs Series """ @@ -310,12 +597,16 @@ def _construct_divmod_result(left, result, index, name, dtype): ) -def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None, - construct_result=_construct_result, **eval_kwargs): +def _arith_method_SERIES(op, name, str_rep, default_axis=None): """ Wrapper function for Series arithmetic operations, to avoid code duplication. """ + eval_kwargs = _gen_eval_kwargs(name) + fill_zeros = _gen_fill_zeros(name) + construct_result = (_construct_divmod_result + if op is divmod else _construct_result) + def na_op(x, y): import pandas.core.computation.expressions as expressions @@ -448,11 +739,12 @@ def _comp_method_OBJECT_ARRAY(op, x, y): return result -def _comp_method_SERIES(op, name, str_rep, masker=False): +def _comp_method_SERIES(op, name, str_rep): """ Wrapper function for Series arithmetic operations, to avoid code duplication. 
""" + masker = _gen_eval_kwargs(name).get('masker', False) def na_op(x, y): @@ -641,109 +933,8 @@ def wrapper(self, other): return wrapper -_op_descriptions = {'add': {'op': '+', - 'desc': 'Addition', - 'reversed': False, - 'reverse': 'radd'}, - 'sub': {'op': '-', - 'desc': 'Subtraction', - 'reversed': False, - 'reverse': 'rsub'}, - 'mul': {'op': '*', - 'desc': 'Multiplication', - 'reversed': False, - 'reverse': 'rmul'}, - 'mod': {'op': '%', - 'desc': 'Modulo', - 'reversed': False, - 'reverse': 'rmod'}, - 'pow': {'op': '**', - 'desc': 'Exponential power', - 'reversed': False, - 'reverse': 'rpow'}, - 'truediv': {'op': '/', - 'desc': 'Floating division', - 'reversed': False, - 'reverse': 'rtruediv'}, - 'floordiv': {'op': '//', - 'desc': 'Integer division', - 'reversed': False, - 'reverse': 'rfloordiv'}, - 'divmod': {'op': 'divmod', - 'desc': 'Integer division and modulo', - 'reversed': False, - 'reverse': None}, - - 'eq': {'op': '==', - 'desc': 'Equal to', - 'reversed': False, - 'reverse': None}, - 'ne': {'op': '!=', - 'desc': 'Not equal to', - 'reversed': False, - 'reverse': None}, - 'lt': {'op': '<', - 'desc': 'Less than', - 'reversed': False, - 'reverse': None}, - 'le': {'op': '<=', - 'desc': 'Less than or equal to', - 'reversed': False, - 'reverse': None}, - 'gt': {'op': '>', - 'desc': 'Greater than', - 'reversed': False, - 'reverse': None}, - 'ge': {'op': '>=', - 'desc': 'Greater than or equal to', - 'reversed': False, - 'reverse': None}} - -_op_names = list(_op_descriptions.keys()) -for k in _op_names: - reverse_op = _op_descriptions[k]['reverse'] - _op_descriptions[reverse_op] = _op_descriptions[k].copy() - _op_descriptions[reverse_op]['reversed'] = True - _op_descriptions[reverse_op]['reverse'] = k - - -_flex_doc_SERIES = """ -%s of series and other, element-wise (binary operator `%s`). - -Equivalent to ``%s``, but with support to substitute a fill_value for -missing data in one of the inputs. 
- -Parameters ----------- -other : Series or scalar value -fill_value : None or float value, default None (NaN) - Fill missing (NaN) values with this value. If both Series are - missing, the result will be missing -level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - -Returns -------- -result : Series - -See also --------- -Series.%s -""" - - -def _flex_method_SERIES(op, name, str_rep, default_axis=None, fill_zeros=None, - **eval_kwargs): - op_name = name.replace('__', '') - op_desc = _op_descriptions[op_name] - if op_desc['reversed']: - equiv = 'other ' + op_desc['op'] + ' series' - else: - equiv = 'series ' + op_desc['op'] + ' other' - - doc = _flex_doc_SERIES % (op_desc['desc'], op_name, equiv, - op_desc['reverse']) +def _flex_method_SERIES(op, name, str_rep, default_axis=None): + doc = _make_flex_doc(name, 'series') @Appender(doc) def flex_wrapper(self, other, level=None, fill_value=None, axis=0): @@ -776,62 +967,9 @@ def flex_wrapper(self, other, level=None, fill_value=None, axis=0): bool_method=_bool_method_SERIES, have_divmod=True) -_arith_doc_FRAME = """ -Binary operator %s with support to substitute a fill_value for missing data in -one of the inputs - -Parameters ----------- -other : Series, DataFrame, or constant -axis : {0, 1, 'index', 'columns'} - For Series input, axis to match Series index on -fill_value : None or float value, default None - Fill missing (NaN) values with this value. If both DataFrame locations are - missing, the result will be missing -level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - -Notes ------ -Mismatched indices will be unioned together - -Returns -------- -result : DataFrame -""" - -_flex_doc_FRAME = """ -%s of dataframe and other, element-wise (binary operator `%s`). - -Equivalent to ``%s``, but with support to substitute a fill_value for -missing data in one of the inputs. 
- -Parameters ----------- -other : Series, DataFrame, or constant -axis : {0, 1, 'index', 'columns'} - For Series input, axis to match Series index on -fill_value : None or float value, default None - Fill missing (NaN) values with this value. If both DataFrame - locations are missing, the result will be missing -level : int or name - Broadcast across a level, matching Index values on the - passed MultiIndex level - -Notes ------ -Mismatched indices will be unioned together - -Returns -------- -result : DataFrame - -See also --------- -DataFrame.%s -""" +# ----------------------------------------------------------------------------- +# DataFrame def _align_method_FRAME(left, right, axis): """ convert rhs to meet lhs dims if input is list, tuple or np.ndarray """ @@ -877,8 +1015,10 @@ def to_series(right): return right -def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns', - fill_zeros=None, **eval_kwargs): +def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns'): + eval_kwargs = _gen_eval_kwargs(name) + fill_zeros = _gen_fill_zeros(name) + def na_op(x, y): import pandas.core.computation.expressions as expressions @@ -923,15 +1063,8 @@ def na_op(x, y): return result if name in _op_descriptions: - op_name = name.replace('__', '') - op_desc = _op_descriptions[op_name] - if op_desc['reversed']: - equiv = 'other ' + op_desc['op'] + ' dataframe' - else: - equiv = 'dataframe ' + op_desc['op'] + ' other' - - doc = _flex_doc_FRAME % (op_desc['desc'], op_name, equiv, - op_desc['reverse']) + # i.e. 
include "add" but not "__add__" + doc = _make_flex_doc(name, 'dataframe') else: doc = _arith_doc_FRAME % name @@ -955,9 +1088,8 @@ def f(self, other, axis=default_axis, level=None, fill_value=None): return f -# Masker unused for now -def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns', - masker=False): +def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns'): + def na_op(x, y): try: with np.errstate(invalid='ignore'): @@ -1003,7 +1135,7 @@ def f(self, other, axis=default_axis, level=None): return f -def _comp_method_FRAME(func, name, str_rep, masker=False): +def _comp_method_FRAME(func, name, str_rep): @Appender('Wrapper for comparison method {name}'.format(name=name)) def f(self, other): if isinstance(other, ABCDataFrame): # Another DataFrame @@ -1032,8 +1164,10 @@ def f(self, other): bool_method=_arith_method_FRAME) -def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None, - default_axis=None, **eval_kwargs): +# ----------------------------------------------------------------------------- +# Panel + +def _arith_method_PANEL(op, name, str_rep=None, default_axis=None): # work only for scalars def f(self, other): @@ -1048,7 +1182,7 @@ def f(self, other): return f -def _comp_method_PANEL(op, name, str_rep=None, masker=False): +def _comp_method_PANEL(op, name, str_rep=None): def na_op(x, y): import pandas.core.computation.expressions as expressions diff --git a/pandas/core/panel.py b/pandas/core/panel.py index ae86074ce2d05..afdd9bae3006f 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1525,8 +1525,11 @@ def _extract_axis(self, data, axis=0, intersect=False): def _add_aggregate_operations(cls, use_numexpr=True): """ add the operations to the cls; evaluate the doc strings again """ - def _panel_arith_method(op, name, str_rep=None, default_axis=None, - fill_zeros=None, **eval_kwargs): + def _panel_arith_method(op, name, str_rep=None, default_axis=None): + + eval_kwargs = ops._gen_eval_kwargs(name) + 
fill_zeros = ops._gen_fill_zeros(name) + def na_op(x, y): import pandas.core.computation.expressions as expressions @@ -1544,50 +1547,10 @@ def na_op(x, y): return result if name in ops._op_descriptions: - op_name = name.replace('__', '') - op_desc = ops._op_descriptions[op_name] - if op_desc['reversed']: - equiv = 'other ' + op_desc['op'] + ' panel' - else: - equiv = 'panel ' + op_desc['op'] + ' other' - - _op_doc = """ -{desc} of series and other, element-wise (binary operator `{op_name}`). -Equivalent to ``{equiv}``. - -Parameters ----------- -other : {construct} or {cls_name} -axis : {{{axis_order}}} - Axis to broadcast over - -Returns -------- -{cls_name} - -See also --------- -{cls_name}.{reverse}\n""" - doc = _op_doc.format( - desc=op_desc['desc'], op_name=op_name, equiv=equiv, - construct=cls._constructor_sliced.__name__, - cls_name=cls.__name__, reverse=op_desc['reverse'], - axis_order=', '.join(cls._AXIS_ORDERS)) + doc = ops._make_flex_doc(name, 'panel') else: # doc strings substitors - _agg_doc = """ - Wrapper method for {wrp_method} - - Parameters - ---------- - other : {construct} or {cls_name} - axis : {{{axis_order}}} - Axis to broadcast over - - Returns - ------- - {cls_name}\n""" - doc = _agg_doc.format( + doc = ops._agg_doc_PANEL.format( construct=cls._constructor_sliced.__name__, cls_name=cls.__name__, wrp_method=name, axis_order=', '.join(cls._AXIS_ORDERS)) diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 9b2650359bf68..059e399593971 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -43,8 +43,7 @@ _sparray_doc_kwargs = dict(klass='SparseArray') -def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None, - **eval_kwargs): +def _arith_method_SPARSE_ARRAY(op, name, str_rep=None, default_axis=None): """ Wrapper function for Series arithmetic operations, to avoid code duplication. 
@@ -864,7 +863,8 @@ def _make_index(length, indices, kind): return index -ops.add_special_arithmetic_methods(SparseArray, arith_method=_arith_method, - comp_method=_arith_method, - bool_method=_arith_method, +ops.add_special_arithmetic_methods(SparseArray, + arith_method=_arith_method_SPARSE_ARRAY, + comp_method=_arith_method_SPARSE_ARRAY, + bool_method=_arith_method_SPARSE_ARRAY, use_numexpr=False) diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 4b649927f8f72..3506284161660 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -41,13 +41,12 @@ # Wrapper function for Series arithmetic methods -def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None, - **eval_kwargs): +def _arith_method_SPARSE_SERIES(op, name, str_rep=None, default_axis=None): """ Wrapper function for Series arithmetic operations, to avoid code duplication. - str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are + str_rep and default_axis are not used, but are present for compatibility. """ @@ -864,7 +863,8 @@ def from_coo(cls, A, dense_index=False): **ops.series_flex_funcs) # overwrite basic arithmetic to use SparseSeries version # force methods to overwrite previous definitions. -ops.add_special_arithmetic_methods(SparseSeries, _arith_method, - comp_method=_arith_method, +ops.add_special_arithmetic_methods(SparseSeries, + arith_method=_arith_method_SPARSE_SERIES, + comp_method=_arith_method_SPARSE_SERIES, bool_method=None, use_numexpr=False, force=True)
core.ops involves a _lot_ of redirection. This decreases some of that redirection (and some redundancy) by implementing functions that show the logic of how some of the args/kwargs are chosen. The goal is to make it so future debugging can be done without going through `func.im_func.func_closure[1].cell_contents.func_closure...` - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19346
2018-01-22T16:53:32Z
2018-01-25T11:59:05Z
2018-01-25T11:59:05Z
2018-02-11T22:00:13Z
DOC GH19312
diff --git a/pandas/core/series.py b/pandas/core/series.py index be40f65186d2d..00ea4960f0ebf 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -490,7 +490,8 @@ def compress(self, condition, *args, **kwargs): def nonzero(self): """ - Return the indices of the elements that are non-zero + Return the integer locations of the elements in the series + that are non-zero This method is equivalent to calling `numpy.nonzero` on the series data. For compatibility with NumPy, the return value is
- [x] closes #19312 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19345
2018-01-22T15:54:49Z
2018-01-23T00:17:06Z
null
2023-05-11T01:17:13Z
LINT: Adding scripts directory to lint, and fixing flake issues on them (#18949)
diff --git a/ci/lint.sh b/ci/lint.sh index 98b33c0803d90..49bf9a690b990 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -30,6 +30,13 @@ if [ "$LINT" ]; then fi echo "Linting asv_bench/benchmarks/*.py DONE" + echo "Linting scripts/*.py" + flake8 scripts --filename=*.py + if [ $? -ne "0" ]; then + RET=1 + fi + echo "Linting scripts/*.py DONE" + echo "Linting *.pyx" flake8 pandas --filename=*.pyx --select=E501,E302,E203,E111,E114,E221,E303,E128,E231,E126,E265,E305,E301,E127,E261,E271,E129,W291,E222,E241,E123,F403 if [ $? -ne "0" ]; then diff --git a/scripts/announce.py b/scripts/announce.py old mode 100644 new mode 100755 diff --git a/scripts/api_rst_coverage.py b/scripts/api_rst_coverage.py index 28e761ef256d0..4800e80d82891 100755 --- a/scripts/api_rst_coverage.py +++ b/scripts/api_rst_coverage.py @@ -17,9 +17,11 @@ $ PYTHONPATH=.. ./api_rst_coverage.py """ -import pandas as pd -import inspect +import os import re +import inspect +import pandas as pd + def main(): # classes whose members to check @@ -61,13 +63,17 @@ def add_notes(x): # class members class_members = set() for cls in classes: - class_members.update([cls.__name__ + '.' 
+ x[0] for x in inspect.getmembers(cls)]) + for member in inspect.getmembers(cls): + class_members.add('{cls}.{member}'.format(cls=cls.__name__, + member=member[0])) # class members referenced in api.rst api_rst_members = set() - file_name = '../doc/source/api.rst' - with open(file_name, 'r') as f: - pattern = re.compile('({})\.(\w+)'.format('|'.join(cls.__name__ for cls in classes))) + base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + api_rst_fname = os.path.join(base_path, 'doc', 'source', 'api.rst') + class_names = (cls.__name__ for cls in classes) + pattern = re.compile('({})\.(\w+)'.format('|'.join(class_names))) + with open(api_rst_fname, 'r') as f: for line in f: match = pattern.search(line) if match: @@ -75,7 +81,8 @@ def add_notes(x): print() print("Documented members in api.rst that aren't actual class members:") - for x in sorted(api_rst_members.difference(class_members), key=class_name_sort_key): + for x in sorted(api_rst_members.difference(class_members), + key=class_name_sort_key): print(x) print() @@ -86,5 +93,6 @@ def add_notes(x): if '._' not in x: print(add_notes(x)) + if __name__ == "__main__": main() diff --git a/scripts/build_dist_for_release.sh b/scripts/build_dist_for_release.sh old mode 100644 new mode 100755 diff --git a/scripts/convert_deps.py b/scripts/convert_deps.py old mode 100644 new mode 100755 diff --git a/scripts/find_commits_touching_func.py b/scripts/find_commits_touching_func.py index 0dd609417d7ba..29eb4161718ff 100755 --- a/scripts/find_commits_touching_func.py +++ b/scripts/find_commits_touching_func.py @@ -1,135 +1,148 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- - # copyright 2013, y-p @ github - -from __future__ import print_function -from pandas.compat import range, lrange, map, string_types, text_type - -"""Search the git history for all commits touching a named method +""" +Search the git history for all commits touching a named method You need the sh module to run this -WARNING: this script 
uses git clean -f, running it on a repo with untracked files -will probably erase them. +WARNING: this script uses git clean -f, running it on a repo with untracked +files will probably erase them. + +Usage:: + $ ./find_commits_touching_func.py (see arguments below) """ +from __future__ import print_function import logging import re import os +import argparse from collections import namedtuple -from pandas.compat import parse_date - +from pandas.compat import lrange, map, string_types, text_type, parse_date try: import sh except ImportError: - raise ImportError("The 'sh' package is required in order to run this script. ") + raise ImportError("The 'sh' package is required to run this script.") -import argparse desc = """ Find all commits touching a specified function across the codebase. """.strip() argparser = argparse.ArgumentParser(description=desc) argparser.add_argument('funcname', metavar='FUNCNAME', - help='Name of function/method to search for changes on.') + help='Name of function/method to search for changes on') argparser.add_argument('-f', '--file-masks', metavar='f_re(,f_re)*', default=["\.py.?$"], - help='comma separated list of regexes to match filenames against\n'+ - 'defaults all .py? files') + help='comma separated list of regexes to match ' + 'filenames against\ndefaults all .py? 
files') argparser.add_argument('-d', '--dir-masks', metavar='d_re(,d_re)*', default=[], - help='comma separated list of regexes to match base path against') + help='comma separated list of regexes to match base ' + 'path against') argparser.add_argument('-p', '--path-masks', metavar='p_re(,p_re)*', default=[], - help='comma separated list of regexes to match full file path against') + help='comma separated list of regexes to match full ' + 'file path against') argparser.add_argument('-y', '--saw-the-warning', - action='store_true',default=False, - help='must specify this to run, acknowledge you realize this will erase untracked files') + action='store_true', default=False, + help='must specify this to run, acknowledge you ' + 'realize this will erase untracked files') argparser.add_argument('--debug-level', default="CRITICAL", - help='debug level of messages (DEBUG,INFO,etc...)') - + help='debug level of messages (DEBUG, INFO, etc...)') args = argparser.parse_args() lfmt = logging.Formatter(fmt='%(levelname)-8s %(message)s', - datefmt='%m-%d %H:%M:%S' -) - + datefmt='%m-%d %H:%M:%S') shh = logging.StreamHandler() shh.setFormatter(lfmt) - -logger=logging.getLogger("findit") +logger = logging.getLogger("findit") logger.addHandler(shh) +Hit = namedtuple("Hit", "commit path") +HASH_LEN = 8 -Hit=namedtuple("Hit","commit path") -HASH_LEN=8 def clean_checkout(comm): - h,s,d = get_commit_vitals(comm) + h, s, d = get_commit_vitals(comm) if len(s) > 60: s = s[:60] + "..." 
- s=s.split("\n")[0] - logger.info("CO: %s %s" % (comm,s )) + s = s.split("\n")[0] + logger.info("CO: %s %s" % (comm, s)) - sh.git('checkout', comm ,_tty_out=False) + sh.git('checkout', comm, _tty_out=False) sh.git('clean', '-f') -def get_hits(defname,files=()): - cs=set() + +def get_hits(defname, files=()): + cs = set() for f in files: try: - r=sh.git('blame', '-L', '/def\s*{start}/,/def/'.format(start=defname),f,_tty_out=False) + r = sh.git('blame', + '-L', + '/def\s*{start}/,/def/'.format(start=defname), + f, + _tty_out=False) except sh.ErrorReturnCode_128: logger.debug("no matches in %s" % f) continue lines = r.strip().splitlines()[:-1] # remove comment lines - lines = [x for x in lines if not re.search("^\w+\s*\(.+\)\s*#",x)] - hits = set(map(lambda x: x.split(" ")[0],lines)) - cs.update(set(Hit(commit=c,path=f) for c in hits)) + lines = [x for x in lines if not re.search("^\w+\s*\(.+\)\s*#", x)] + hits = set(map(lambda x: x.split(" ")[0], lines)) + cs.update(set(Hit(commit=c, path=f) for c in hits)) return cs -def get_commit_info(c,fmt,sep='\t'): - r=sh.git('log', "--format={}".format(fmt), '{}^..{}'.format(c,c),"-n","1",_tty_out=False) + +def get_commit_info(c, fmt, sep='\t'): + r = sh.git('log', + "--format={}".format(fmt), + '{}^..{}'.format(c, c), + "-n", + "1", + _tty_out=False) return text_type(r).split(sep) -def get_commit_vitals(c,hlen=HASH_LEN): - h,s,d= get_commit_info(c,'%H\t%s\t%ci',"\t") - return h[:hlen],s,parse_date(d) -def file_filter(state,dirname,fnames): - if args.dir_masks and not any(re.search(x,dirname) for x in args.dir_masks): +def get_commit_vitals(c, hlen=HASH_LEN): + h, s, d = get_commit_info(c, '%H\t%s\t%ci', "\t") + return h[:hlen], s, parse_date(d) + + +def file_filter(state, dirname, fnames): + if (args.dir_masks and + not any(re.search(x, dirname) for x in args.dir_masks)): return for f in fnames: - p = os.path.abspath(os.path.join(os.path.realpath(dirname),f)) - if any(re.search(x,f) for x in args.file_masks)\ - or 
any(re.search(x,p) for x in args.path_masks): + p = os.path.abspath(os.path.join(os.path.realpath(dirname), f)) + if (any(re.search(x, f) for x in args.file_masks) or + any(re.search(x, p) for x in args.path_masks)): if os.path.isfile(p): state['files'].append(p) -def search(defname,head_commit="HEAD"): - HEAD,s = get_commit_vitals("HEAD")[:2] - logger.info("HEAD at %s: %s" % (HEAD,s)) + +def search(defname, head_commit="HEAD"): + HEAD, s = get_commit_vitals("HEAD")[:2] + logger.info("HEAD at %s: %s" % (HEAD, s)) done_commits = set() # allhits = set() files = [] state = dict(files=files) - os.path.walk('.',file_filter,state) + os.walk('.', file_filter, state) # files now holds a list of paths to files # seed with hits from q - allhits= set(get_hits(defname, files = files)) + allhits = set(get_hits(defname, files=files)) q = set([HEAD]) try: while q: - h=q.pop() + h = q.pop() clean_checkout(h) - hits = get_hits(defname, files = files) + hits = get_hits(defname, files=files) for x in hits: - prevc = get_commit_vitals(x.commit+"^")[0] + prevc = get_commit_vitals(x.commit + "^")[0] if prevc not in done_commits: q.add(prevc) allhits.update(hits) @@ -141,43 +154,46 @@ def search(defname,head_commit="HEAD"): clean_checkout(HEAD) return allhits + def pprint_hits(hits): - SUBJ_LEN=50 + SUBJ_LEN = 50 PATH_LEN = 20 - hits=list(hits) + hits = list(hits) max_p = 0 for hit in hits: - p=hit.path.split(os.path.realpath(os.curdir)+os.path.sep)[-1] - max_p=max(max_p,len(p)) + p = hit.path.split(os.path.realpath(os.curdir) + os.path.sep)[-1] + max_p = max(max_p, len(p)) if max_p < PATH_LEN: SUBJ_LEN += PATH_LEN - max_p PATH_LEN = max_p def sorter(i): - h,s,d=get_commit_vitals(hits[i].commit) - return hits[i].path,d + h, s, d = get_commit_vitals(hits[i].commit) + return hits[i].path, d - print("\nThese commits touched the %s method in these files on these dates:\n" \ - % args.funcname) - for i in sorted(lrange(len(hits)),key=sorter): + print(('\nThese commits touched the %s method in 
these files ' + 'on these dates:\n') % args.funcname) + for i in sorted(lrange(len(hits)), key=sorter): hit = hits[i] - h,s,d=get_commit_vitals(hit.commit) - p=hit.path.split(os.path.realpath(os.curdir)+os.path.sep)[-1] + h, s, d = get_commit_vitals(hit.commit) + p = hit.path.split(os.path.realpath(os.curdir) + os.path.sep)[-1] fmt = "{:%d} {:10} {:<%d} {:<%d}" % (HASH_LEN, SUBJ_LEN, PATH_LEN) if len(s) > SUBJ_LEN: - s = s[:SUBJ_LEN-5] + " ..." - print(fmt.format(h[:HASH_LEN],d.isoformat()[:10],s,p[-20:]) ) + s = s[:SUBJ_LEN - 5] + " ..." + print(fmt.format(h[:HASH_LEN], d.isoformat()[:10], s, p[-20:])) print("\n") + def main(): if not args.saw_the_warning: argparser.print_help() print(""" !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! -WARNING: this script uses git clean -f, running it on a repo with untracked files. +WARNING: +this script uses git clean -f, running it on a repo with untracked files. It's recommended that you make a fresh clone and run from its root directory. You must specify the -y argument to ignore this warning. !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! @@ -190,12 +206,11 @@ def main(): if isinstance(args.dir_masks, string_types): args.dir_masks = args.dir_masks.split(',') - logger.setLevel(getattr(logging,args.debug_level)) + logger.setLevel(getattr(logging, args.debug_level)) - hits=search(args.funcname) + hits = search(args.funcname) pprint_hits(hits) - pass if __name__ == "__main__": import sys diff --git a/scripts/find_undoc_args.py b/scripts/find_undoc_args.py index 32b23a67b187f..a135c8e5171a1 100755 --- a/scripts/find_undoc_args.py +++ b/scripts/find_undoc_args.py @@ -1,126 +1,135 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +""" +Script that compares the signature arguments with the ones in the docsting +and returns the differences in plain text or GitHub task list format. 
+Usage:: + $ ./find_undoc_args.py (see arguments below) +""" from __future__ import print_function - +import sys from collections import namedtuple -from itertools import islice import types import os import re import argparse -#http://docs.python.org/2/library/argparse.html -# arg name is positional is not prefixed with - or -- +import inspect + parser = argparse.ArgumentParser(description='Program description.') parser.add_argument('-p', '--path', metavar='PATH', type=str, required=False, - default=None, - help='full path relative to which paths wills be reported',action='store') -parser.add_argument('-m', '--module', metavar='MODULE', type=str,required=True, - help='name of package to import and examine',action='store') -parser.add_argument('-G', '--github_repo', metavar='REPO', type=str,required=False, - help='github project where the code lives, e.g. "pandas-dev/pandas"', - default=None,action='store') - + default=None, action='store', + help='full path relative to which paths wills be reported') +parser.add_argument('-m', '--module', metavar='MODULE', type=str, + required=True, action='store', + help='name of package to import and examine') +parser.add_argument('-G', '--github_repo', metavar='REPO', type=str, + required=False, default=None, action='store', + help='github project where the code lives, ' + 'e.g. 
"pandas-dev/pandas"') args = parser.parse_args() -Entry=namedtuple("Entry","func path lnum undoc_names missing_args nsig_names ndoc_names") +Entry = namedtuple('Entry', + 'func path lnum undoc_names missing_args ' + 'nsig_names ndoc_names') -def entry_gen(root_ns,module_name): - q=[root_ns] - seen=set() +def entry_gen(root_ns, module_name): + """Walk and yield all methods and functions in the module root_ns and + submodules.""" + q = [root_ns] + seen = set() while q: ns = q.pop() for x in dir(ns): - cand = getattr(ns,x) - if (isinstance(cand,types.ModuleType) - and cand.__name__ not in seen - and cand.__name__.startswith(module_name)): - # print(cand.__name__) + cand = getattr(ns, x) + if (isinstance(cand, types.ModuleType) and + cand.__name__ not in seen and + cand.__name__.startswith(module_name)): seen.add(cand.__name__) - q.insert(0,cand) - elif (isinstance(cand,(types.MethodType,types.FunctionType)) and + q.insert(0, cand) + elif (isinstance(cand, (types.MethodType, types.FunctionType)) and cand not in seen and cand.__doc__): seen.add(cand) yield cand + def cmp_docstring_sig(f): + """Return an `Entry` object describing the differences between the + arguments in the signature and the documented ones.""" def build_loc(f): - path=f.__code__.co_filename.split(args.path,1)[-1][1:] - return dict(path=path,lnum=f.__code__.co_firstlineno) + path = f.__code__.co_filename.split(args.path, 1)[-1][1:] + return dict(path=path, lnum=f.__code__.co_firstlineno) - import inspect - sig_names=set(inspect.getargspec(f).args) + sig_names = set(inspect.getargspec(f).args) + # XXX numpydoc can be used to get the list of parameters doc = f.__doc__.lower() - doc = re.split("^\s*parameters\s*",doc,1,re.M)[-1] - doc = re.split("^\s*returns*",doc,1,re.M)[0] - doc_names={x.split(":")[0].strip() for x in doc.split("\n") - if re.match("\s+[\w_]+\s*:",x)} - sig_names.discard("self") - doc_names.discard("kwds") - doc_names.discard("kwargs") - doc_names.discard("args") - return 
Entry(func=f,path=build_loc(f)['path'],lnum=build_loc(f)['lnum'], + doc = re.split('^\s*parameters\s*', doc, 1, re.M)[-1] + doc = re.split('^\s*returns*', doc, 1, re.M)[0] + doc_names = {x.split(":")[0].strip() for x in doc.split('\n') + if re.match('\s+[\w_]+\s*:', x)} + sig_names.discard('self') + doc_names.discard('kwds') + doc_names.discard('kwargs') + doc_names.discard('args') + return Entry(func=f, path=build_loc(f)['path'], lnum=build_loc(f)['lnum'], undoc_names=sig_names.difference(doc_names), - missing_args=doc_names.difference(sig_names),nsig_names=len(sig_names), - ndoc_names=len(doc_names)) + missing_args=doc_names.difference(sig_names), + nsig_names=len(sig_names), ndoc_names=len(doc_names)) + def format_id(i): return i -def format_item_as_github_task_list( i,item,repo): - tmpl = "- [ ] {id}) [{file}:{lnum} ({func_name}())]({link}) - __Missing__[{nmissing}/{total_args}]: {undoc_names}" +def format_item_as_github_task_list(i, item, repo): + tmpl = ('- [ ] {id_}) [{fname}:{lnum} ({func_name}())]({link}) - ' + '__Missing__[{nmissing}/{total_args}]: {undoc_names}') link_tmpl = "https://github.com/{repo}/blob/master/{file}#L{lnum}" - - link = link_tmpl.format(repo=repo,file=item.path ,lnum=item.lnum ) - - s = tmpl.format(id=i,file=item.path , - lnum=item.lnum, - func_name=item.func.__name__, - link=link, - nmissing=len(item.undoc_names), - total_args=item.nsig_names, - undoc_names=list(item.undoc_names)) - + link = link_tmpl.format(repo=repo, file=item.path, lnum=item.lnum) + s = tmpl.format(id_=i, fname=item.path, lnum=item.lnum, + func_name=item.func.__name__, link=link, + nmissing=len(item.undoc_names), + total_args=item.nsig_names, + undoc_names=list(item.undoc_names)) if item.missing_args: - s+= " __Extra__(?): {missing_args}".format(missing_args=list(item.missing_args)) - + s += ' __Extra__(?): %s' % list(item.missing_args) return s -def format_item_as_plain(i,item): - tmpl = "+{lnum} {path} {func_name}(): 
Missing[{nmissing}/{total_args}]={undoc_names}" - - s = tmpl.format(path=item.path , - lnum=item.lnum, - func_name=item.func.__name__, - nmissing=len(item.undoc_names), - total_args=item.nsig_names, - undoc_names=list(item.undoc_names)) +def format_item_as_plain(i, item): + tmpl = ('+{lnum} {path} {func_name}(): ' + 'Missing[{nmissing}/{total_args}]={undoc_names}') + s = tmpl.format(path=item.path, lnum=item.lnum, + func_name=item.func.__name__, + nmissing=len(item.undoc_names), + total_args=item.nsig_names, + undoc_names=list(item.undoc_names)) if item.missing_args: - s+= " Extra(?)={missing_args}".format(missing_args=list(item.missing_args)) - + s += ' Extra(?)=%s' % list(item.missing_args) return s + def main(): module = __import__(args.module) if not args.path: - args.path=os.path.dirname(module.__file__) - collect=[cmp_docstring_sig(e) for e in entry_gen(module,module.__name__)] - # only include if there are missing arguments in the docstring (fewer false positives) - # and there are at least some documented arguments - collect = [e for e in collect if e.undoc_names and len(e.undoc_names) != e.nsig_names] - collect.sort(key=lambda x:x.path) + args.path = os.path.dirname(module.__file__) + collect = [cmp_docstring_sig(e) + for e in entry_gen(module, module.__name__)] + # only include if there are missing arguments in the docstring + # (fewer false positives) and there are at least some documented arguments + collect = [e for e in collect + if e.undoc_names and len(e.undoc_names) != e.nsig_names] + collect.sort(key=lambda x: x.path) if args.github_repo: - for i,item in enumerate(collect,1): - print( format_item_as_github_task_list(i,item,args.github_repo)) + for i, item in enumerate(collect, 1): + print(format_item_as_github_task_list(i, item, args.github_repo)) else: - for i,item in enumerate(collect,1): - print( format_item_as_plain(i, item)) + for i, item in enumerate(collect, 1): + print(format_item_as_plain(i, item)) + -if __name__ == "__main__": - import 
sys +if __name__ == '__main__': sys.exit(main()) diff --git a/scripts/merge-pr.py b/scripts/merge-pr.py index 5337c37fe5320..31264cad52e4f 100755 --- a/scripts/merge-pr.py +++ b/scripts/merge-pr.py @@ -22,7 +22,6 @@ # usage: ./apache-pr-merge.py (see config env vars below) # # Lightly modified from version of this script in incubator-parquet-format - from __future__ import print_function from subprocess import check_output @@ -223,7 +222,7 @@ def update_pr(pr_num, user_login, base_ref): try: run_cmd( 'git push -f %s %s:%s' % (push_user_remote, pr_branch_name, - base_ref)) + base_ref)) except Exception as e: fail("Exception while pushing: %s" % e) clean_up() @@ -275,6 +274,7 @@ def fix_version_from_branch(branch, versions): branch_ver = branch.replace("branch-", "") return filter(lambda x: x.name.startswith(branch_ver), versions)[-1] + pr_num = input("Which pull request would you like to merge? (e.g. 34): ") pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
- [X] closes #18949 - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19344
2018-01-22T14:29:03Z
2018-01-24T01:20:21Z
2018-01-24T01:20:21Z
2018-01-24T01:21:09Z
MAINT: Remove pytest.warns in tests
diff --git a/ci/lint.sh b/ci/lint.sh index a96e0961304e7..98b33c0803d90 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -89,6 +89,14 @@ if [ "$LINT" ]; then if [ $? = "0" ]; then RET=1 fi + + # Check for pytest.warns + grep -r -E --include '*.py' 'pytest\.warns' pandas/tests/ + + if [ $? = "0" ]; then + RET=1 + fi + echo "Check for invalid testing DONE" # Check for imports from pandas.core.common instead diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index b9c95c372ab9e..7be801629e387 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -937,7 +937,7 @@ def test_from_M8_structured(self): assert isinstance(s[0], Timestamp) assert s[0] == dates[0][0] - with pytest.warns(FutureWarning): + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): s = Series.from_array(arr['Date'], Index([0])) assert s[0] == dates[0][0]
Per discussion in #18258, we are prohibiting its use in tests, at least for the time being.
https://api.github.com/repos/pandas-dev/pandas/pulls/19341
2018-01-22T10:22:50Z
2018-01-22T23:59:55Z
2018-01-22T23:59:55Z
2018-01-23T10:05:14Z
ENH: Add dtype parameter to IntervalIndex constructors and deprecate from_intervals
diff --git a/doc/source/api.rst b/doc/source/api.rst index 88419df1880ec..ddd09327935ce 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1617,7 +1617,6 @@ IntervalIndex Components IntervalIndex.from_arrays IntervalIndex.from_tuples IntervalIndex.from_breaks - IntervalIndex.from_intervals IntervalIndex.contains IntervalIndex.left IntervalIndex.right diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 71492154419fb..4dde76dee46a5 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -207,9 +207,8 @@ Other Enhancements :func:`pandas.api.extensions.register_index_accessor`, accessor for libraries downstream of pandas to register custom accessors like ``.cat`` on pandas objects. See :ref:`Registering Custom Accessors <developer.register-accessors>` for more (:issue:`14781`). - - - ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`) +- :class:`IntervalIndex` and its associated constructor methods (``from_arrays``, ``from_breaks``, ``from_tuples``) have gained a ``dtype`` parameter (:issue:`19262`) .. _whatsnew_0230.api_breaking: @@ -329,6 +328,7 @@ Deprecations - ``Series.valid`` is deprecated. Use :meth:`Series.dropna` instead (:issue:`18800`). - :func:`read_excel` has deprecated the ``skip_footer`` parameter. Use ``skipfooter`` instead (:issue:`18836`) - The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`). +- ``IntervalIndex.from_intervals`` is deprecated in favor of the :class:`IntervalIndex` constructor (:issue:`19263`) .. 
_whatsnew_0230.prior_deprecations: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f67e6eae27001..74c6abeb0ad12 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -200,7 +200,9 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, # interval if is_interval_dtype(data) or is_interval_dtype(dtype): from .interval import IntervalIndex - return IntervalIndex(data, dtype=dtype, name=name, copy=copy) + closed = kwargs.get('closed', None) + return IntervalIndex(data, dtype=dtype, name=name, copy=copy, + closed=closed) # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): @@ -313,8 +315,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return Float64Index(subarr, copy=copy, name=name) elif inferred == 'interval': from .interval import IntervalIndex - return IntervalIndex.from_intervals(subarr, name=name, - copy=copy) + return IntervalIndex(subarr, name=name, copy=copy) elif inferred == 'boolean': # don't support boolean explicitly ATM pass diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 2d4655d84dca8..2c7be2b21f959 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -341,7 +341,7 @@ def __array__(self, dtype=None): def astype(self, dtype, copy=True): if is_interval_dtype(dtype): from pandas import IntervalIndex - return IntervalIndex.from_intervals(np.array(self)) + return IntervalIndex(np.array(self)) elif is_categorical_dtype(dtype): # GH 18630 dtype = self.dtype._update_dtype(dtype) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 18fb71b490592..232770e582763 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1,6 +1,7 @@ """ define the IntervalIndex """ import numpy as np +import warnings from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex @@ -151,6 +152,10 @@ 
class IntervalIndex(IntervalMixin, Index): Name to be stored in the index. copy : boolean, default False Copy the meta-data + dtype : dtype or None, default None + If None, dtype will be inferred + + ..versionadded:: 0.23.0 Attributes ---------- @@ -167,7 +172,6 @@ class IntervalIndex(IntervalMixin, Index): from_arrays from_tuples from_breaks - from_intervals contains Examples @@ -181,8 +185,7 @@ class IntervalIndex(IntervalMixin, Index): It may also be constructed using one of the constructor methods: :meth:`IntervalIndex.from_arrays`, - :meth:`IntervalIndex.from_breaks`, :meth:`IntervalIndex.from_intervals` - and :meth:`IntervalIndex.from_tuples`. + :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`. See further examples in the doc strings of ``interval_range`` and the mentioned constructor methods. @@ -211,8 +214,7 @@ class IntervalIndex(IntervalMixin, Index): _mask = None - def __new__(cls, data, closed=None, - name=None, copy=False, dtype=None, + def __new__(cls, data, closed=None, name=None, copy=False, dtype=None, fastpath=False, verify_integrity=True): if fastpath: @@ -245,19 +247,28 @@ def __new__(cls, data, closed=None, closed = closed or infer_closed - return cls._simple_new(left, right, closed, name, - copy=copy, verify_integrity=verify_integrity) + return cls._simple_new(left, right, closed, name, copy=copy, + dtype=dtype, verify_integrity=verify_integrity) @classmethod - def _simple_new(cls, left, right, closed=None, name=None, - copy=False, verify_integrity=True): + def _simple_new(cls, left, right, closed=None, name=None, copy=False, + dtype=None, verify_integrity=True): result = IntervalMixin.__new__(cls) - if closed is None: - closed = 'right' + closed = closed or 'right' left = _ensure_index(left, copy=copy) right = _ensure_index(right, copy=copy) + if dtype is not None: + # GH 19262: dtype must be an IntervalDtype to override inferred + dtype = pandas_dtype(dtype) + if not is_interval_dtype(dtype): + msg = 'dtype must be an 
IntervalDtype, got {dtype}' + raise TypeError(msg.format(dtype=dtype)) + elif dtype.subtype is not None: + left = left.astype(dtype.subtype) + right = right.astype(dtype.subtype) + # coerce dtypes to match if needed if is_float_dtype(left) and is_integer_dtype(right): right = right.astype(left.dtype) @@ -304,7 +315,7 @@ def _shallow_copy(self, left=None, right=None, **kwargs): # only single value passed, could be an IntervalIndex # or array of Intervals if not isinstance(left, IntervalIndex): - left = type(self).from_intervals(left) + left = self._constructor(left) left, right = left.left, left.right else: @@ -322,7 +333,7 @@ def _validate(self): Verify that the IntervalIndex is valid. """ if self.closed not in _VALID_CLOSED: - raise ValueError("invalid options for 'closed': {closed}" + raise ValueError("invalid option for 'closed': {closed}" .format(closed=self.closed)) if len(self.left) != len(self.right): raise ValueError('left and right must have the same length') @@ -356,7 +367,7 @@ def _engine(self): @property def _constructor(self): - return type(self).from_intervals + return type(self) def __contains__(self, key): """ @@ -402,7 +413,8 @@ def contains(self, key): return False @classmethod - def from_breaks(cls, breaks, closed='right', name=None, copy=False): + def from_breaks(cls, breaks, closed='right', name=None, copy=False, + dtype=None): """ Construct an IntervalIndex from an array of splits @@ -417,6 +429,10 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False): Name to be stored in the index. 
copy : boolean, default False copy the data + dtype : dtype or None, default None + If None, dtype will be inferred + + ..versionadded:: 0.23.0 Examples -------- @@ -430,18 +446,17 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False): interval_range : Function to create a fixed frequency IntervalIndex IntervalIndex.from_arrays : Construct an IntervalIndex from a left and right array - IntervalIndex.from_intervals : Construct an IntervalIndex from an array - of Interval objects IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of tuples """ breaks = maybe_convert_platform_interval(breaks) return cls.from_arrays(breaks[:-1], breaks[1:], closed, - name=name, copy=copy) + name=name, copy=copy, dtype=dtype) @classmethod - def from_arrays(cls, left, right, closed='right', name=None, copy=False): + def from_arrays(cls, left, right, closed='right', name=None, copy=False, + dtype=None): """ Construct an IntervalIndex from a a left and right array @@ -458,6 +473,10 @@ def from_arrays(cls, left, right, closed='right', name=None, copy=False): Name to be stored in the index. 
copy : boolean, default False copy the data + dtype : dtype or None, default None + If None, dtype will be inferred + + ..versionadded:: 0.23.0 Examples -------- @@ -471,22 +490,23 @@ def from_arrays(cls, left, right, closed='right', name=None, copy=False): interval_range : Function to create a fixed frequency IntervalIndex IntervalIndex.from_breaks : Construct an IntervalIndex from an array of splits - IntervalIndex.from_intervals : Construct an IntervalIndex from an array - of Interval objects IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of tuples """ left = maybe_convert_platform_interval(left) right = maybe_convert_platform_interval(right) - return cls._simple_new(left, right, closed, name=name, - copy=copy, verify_integrity=True) + return cls._simple_new(left, right, closed, name=name, copy=copy, + dtype=dtype, verify_integrity=True) @classmethod - def from_intervals(cls, data, name=None, copy=False): + def from_intervals(cls, data, closed=None, name=None, copy=False, + dtype=None): """ Construct an IntervalIndex from a 1d array of Interval objects + .. deprecated:: 0.23.0 + Parameters ---------- data : array-like (1-dimensional) @@ -496,6 +516,10 @@ def from_intervals(cls, data, name=None, copy=False): Name to be stored in the index. 
copy : boolean, default False by-default copy the data, this is compat only and ignored + dtype : dtype or None, default None + If None, dtype will be inferred + + ..versionadded:: 0.23.0 Examples -------- @@ -521,16 +545,14 @@ def from_intervals(cls, data, name=None, copy=False): IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of tuples """ - if isinstance(data, IntervalIndex): - left, right, closed = data.left, data.right, data.closed - name = name or data.name - else: - data = maybe_convert_platform_interval(data) - left, right, closed = intervals_to_interval_bounds(data) - return cls.from_arrays(left, right, closed, name=name, copy=False) + msg = ('IntervalIndex.from_intervals is deprecated and will be ' + 'removed in a future version; use IntervalIndex(...) instead') + warnings.warn(msg, FutureWarning, stacklevel=2) + return cls(data, closed=closed, name=name, copy=copy, dtype=dtype) @classmethod - def from_tuples(cls, data, closed='right', name=None, copy=False): + def from_tuples(cls, data, closed='right', name=None, copy=False, + dtype=None): """ Construct an IntervalIndex from a list/array of tuples @@ -545,10 +567,14 @@ def from_tuples(cls, data, closed='right', name=None, copy=False): Name to be stored in the index. 
copy : boolean, default False by-default copy the data, this is compat only and ignored + dtype : dtype or None, default None + If None, dtype will be inferred + + ..versionadded:: 0.23.0 Examples -------- - >>> pd.IntervalIndex.from_tuples([(0, 1), (1,2)]) + >>> pd.IntervalIndex.from_tuples([(0, 1), (1, 2)]) IntervalIndex([(0, 1], (1, 2]], closed='right', dtype='interval[int64]') @@ -559,8 +585,6 @@ def from_tuples(cls, data, closed='right', name=None, copy=False): right array IntervalIndex.from_breaks : Construct an IntervalIndex from an array of splits - IntervalIndex.from_intervals : Construct an IntervalIndex from an array - of Interval objects """ if len(data): left, right = [], [] @@ -571,15 +595,22 @@ def from_tuples(cls, data, closed='right', name=None, copy=False): if isna(d): lhs = rhs = np.nan else: - lhs, rhs = d + try: + # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...] + lhs, rhs = d + except ValueError: + msg = ('IntervalIndex.from_tuples requires tuples of ' + 'length 2, got {tpl}').format(tpl=d) + raise ValueError(msg) + except TypeError: + msg = ('IntervalIndex.from_tuples received an invalid ' + 'item, {tpl}').format(tpl=d) + raise TypeError(msg) left.append(lhs) right.append(rhs) - # TODO - # if we have nulls and we previous had *only* - # integer data, then we have changed the dtype - - return cls.from_arrays(left, right, closed, name=name, copy=False) + return cls.from_arrays(left, right, closed, name=name, copy=False, + dtype=dtype) def to_tuples(self, na_tuple=True): """ @@ -921,7 +952,7 @@ def get_loc(self, key, method=None): Examples --------- >>> i1, i2 = pd.Interval(0, 1), pd.Interval(1, 2) - >>> index = pd.IntervalIndex.from_intervals([i1, i2]) + >>> index = pd.IntervalIndex([i1, i2]) >>> index.get_loc(1) 0 @@ -937,7 +968,7 @@ def get_loc(self, key, method=None): relevant intervals. 
>>> i3 = pd.Interval(0, 2) - >>> overlapping_index = pd.IntervalIndex.from_intervals([i2, i3]) + >>> overlapping_index = pd.IntervalIndex([i2, i3]) >>> overlapping_index.get_loc(1.5) array([0, 1], dtype=int64) """ diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 2adf17a227a59..777f08bd9db2b 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -348,8 +348,7 @@ def _format_labels(bins, precision, right=True, # account that we are all right closed v = adjust(labels[0].left) - i = IntervalIndex.from_intervals( - [Interval(v, labels[0].right, closed='right')]) + i = IntervalIndex([Interval(v, labels[0].right, closed='right')]) labels = i.append(labels[1:]) return labels diff --git a/pandas/tests/categorical/test_constructors.py b/pandas/tests/categorical/test_constructors.py index abea7e9a0e0b4..b29d75bed5c6f 100644 --- a/pandas/tests/categorical/test_constructors.py +++ b/pandas/tests/categorical/test_constructors.py @@ -76,9 +76,7 @@ def test_constructor_unsortable(self): def test_constructor_interval(self): result = Categorical([Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True) - ii = IntervalIndex.from_intervals([Interval(1, 2), - Interval(2, 3), - Interval(3, 6)]) + ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)]) exp = Categorical(ii, ordered=True) tm.assert_categorical_equal(result, exp) tm.assert_index_equal(result.categories, ii) diff --git a/pandas/tests/indexes/interval/test_construction.py b/pandas/tests/indexes/interval/test_construction.py new file mode 100644 index 0000000000000..5fdf92dcb2044 --- /dev/null +++ b/pandas/tests/indexes/interval/test_construction.py @@ -0,0 +1,342 @@ +from __future__ import division + +import pytest +import numpy as np +from functools import partial + +from pandas import ( + Interval, IntervalIndex, Index, Int64Index, Float64Index, Categorical, + date_range, timedelta_range, period_range, notna) +from pandas.compat import lzip +from 
pandas.core.dtypes.dtypes import IntervalDtype +import pandas.core.common as com +import pandas.util.testing as tm + + +@pytest.fixture(params=['left', 'right', 'both', 'neither']) +def closed(request): + return request.param + + +@pytest.fixture(params=[None, 'foo']) +def name(request): + return request.param + + +class Base(object): + """ + Common tests for all variations of IntervalIndex construction. Input data + to be supplied in breaks format, then converted by the subclass method + get_kwargs_from_breaks to the expected format. + """ + + @pytest.mark.parametrize('breaks', [ + [3, 14, 15, 92, 653], + np.arange(10, dtype='int64'), + Int64Index(range(-10, 11)), + Float64Index(np.arange(20, 30, 0.5)), + date_range('20180101', periods=10), + date_range('20180101', periods=10, tz='US/Eastern'), + timedelta_range('1 day', periods=10)]) + def test_constructor(self, constructor, breaks, closed, name): + result_kwargs = self.get_kwargs_from_breaks(breaks, closed) + result = constructor(closed=closed, name=name, **result_kwargs) + + assert result.closed == closed + assert result.name == name + assert result.dtype.subtype == getattr(breaks, 'dtype', 'int64') + tm.assert_index_equal(result.left, Index(breaks[:-1])) + tm.assert_index_equal(result.right, Index(breaks[1:])) + + @pytest.mark.parametrize('breaks, subtype', [ + (Int64Index([0, 1, 2, 3, 4]), 'float64'), + (Int64Index([0, 1, 2, 3, 4]), 'datetime64[ns]'), + (Int64Index([0, 1, 2, 3, 4]), 'timedelta64[ns]'), + (Float64Index([0, 1, 2, 3, 4]), 'int64'), + (date_range('2017-01-01', periods=5), 'int64'), + (timedelta_range('1 day', periods=5), 'int64')]) + def test_constructor_dtype(self, constructor, breaks, subtype): + # GH 19262: conversion via dtype parameter + expected_kwargs = self.get_kwargs_from_breaks(breaks.astype(subtype)) + expected = constructor(**expected_kwargs) + + result_kwargs = self.get_kwargs_from_breaks(breaks) + iv_dtype = IntervalDtype(subtype) + for dtype in (iv_dtype, str(iv_dtype)): + result = 
constructor(dtype=dtype, **result_kwargs) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('breaks', [ + [np.nan] * 2, [np.nan] * 4, [np.nan] * 50]) + def test_constructor_nan(self, constructor, breaks, closed): + # GH 18421 + result_kwargs = self.get_kwargs_from_breaks(breaks) + result = constructor(closed=closed, **result_kwargs) + + expected_subtype = np.float64 + expected_values = np.array(breaks[:-1], dtype=object) + + assert result.closed == closed + assert result.dtype.subtype == expected_subtype + tm.assert_numpy_array_equal(result.values, expected_values) + + @pytest.mark.parametrize('breaks', [ + [], + np.array([], dtype='int64'), + np.array([], dtype='float64'), + np.array([], dtype='datetime64[ns]'), + np.array([], dtype='timedelta64[ns]')]) + def test_constructor_empty(self, constructor, breaks, closed): + # GH 18421 + result_kwargs = self.get_kwargs_from_breaks(breaks) + result = constructor(closed=closed, **result_kwargs) + + expected_values = np.array([], dtype=object) + expected_subtype = getattr(breaks, 'dtype', np.int64) + + assert result.empty + assert result.closed == closed + assert result.dtype.subtype == expected_subtype + tm.assert_numpy_array_equal(result.values, expected_values) + + @pytest.mark.parametrize('breaks', [ + tuple('0123456789'), + list('abcdefghij'), + np.array(list('abcdefghij'), dtype=object), + np.array(list('abcdefghij'), dtype='<U1')]) + def test_constructor_string(self, constructor, breaks): + # GH 19016 + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalIndex') + with tm.assert_raises_regex(TypeError, msg): + constructor(**self.get_kwargs_from_breaks(breaks)) + + def test_generic_errors(self, constructor): + # filler input data to be used when supplying invalid kwargs + filler = self.get_kwargs_from_breaks(range(10)) + + # invalid closed + msg = "invalid option for 'closed': invalid" + with tm.assert_raises_regex(ValueError, msg): + constructor(closed='invalid', 
**filler) + + # unsupported dtype + msg = 'dtype must be an IntervalDtype, got int64' + with tm.assert_raises_regex(TypeError, msg): + constructor(dtype='int64', **filler) + + # invalid dtype + msg = 'data type "invalid" not understood' + with tm.assert_raises_regex(TypeError, msg): + constructor(dtype='invalid', **filler) + + # no point in nesting periods in an IntervalIndex + periods = period_range('2000-01-01', periods=10) + periods_kwargs = self.get_kwargs_from_breaks(periods) + msg = 'Period dtypes are not supported, use a PeriodIndex instead' + with tm.assert_raises_regex(ValueError, msg): + constructor(**periods_kwargs) + + # decreasing values + decreasing_kwargs = self.get_kwargs_from_breaks(range(10, -1, -1)) + msg = 'left side of interval must be <= right side' + with tm.assert_raises_regex(ValueError, msg): + constructor(**decreasing_kwargs) + + +class TestFromArrays(Base): + """Tests specific to IntervalIndex.from_arrays""" + + @pytest.fixture + def constructor(self): + return IntervalIndex.from_arrays + + def get_kwargs_from_breaks(self, breaks, closed='right'): + """ + converts intervals in breaks format to a dictionary of kwargs to + specific to the format expected by IntervalIndex.from_arrays + """ + return {'left': breaks[:-1], 'right': breaks[1:]} + + def test_constructor_errors(self): + # GH 19016: categorical data + data = Categorical(list('01234abcde'), ordered=True) + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalIndex') + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex.from_arrays(data[:-1], data[1:]) + + # unequal length + left = [0, 1, 2] + right = [2, 3] + msg = 'left and right must have the same length' + with tm.assert_raises_regex(ValueError, msg): + IntervalIndex.from_arrays(left, right) + + @pytest.mark.parametrize('left_subtype, right_subtype', [ + (np.int64, np.float64), (np.float64, np.int64)]) + def test_mixed_float_int(self, left_subtype, right_subtype): + """mixed int/float 
left/right results in float for both sides""" + left = np.arange(9, dtype=left_subtype) + right = np.arange(1, 10, dtype=right_subtype) + result = IntervalIndex.from_arrays(left, right) + + expected_left = Float64Index(left) + expected_right = Float64Index(right) + expected_subtype = np.float64 + + tm.assert_index_equal(result.left, expected_left) + tm.assert_index_equal(result.right, expected_right) + assert result.dtype.subtype == expected_subtype + + +class TestFromBreaks(Base): + """Tests specific to IntervalIndex.from_breaks""" + + @pytest.fixture + def constructor(self): + return IntervalIndex.from_breaks + + def get_kwargs_from_breaks(self, breaks, closed='right'): + """ + converts intervals in breaks format to a dictionary of kwargs to + specific to the format expected by IntervalIndex.from_breaks + """ + return {'breaks': breaks} + + def test_constructor_errors(self): + # GH 19016: categorical data + data = Categorical(list('01234abcde'), ordered=True) + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalIndex') + with tm.assert_raises_regex(TypeError, msg): + IntervalIndex.from_breaks(data) + + def test_length_one(self): + """breaks of length one produce an empty IntervalIndex""" + breaks = [0] + result = IntervalIndex.from_breaks(breaks) + expected = IntervalIndex.from_breaks([]) + tm.assert_index_equal(result, expected) + + +class TestFromTuples(Base): + """Tests specific to IntervalIndex.from_tuples""" + + @pytest.fixture + def constructor(self): + return IntervalIndex.from_tuples + + def get_kwargs_from_breaks(self, breaks, closed='right'): + """ + converts intervals in breaks format to a dictionary of kwargs to + specific to the format expected by IntervalIndex.from_tuples + """ + if len(breaks) == 0: + return {'data': breaks} + + tuples = lzip(breaks[:-1], breaks[1:]) + if isinstance(breaks, (list, tuple)): + return {'data': tuples} + return {'data': com._asarray_tuplesafe(tuples)} + + def test_constructor_errors(self): 
+ # non-tuple + tuples = [(0, 1), 2, (3, 4)] + msg = 'IntervalIndex.from_tuples received an invalid item, 2' + with tm.assert_raises_regex(TypeError, msg.format(t=tuples)): + IntervalIndex.from_tuples(tuples) + + # too few/many items + tuples = [(0, 1), (2,), (3, 4)] + msg = 'IntervalIndex.from_tuples requires tuples of length 2, got {t}' + with tm.assert_raises_regex(ValueError, msg.format(t=tuples)): + IntervalIndex.from_tuples(tuples) + + tuples = [(0, 1), (2, 3, 4), (5, 6)] + with tm.assert_raises_regex(ValueError, msg.format(t=tuples)): + IntervalIndex.from_tuples(tuples) + + def test_na_tuples(self): + # tuple (NA, NA) evaluates the same as NA as an elemenent + na_tuple = [(0, 1), (np.nan, np.nan), (2, 3)] + idx_na_tuple = IntervalIndex.from_tuples(na_tuple) + idx_na_element = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)]) + tm.assert_index_equal(idx_na_tuple, idx_na_element) + + +class TestClassConstructors(Base): + """Tests specific to the IntervalIndex/Index constructors""" + + @pytest.fixture(params=[IntervalIndex, partial(Index, dtype='interval')], + ids=['IntervalIndex', 'Index']) + def constructor(self, request): + return request.param + + def get_kwargs_from_breaks(self, breaks, closed='right'): + """ + converts intervals in breaks format to a dictionary of kwargs to + specific to the format expected by the IntervalIndex/Index constructors + """ + if len(breaks) == 0: + return {'data': breaks} + + ivs = [Interval(l, r, closed) if notna(l) else l + for l, r in zip(breaks[:-1], breaks[1:])] + + if isinstance(breaks, list): + return {'data': ivs} + return {'data': np.array(ivs, dtype=object)} + + def test_generic_errors(self, constructor): + """ + override the base class implementation since errors are handled + differently; checks unnecessary since caught at the Interval level + """ + pass + + def test_constructor_errors(self, constructor): + # mismatched closed inferred from intervals vs constructor. 
+ ivs = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')] + msg = 'conflicting values for closed' + with tm.assert_raises_regex(ValueError, msg): + constructor(ivs, closed='neither') + + # mismatched closed within intervals + ivs = [Interval(0, 1, closed='right'), Interval(2, 3, closed='left')] + msg = 'intervals must all be closed on the same side' + with tm.assert_raises_regex(ValueError, msg): + constructor(ivs) + + # scalar + msg = (r'IntervalIndex\(...\) must be called with a collection of ' + 'some kind, 5 was passed') + with tm.assert_raises_regex(TypeError, msg): + constructor(5) + + # not an interval + msg = ("type <(class|type) 'numpy.int64'> with value 0 " + "is not an interval") + with tm.assert_raises_regex(TypeError, msg): + constructor([0, 1]) + + +class TestFromIntervals(TestClassConstructors): + """ + Tests for IntervalIndex.from_intervals, which is deprecated in favor of the + IntervalIndex constructor. Same tests as the IntervalIndex constructor, + plus deprecation test. Should only need to delete this class when removed. 
+ """ + + @pytest.fixture + def constructor(self): + def from_intervals_ignore_warnings(*args, **kwargs): + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False): + return IntervalIndex.from_intervals(*args, **kwargs) + return from_intervals_ignore_warnings + + def test_deprecated(self): + ivs = [Interval(0, 1), Interval(1, 2)] + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + IntervalIndex.from_intervals(ivs) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 0e509c241fe51..71a6f78125004 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -4,7 +4,7 @@ import numpy as np from pandas import ( Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp, - Timedelta, date_range, timedelta_range, Categorical) + Timedelta, date_range, timedelta_range) from pandas.compat import lzip import pandas.core.common as com from pandas.tests.indexes.common import Base @@ -40,249 +40,6 @@ def create_index_with_nan(self, closed='right'): np.where(mask, np.arange(10), np.nan), np.where(mask, np.arange(1, 11), np.nan), closed=closed) - @pytest.mark.parametrize('data', [ - Index([0, 1, 2, 3, 4]), - date_range('2017-01-01', periods=5), - date_range('2017-01-01', periods=5, tz='US/Eastern'), - timedelta_range('1 day', periods=5)]) - def test_constructors(self, data, closed, name): - left, right = data[:-1], data[1:] - ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)] - expected = IntervalIndex._simple_new( - left=left, right=right, closed=closed, name=name) - - # validate expected - assert expected.closed == closed - assert expected.name == name - assert expected.dtype.subtype == data.dtype - tm.assert_index_equal(expected.left, data[:-1]) - tm.assert_index_equal(expected.right, data[1:]) - - # validated constructors - result = IntervalIndex(ivs, name=name) - tm.assert_index_equal(result, 
expected) - - result = IntervalIndex.from_intervals(ivs, name=name) - tm.assert_index_equal(result, expected) - - result = IntervalIndex.from_breaks(data, closed=closed, name=name) - tm.assert_index_equal(result, expected) - - result = IntervalIndex.from_arrays( - left, right, closed=closed, name=name) - tm.assert_index_equal(result, expected) - - result = IntervalIndex.from_tuples( - lzip(left, right), closed=closed, name=name) - tm.assert_index_equal(result, expected) - - result = Index(ivs, name=name) - assert isinstance(result, IntervalIndex) - tm.assert_index_equal(result, expected) - - # idempotent - tm.assert_index_equal(Index(expected), expected) - tm.assert_index_equal(IntervalIndex(expected), expected) - - result = IntervalIndex.from_intervals(expected) - tm.assert_index_equal(result, expected) - - result = IntervalIndex.from_intervals( - expected.values, name=expected.name) - tm.assert_index_equal(result, expected) - - left, right = expected.left, expected.right - result = IntervalIndex.from_arrays( - left, right, closed=expected.closed, name=expected.name) - tm.assert_index_equal(result, expected) - - result = IntervalIndex.from_tuples( - expected.to_tuples(), closed=expected.closed, name=expected.name) - tm.assert_index_equal(result, expected) - - breaks = expected.left.tolist() + [expected.right[-1]] - result = IntervalIndex.from_breaks( - breaks, closed=expected.closed, name=expected.name) - tm.assert_index_equal(result, expected) - - @pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50]) - def test_constructors_nan(self, closed, data): - # GH 18421 - expected_values = np.array(data, dtype=object) - expected_idx = IntervalIndex(data, closed=closed) - - # validate the expected index - assert expected_idx.closed == closed - tm.assert_numpy_array_equal(expected_idx.values, expected_values) - - result = IntervalIndex.from_tuples(data, closed=closed) - tm.assert_index_equal(result, expected_idx) - 
tm.assert_numpy_array_equal(result.values, expected_values) - - result = IntervalIndex.from_breaks([np.nan] + data, closed=closed) - tm.assert_index_equal(result, expected_idx) - tm.assert_numpy_array_equal(result.values, expected_values) - - result = IntervalIndex.from_arrays(data, data, closed=closed) - tm.assert_index_equal(result, expected_idx) - tm.assert_numpy_array_equal(result.values, expected_values) - - if closed == 'right': - # Can't specify closed for IntervalIndex.from_intervals - result = IntervalIndex.from_intervals(data) - tm.assert_index_equal(result, expected_idx) - tm.assert_numpy_array_equal(result.values, expected_values) - - @pytest.mark.parametrize('data', [ - [], - np.array([], dtype='int64'), - np.array([], dtype='float64'), - np.array([], dtype='datetime64[ns]')]) - def test_constructors_empty(self, data, closed): - # GH 18421 - expected_dtype = getattr(data, 'dtype', np.int64) - expected_values = np.array([], dtype=object) - expected_index = IntervalIndex(data, closed=closed) - - # validate the expected index - assert expected_index.empty - assert expected_index.closed == closed - assert expected_index.dtype.subtype == expected_dtype - tm.assert_numpy_array_equal(expected_index.values, expected_values) - - result = IntervalIndex.from_tuples(data, closed=closed) - tm.assert_index_equal(result, expected_index) - tm.assert_numpy_array_equal(result.values, expected_values) - - result = IntervalIndex.from_breaks(data, closed=closed) - tm.assert_index_equal(result, expected_index) - tm.assert_numpy_array_equal(result.values, expected_values) - - result = IntervalIndex.from_arrays(data, data, closed=closed) - tm.assert_index_equal(result, expected_index) - tm.assert_numpy_array_equal(result.values, expected_values) - - if closed == 'right': - # Can't specify closed for IntervalIndex.from_intervals - result = IntervalIndex.from_intervals(data) - tm.assert_index_equal(result, expected_index) - tm.assert_numpy_array_equal(result.values, 
expected_values) - - def test_constructors_errors(self): - - # scalar - msg = (r'IntervalIndex\(...\) must be called with a collection of ' - 'some kind, 5 was passed') - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex(5) - - # not an interval - msg = ("type <(class|type) 'numpy.int64'> with value 0 " - "is not an interval") - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex([0, 1]) - - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex.from_intervals([0, 1]) - - # invalid closed - msg = "invalid options for 'closed': invalid" - with tm.assert_raises_regex(ValueError, msg): - IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid') - - # mismatched closed within intervals - msg = 'intervals must all be closed on the same side' - with tm.assert_raises_regex(ValueError, msg): - IntervalIndex.from_intervals([Interval(0, 1), - Interval(1, 2, closed='left')]) - - with tm.assert_raises_regex(ValueError, msg): - IntervalIndex([Interval(0, 1), Interval(2, 3, closed='left')]) - - with tm.assert_raises_regex(ValueError, msg): - Index([Interval(0, 1), Interval(2, 3, closed='left')]) - - # mismatched closed inferred from intervals vs constructor. 
- msg = 'conflicting values for closed' - with tm.assert_raises_regex(ValueError, msg): - iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')] - IntervalIndex(iv, closed='neither') - - # no point in nesting periods in an IntervalIndex - msg = 'Period dtypes are not supported, use a PeriodIndex instead' - with tm.assert_raises_regex(ValueError, msg): - IntervalIndex.from_breaks( - pd.period_range('2000-01-01', periods=3)) - - # decreasing breaks/arrays - msg = 'left side of interval must be <= right side' - with tm.assert_raises_regex(ValueError, msg): - IntervalIndex.from_breaks(range(10, -1, -1)) - - with tm.assert_raises_regex(ValueError, msg): - IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1)) - - # GH 19016: categorical data - data = Categorical(list('01234abcde'), ordered=True) - msg = ('category, object, and string subtypes are not supported ' - 'for IntervalIndex') - - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex.from_breaks(data) - - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex.from_arrays(data[:-1], data[1:]) - - @pytest.mark.parametrize('data', [ - tuple('0123456789'), - list('abcdefghij'), - np.array(list('abcdefghij'), dtype=object), - np.array(list('abcdefghij'), dtype='<U1')]) - def test_constructors_errors_string(self, data): - # GH 19016 - left, right = data[:-1], data[1:] - tuples = lzip(left, right) - ivs = [Interval(l, r) for l, r in tuples] or data - msg = ('category, object, and string subtypes are not supported ' - 'for IntervalIndex') - - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex(ivs) - - with tm.assert_raises_regex(TypeError, msg): - Index(ivs) - - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex.from_intervals(ivs) - - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex.from_breaks(data) - - with tm.assert_raises_regex(TypeError, msg): - IntervalIndex.from_arrays(left, right) - - with tm.assert_raises_regex(TypeError, msg): - 
IntervalIndex.from_tuples(tuples) - - @pytest.mark.parametrize('tz_left, tz_right', [ - (None, 'UTC'), ('UTC', None), ('UTC', 'US/Eastern')]) - def test_constructors_errors_tz(self, tz_left, tz_right): - # GH 18537 - left = date_range('2017-01-01', periods=4, tz=tz_left) - right = date_range('2017-01-02', periods=4, tz=tz_right) - - # don't need to check IntervalIndex(...) or from_intervals, since - # mixed tz are disallowed at the Interval level - with pytest.raises(ValueError): - IntervalIndex.from_arrays(left, right) - - with pytest.raises(ValueError): - IntervalIndex.from_tuples(lzip(left, right)) - - with pytest.raises(ValueError): - breaks = left.tolist() + [right[-1]] - IntervalIndex.from_breaks(breaks) - def test_properties(self, closed): index = self.create_index(closed=closed) assert len(index) == 10 @@ -384,7 +141,7 @@ def test_ensure_copied_data(self, closed): check_same='same') # by-definition make a copy - result = IntervalIndex.from_intervals(index.values, copy=False) + result = IntervalIndex(index.values, copy=False) tm.assert_numpy_array_equal(index.left.values, result.left.values, check_same='copy') tm.assert_numpy_array_equal(index.right.values, result.right.values, diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index 268376b1f0d32..c2e40c79f8914 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -422,7 +422,7 @@ def test_astype(self): expected = ii.take([0, 1, -1]) tm.assert_index_equal(result, expected) - result = IntervalIndex.from_intervals(result.values) + result = IntervalIndex(result.values) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('name', [None, 'foo']) diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index 48f25112b45cf..f7262a2f0da63 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -236,7 +236,7 @@ def test_qcut_include_lowest(self): ii = 
qcut(values, 4) - ex_levels = IntervalIndex.from_intervals( + ex_levels = IntervalIndex( [Interval(-0.001, 2.25), Interval(2.25, 4.5), Interval(4.5, 6.75), @@ -333,8 +333,7 @@ def test_series_retbins(self): def test_qcut_duplicates_bin(self): # GH 7751 values = [0, 0, 0, 0, 1, 2, 3] - expected = IntervalIndex.from_intervals([Interval(-0.001, 1), - Interval(1, 3)]) + expected = IntervalIndex([Interval(-0.001, 1), Interval(1, 3)]) result = qcut(values, 3, duplicates='drop') tm.assert_index_equal(result.categories, expected) @@ -447,7 +446,7 @@ def test_datetime_cut(self): result, bins = cut(data, 3, retbins=True) expected = ( - Series(IntervalIndex.from_intervals([ + Series(IntervalIndex([ Interval(Timestamp('2012-12-31 23:57:07.200000'), Timestamp('2013-01-01 16:00:00')), Interval(Timestamp('2013-01-01 16:00:00'), @@ -480,7 +479,7 @@ def test_datetime_bin(self): data = [np.datetime64('2012-12-13'), np.datetime64('2012-12-15')] bin_data = ['2012-12-12', '2012-12-14', '2012-12-16'] expected = ( - Series(IntervalIndex.from_intervals([ + Series(IntervalIndex([ Interval(Timestamp(bin_data[0]), Timestamp(bin_data[1])), Interval(Timestamp(bin_data[1]), Timestamp(bin_data[2]))])) .astype(CDT(ordered=True)))
- [X] closes #19262 - [X] closes #19263 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Summary: - Added support for a `dtype` parameter to all IntervalIndex constructors - Allows users to override the inferred dtype - Deprecated `IntervalIndex.from_intervals` - Still added `dtype` parameter, since it's just a pass through to `IntervalIndex` - Removed usage and references to `IntervalIndex.from_intervals` throughout the codebase - Split construction tests off into `interval/test_construction.py` - Created a base class for common tests, and subclasses for each constructor - Was previously written in a more flat style, where each constructor was explicitly called - Expanded the tests to hit some previously untested behavior
https://api.github.com/repos/pandas-dev/pandas/pulls/19339
2018-01-22T03:53:42Z
2018-01-25T01:29:03Z
2018-01-25T01:29:02Z
2018-01-25T01:31:16Z
remove BlockManager.reindex
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5c3481ed6d4ff..c2d3d0852384c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -4407,42 +4407,6 @@ def _blklocs(self): """ compat with BlockManager """ return None - def reindex(self, new_axis, indexer=None, method=None, fill_value=None, - limit=None, copy=True): - # if we are the same and don't copy, just return - if self.index.equals(new_axis): - if copy: - return self.copy(deep=True) - else: - return self - - values = self._block.get_values() - - if indexer is None: - indexer = self.items.get_indexer_for(new_axis) - - if fill_value is None: - fill_value = np.nan - - new_values = algos.take_1d(values, indexer, fill_value=fill_value) - - # fill if needed - if method is not None or limit is not None: - new_values = missing.interpolate_2d(new_values, - method=method, - limit=limit, - fill_value=fill_value) - - if self._block.is_sparse: - make_block = self._block.make_block_same_class - - block = make_block(new_values, copy=copy, - placement=slice(0, len(new_axis))) - - mgr = SingleBlockManager(block, new_axis) - mgr._consolidate_inplace() - return mgr - def get_slice(self, slobj, axis=0): if axis >= self.ndim: raise IndexError("Requested axis not found in manager") diff --git a/pandas/core/series.py b/pandas/core/series.py index b7dcc48599f37..a14eb69d86377 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -197,8 +197,13 @@ def __init__(self, data=None, index=None, dtype=None, name=None, elif isinstance(data, SingleBlockManager): if index is None: index = data.index - else: - data = data.reindex(index, copy=copy) + elif not data.index.equals(index) or copy: + # GH#19275 SingleBlockManager input should only be called + # internally + raise AssertionError('Cannot pass both SingleBlockManager ' + '`data` argument and a different ' + '`index` argument. 
`copy` must ' + 'be False.') elif isinstance(data, Categorical): # GH12574: Allow dtype=category only, otherwise error if ((dtype is not None) and diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index 4b649927f8f72..2106fdf8e5e8e 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -166,9 +166,13 @@ def __init__(self, data=None, index=None, sparse_index=None, kind='block', data = data.astype(dtype) if index is None: index = data.index.view() - else: - - data = data.reindex(index, copy=False) + elif not data.index.equals(index) or copy: # pragma: no cover + # GH#19275 SingleBlockManager input should only be called + # internally + raise AssertionError('Cannot pass both SingleBlockManager ' + '`data` argument and a different ' + '`index` argument. `copy` must ' + 'be False.') else: length = len(index)
- [x] closes #19275 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19338
2018-01-22T03:32:00Z
2018-01-24T01:23:36Z
2018-01-24T01:23:35Z
2018-02-11T21:59:30Z
separate numeric tests so we can isolate division by zero
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 05ccb25960b1f..554b3e15d8f10 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -596,77 +596,81 @@ def test_divide_decimal(self): assert_series_equal(expected, s) - def test_div(self): + @pytest.mark.parametrize( + 'dtype2', + [ + np.int64, np.int32, np.int16, np.int8, + np.float64, np.float32, np.float16, + np.uint64, np.uint32, + np.uint16, np.uint8 + ]) + @pytest.mark.parametrize('dtype1', [np.int64, np.float64, np.uint64]) + def test_ser_div_ser(self, dtype1, dtype2): + # no longer do integer div for any ops, but deal with the 0's + first = Series([3, 4, 5, 8], name='first').astype(dtype1) + second = Series([0, 0, 0, 3], name='second').astype(dtype2) + with np.errstate(all='ignore'): - # no longer do integer div for any ops, but deal with the 0's - p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) - result = p['first'] / p['second'] - expected = Series( - p['first'].values.astype(float) / p['second'].values, - dtype='float64') - expected.iloc[0:3] = np.inf - assert_series_equal(result, expected) + expected = Series(first.values.astype(np.float64) / second.values, + dtype='float64', name=None) + expected.iloc[0:3] = np.inf - result = p['first'] / 0 - expected = Series(np.inf, index=p.index, name='first') - assert_series_equal(result, expected) + result = first / second + assert_series_equal(result, expected) + assert not result.equals(second / first) - p = p.astype('float64') - result = p['first'] / p['second'] - expected = Series(p['first'].values / p['second'].values) - assert_series_equal(result, expected) + def test_div_equiv_binop(self): + # Test Series.div as well as Series.__div__ + # float/integer issue + # GH#7785 + first = pd.Series([1, 0], name='first') + second = pd.Series([-0.01, -0.02], name='second') + expected = Series([-0.01, -np.inf]) - p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 
1, 1]}) - result = p['first'] / p['second'] - assert_series_equal(result, p['first'].astype('float64'), - check_names=False) - assert result.name is None - assert not result.equals(p['second'] / p['first']) - - # inf signing - s = Series([np.nan, 1., -1.]) - result = s / 0 - expected = Series([np.nan, np.inf, -np.inf]) - assert_series_equal(result, expected) + result = second.div(first) + assert_series_equal(result, expected, check_names=False) - # float/integer issue - # GH 7785 - p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)}) - expected = Series([-0.01, -np.inf]) + result = second / first + assert_series_equal(result, expected) - result = p['second'].div(p['first']) - assert_series_equal(result, expected, check_names=False) + def test_rdiv_zero_compat(self): + # GH#8674 + zero_array = np.array([0] * 5) + data = np.random.randn(5) + expected = pd.Series([0.] * 5) - result = p['second'] / p['first'] - assert_series_equal(result, expected) + result = zero_array / pd.Series(data) + assert_series_equal(result, expected) - # GH 9144 - s = Series([-1, 0, 1]) + result = pd.Series(zero_array) / data + assert_series_equal(result, expected) - result = 0 / s - expected = Series([0.0, nan, 0.0]) - assert_series_equal(result, expected) + result = pd.Series(zero_array) / pd.Series(data) + assert_series_equal(result, expected) - result = s / 0 - expected = Series([-inf, nan, inf]) - assert_series_equal(result, expected) + def test_div_zero_inf_signs(self): + # GH#9144, inf signing + ser = Series([-1, 0, 1], name='first') + expected = Series([-np.inf, np.nan, np.inf], name='first') - result = s // 0 - expected = Series([-inf, nan, inf]) - assert_series_equal(result, expected) + result = ser / 0 + assert_series_equal(result, expected) - # GH 8674 - zero_array = np.array([0] * 5) - data = np.random.randn(5) - expected = pd.Series([0.] 
* 5) - result = zero_array / pd.Series(data) - assert_series_equal(result, expected) + def test_rdiv_zero(self): + # GH#9144 + ser = Series([-1, 0, 1], name='first') + expected = Series([0.0, np.nan, 0.0], name='first') - result = pd.Series(zero_array) / data - assert_series_equal(result, expected) + result = 0 / ser + assert_series_equal(result, expected) - result = pd.Series(zero_array) / pd.Series(data) - assert_series_equal(result, expected) + def test_floordiv_div(self): + # GH#9144 + ser = Series([-1, 0, 1], name='first') + + result = ser // 0 + expected = Series([-inf, nan, inf], name='first') + assert_series_equal(result, expected) class TestTimedeltaSeriesArithmeticWithIntegers(object): @@ -1576,33 +1580,42 @@ def test_dt64_series_add_intlike(self, tz): class TestSeriesOperators(TestData): - def test_op_method(self): - def check(series, other, check_reverse=False): - simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow'] - if not compat.PY3: - simple_ops.append('div') - - for opname in simple_ops: - op = getattr(Series, opname) - - if op == 'div': - alt = operator.truediv - else: - alt = getattr(operator, opname) - - result = op(series, other) - expected = alt(series, other) - assert_almost_equal(result, expected) - if check_reverse: - rop = getattr(Series, "r" + opname) - result = rop(series, other) - expected = alt(other, series) - assert_almost_equal(result, expected) + @pytest.mark.parametrize( + 'ts', + [ + (lambda x: x, lambda x: x * 2, False), + (lambda x: x, lambda x: x[::2], False), + (lambda x: x, lambda x: 5, True), + (lambda x: tm.makeFloatSeries(), + lambda x: tm.makeFloatSeries(), + True) + ]) + @pytest.mark.parametrize('opname', ['add', 'sub', 'mul', 'floordiv', + 'truediv', 'div', 'pow']) + def test_op_method(self, opname, ts): + # check that Series.{opname} behaves like Series.__{opname}__, + series = ts[0](self.ts) + other = ts[1](self.ts) + check_reverse = ts[2] + + if opname == 'div' and compat.PY3: + pytest.skip('div test 
only for Py3') + + op = getattr(Series, opname) + + if op == 'div': + alt = operator.truediv + else: + alt = getattr(operator, opname) - check(self.ts, self.ts * 2) - check(self.ts, self.ts[::2]) - check(self.ts, 5, check_reverse=True) - check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True) + result = op(series, other) + expected = alt(series, other) + assert_almost_equal(result, expected) + if check_reverse: + rop = getattr(Series, "r" + opname) + result = rop(series, other) + expected = alt(other, series) + assert_almost_equal(result, expected) def test_neg(self): assert_series_equal(-self.series, -1 * self.series) @@ -1971,20 +1984,15 @@ def test_operators_corner(self): index=self.ts.index[:-5], name='ts') tm.assert_series_equal(added[:-5], expected) - def test_operators_reverse_object(self): + @pytest.mark.parametrize('op', [operator.add, operator.sub, operator.mul, + operator.truediv, operator.floordiv]) + def test_operators_reverse_object(self, op): # GH 56 arr = Series(np.random.randn(10), index=np.arange(10), dtype=object) - def _check_op(arr, op): - result = op(1., arr) - expected = op(1., arr.astype(float)) - assert_series_equal(result.astype(float), expected) - - _check_op(arr, operator.add) - _check_op(arr, operator.sub) - _check_op(arr, operator.mul) - _check_op(arr, operator.truediv) - _check_op(arr, operator.floordiv) + result = op(1., arr) + expected = op(1., arr.astype(float)) + assert_series_equal(result.astype(float), expected) def test_arith_ops_df_compat(self): # GH 1134
The upcoming fix(es) for #19322 are going to involve parametrizing a bunch of variants of division by zero. This separates out the existing test cases into method-specific tests, some of which will be parametrized in upcoming PRs. This PR does not change the aggregate contents of the tests.
https://api.github.com/repos/pandas-dev/pandas/pulls/19336
2018-01-22T00:21:09Z
2018-02-08T11:28:55Z
2018-02-08T11:28:55Z
2018-02-08T14:14:13Z
DOC: organize 0.23 bug fix whatsnew
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index fe5342c520196..ad0f4bdbcbac2 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -398,67 +398,82 @@ Bug Fixes ~~~~~~~~~ -Conversion -^^^^^^^^^^ +Datetimelike +^^^^^^^^^^^^ -- Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`) -- Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`) -- Fixed a bug where creating a Series from an array that contains both tz-naive and tz-aware values will result in a Series whose dtype is tz-aware instead of object (:issue:`16406`) +- Bug in :func:`Series.__sub__` subtracting a non-nanosecond ``np.datetime64`` object from a ``Series`` gave incorrect results (:issue:`7996`) +- Bug in :class:`DatetimeIndex`, :class:`TimedeltaIndex` addition and subtraction of zero-dimensional integer arrays gave incorrect results (:issue:`19012`) +- Bug in :func:`Series.__add__` adding Series with dtype ``timedelta64[ns]`` to a timezone-aware ``DatetimeIndex`` incorrectly dropped timezone information (:issue:`13905`) +- Bug in :func:`Timedelta.__floordiv__` and :func:`Timedelta.__rfloordiv__` dividing by many incompatible numpy objects was incorrectly allowed (:issue:`18846`) - Adding a ``Period`` object to a ``datetime`` or ``Timestamp`` object will now correctly raise a ``TypeError`` (:issue:`17983`) -- Fixed a bug where ``FY5253`` date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`) -- Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) -- Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, 
:issue:`18593`) -- Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`) - Bug in :class:`Timestamp` where comparison with an array of ``Timestamp`` objects would result in a ``RecursionError`` (:issue:`15183`) -- Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`) -- Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`) -- Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) - Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`) - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) -- Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) -- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) -- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) - Bug in :class:`Series`` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` had results cast to ``dtype='int64'`` (:issue:`17250`) - Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (issue:`19042`) - Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition 
or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (issue:`19043`) -- Fixed bug where comparing :class:`DatetimeIndex` failed to raise ``TypeError`` when attempting to compare timezone-aware and timezone-naive datetimelike objects (:issue:`18162`) - Bug in :class:`DatetimeIndex` where the repr was not showing high-precision time values at the end of a day (e.g., 23:59:59.999999999) (:issue:`19030`) - Bug where dividing a scalar timedelta-like object with :class:`TimedeltaIndex` performed the reciprocal operation (:issue:`19125`) -- Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`) +- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) +- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`) +- Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`) + +Timezones +^^^^^^^^^ + +- Bug in creating a ``Series`` from an array that contains both tz-naive and tz-aware values will result in a ``Series`` whose dtype is tz-aware instead of object (:issue:`16406`) +- Bug in comparison of timezone-aware :class:`DatetimeIndex` against ``NaT`` incorrectly raising ``TypeError`` (:issue:`19276`) +- Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`) +- Bug in comparing :class:`DatetimeIndex`, which failed to raise ``TypeError`` when attempting to compare timezone-aware and timezone-naive datetimelike objects (:issue:`18162`) - Bug in localization of a naive, datetime string in a ``Series`` constructor with a ``datetime64[ns, tz]`` dtype (:issue:`174151`) - :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`) -- Bug in :class:`Index` multiplication and 
division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`) +- Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`) +- Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) + +Offsets +^^^^^^^ + +- Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`) +- Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`) +- Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) +- Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) +- Bug in :class:`FY5253` where date offsets could incorrectly raise an ``AssertionError`` in arithmetic operatons (:issue:`14774`) +Numeric +^^^^^^^ +- Bug in :class:`Series` constructor with an int or float list where specifying ``dtype=str``, ``dtype='str'`` or ``dtype='U'`` failed to convert the data elements to strings (:issue:`16605`) +- Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`) +- Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`) +- Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`) + - -- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect 
dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) -- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`) -- Bug in comparison of timezone-aware :class:`DatetimeIndex` against ``NaT`` incorrectly raising ``TypeError`` (:issue:`19276`) + Indexing ^^^^^^^^ -- Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`) -- Bug in :func:`DataFrame.groupby` where tuples were interpreted as lists of keys rather than as keys (:issue:`17979`, :issue:`18249`) -- Bug in :func:`MultiIndex.get_level_values` which would return an invalid index on level of ints with missing values (:issue:`17924`) -- Bug in :func:`MultiIndex.remove_unused_levels` which would fill nan values (:issue:`18417`) -- Bug in :func:`MultiIndex.from_tuples`` which would fail to take zipped tuples in python3 (:issue:`18434`) - Bug in :class:`Index` construction from list of mixed type tuples (:issue:`18505`) - Bug in :func:`Index.drop` when passing a list of both tuples and non-tuples (:issue:`18304`) -- Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`) -- Bug in :func:`IntervalIndex.symmetric_difference` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`) +- Bug in :meth:`~DataFrame.drop`, :meth:`~Panel.drop`, :meth:`~Series.drop`, :meth:`~Index.drop` where no ``KeyError`` is raised when dropping a non-existent element from an axis that contains duplicates (:issue:`19186`) - Bug in indexing a datetimelike ``Index`` that raised ``ValueError`` instead of ``IndexError`` (:issue:`18386`). 
-- Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`) - :func:`Index.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`) - :func:`DatetimeIndex.to_series` now accepts ``index`` and ``name`` kwargs (:issue:`18699`) - Bug in indexing non-scalar value from ``Series`` having non-unique ``Index`` will return value flattened (:issue:`17610`) -- Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) - Bug in ``__setitem__`` when indexing a :class:`DataFrame` with a 2-d boolean ndarray (:issue:`18582`) -- Bug in :func:`MultiIndex.__contains__` where non-tuple keys would return ``True`` even if they had been dropped (:issue:`19027`) -- Bug in :func:`MultiIndex.set_labels` which would cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... 
] (:issue:`19057`) - Bug in ``str.extractall`` when there were no matches empty :class:`Index` was returned instead of appropriate :class:`MultiIndex` (:issue:`19034`) +- Bug in :class:`IntervalIndex` where empty and purely NA data was constructed inconsistently depending on the construction method (:issue:`18421`) +- Bug in :func:`IntervalIndex.symmetric_difference` where the symmetric difference with a non-``IntervalIndex`` did not raise (:issue:`18475`) - Bug in :class:`IntervalIndex` where set operations that returned an empty ``IntervalIndex`` had the wrong dtype (:issue:`19101`) -- Bug in :meth:`~DataFrame.drop`, :meth:`~Panel.drop`, :meth:`~Series.drop`, :meth:`~Index.drop` where no ``KeyError`` is raised when dropping a non-existent element from an axis that contains duplicates (:issue:`19186`) + +MultiIndex +^^^^^^^^^^ + +- Bug in :func:`MultiIndex.__contains__` where non-tuple keys would return ``True`` even if they had been dropped (:issue:`19027`) +- Bug in :func:`MultiIndex.set_labels` which would cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... 
] (:issue:`19057`) +- Bug in :func:`MultiIndex.get_level_values` which would return an invalid index on level of ints with missing values (:issue:`17924`) +- Bug in :func:`MultiIndex.remove_unused_levels` which would fill nan values (:issue:`18417`) +- Bug in :func:`MultiIndex.from_tuples`` which would fail to take zipped tuples in python3 (:issue:`18434`) - I/O @@ -488,6 +503,7 @@ Groupby/Resample/Rolling - Bug when grouping by a single column and aggregating with a class like ``list`` or ``tuple`` (:issue:`18079`) - Fixed regression in :func:`DataFrame.groupby` which would not emit an error when called with a tuple key not in the index (:issue:`18798`) - Bug in :func:`DataFrame.resample` which silently ignored unsupported (or mistyped) options for ``label``, ``closed`` and ``convention`` (:issue:`19303`) +- Bug in :func:`DataFrame.groupby` where tuples were interpreted as lists of keys rather than as keys (:issue:`17979`, :issue:`18249`) - Sparse @@ -512,14 +528,6 @@ Reshaping - Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`) - -Numeric -^^^^^^^ - -- Bug in :func:`Series.__sub__` subtracting a non-nanosecond ``np.datetime64`` object from a ``Series`` gave incorrect results (:issue:`7996`) -- Bug in :class:`DatetimeIndex`, :class:`TimedeltaIndex` addition and subtraction of zero-dimensional integer arrays gave incorrect results (:issue:`19012`) -- Bug in :func:`Series.__add__` adding Series with dtype ``timedelta64[ns]`` to a timezone-aware ``DatetimeIndex`` incorrectly dropped timezone information (:issue:`13905`) -- Bug in :func:`Timedelta.__floordiv__` and :func:`Timedelta.__rfloordiv__` dividing by many incompatible numpy objects was incorrectly allowed (:issue:`18846`) -- Categorical ^^^^^^^^^^^ @@ -529,6 +537,9 @@ Categorical when all the categoricals had the same categories, but in a different order. This affected :func:`pandas.concat` with Categorical data (:issue:`19096`). 
- Bug in ``Categorical.equals`` between two unordered categories with the same categories, but in a different order (:issue:`16603`) +- Bug in :meth:`Index.astype` with a categorical dtype where the resultant index is not converted to a :class:`CategoricalIndex` for all types of index (:issue:`18630`) +- Bug in :meth:`Series.astype` and ``Categorical.astype()`` where an existing categorical data does not get updated (:issue:`10696`, :issue:`18593`) +- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) - Other
https://api.github.com/repos/pandas-dev/pandas/pulls/19335
2018-01-21T22:27:38Z
2018-01-21T22:28:07Z
2018-01-21T22:28:07Z
2018-01-21T22:28:07Z
Split test_ops_compat into targeted tests
diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 1a6aabc2f258f..ef6523a9eb270 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -442,39 +442,36 @@ def test_tdi_floordiv_timedelta_scalar(self, scalar_td): res = tdi // (scalar_td) tm.assert_index_equal(res, expected) - # TODO: Split by operation, better name - def test_ops_compat(self): + def test_tdi_floordiv_tdlike_scalar(self, delta): + tdi = timedelta_range('1 days', '10 days', name='foo') + expected = Int64Index((np.arange(10) + 1) * 12, name='foo') - offsets = [pd.offsets.Hour(2), timedelta(hours=2), - np.timedelta64(2, 'h'), Timedelta(hours=2)] + result = tdi // delta + tm.assert_index_equal(result, expected, exact=False) + def test_tdi_mul_tdlike_scalar_raises(self, delta): rng = timedelta_range('1 days', '10 days', name='foo') + with pytest.raises(TypeError): + rng * delta - # multiply - for offset in offsets: - pytest.raises(TypeError, lambda: rng * offset) + def test_tdi_div_nat_raises(self): + # don't allow division by NaT (make could in the future) + rng = timedelta_range('1 days', '10 days', name='foo') + with pytest.raises(TypeError): + rng / pd.NaT - # divide + def test_tdi_div_tdlike_scalar(self, delta): + rng = timedelta_range('1 days', '10 days', name='foo') expected = Int64Index((np.arange(10) + 1) * 12, name='foo') - for offset in offsets: - result = rng / offset - tm.assert_index_equal(result, expected, exact=False) - # floor divide - expected = Int64Index((np.arange(10) + 1) * 12, name='foo') - for offset in offsets: - result = rng // offset - tm.assert_index_equal(result, expected, exact=False) + result = rng / delta + tm.assert_index_equal(result, expected, exact=False) - # divide with nats + def test_tdi_div_tdlike_scalar_with_nat(self, delta): rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo') expected = Float64Index([12, np.nan, 
24], name='foo') - for offset in offsets: - result = rng / offset - tm.assert_index_equal(result, expected) - - # don't allow division by NaT (make could in the future) - pytest.raises(TypeError, lambda: rng / pd.NaT) + result = rng / delta + tm.assert_index_equal(result, expected) def test_subtraction_ops(self): # with datetimes/timedelta and tdi/dti
This and a couple other test-organizing PRs are being split off of a WIP branch to fix division-by-zero consistency. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19334
2018-01-21T22:05:37Z
2018-01-22T11:14:47Z
2018-01-22T11:14:47Z
2018-02-11T21:58:49Z
Fix Index __mul__-like ops with timedelta scalars
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a4b943f995a33..7e03449f2ac57 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -744,6 +744,7 @@ Timedelta - Bug in :func:`Timedelta.__floordiv__`, :func:`Timedelta.__rfloordiv__` where operating with a ``Tick`` object would raise a ``TypeError`` instead of returning a numeric value (:issue:`19738`) - Bug in :func:`Period.asfreq` where periods near ``datetime(1, 1, 1)`` could be converted incorrectly (:issue:`19643`) - Bug in :func:`Timedelta.total_seconds()` causing precision errors i.e. ``Timedelta('30S').total_seconds()==30.000000000000004`` (:issue:`19458`) +- Multiplication of :class:`TimedeltaIndex` by ``TimedeltaIndex`` will now raise ``TypeError`` instead of raising ``ValueError`` in cases of length mis-match (:issue`19333`) - Timezones @@ -778,6 +779,7 @@ Numeric - Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`) - Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`) - Bug in :class:`DataFrame` flex arithmetic (e.g. 
``df.add(other, fill_value=foo)``) with a ``fill_value`` other than ``None`` failed to raise ``NotImplementedError`` in corner cases where either the frame or ``other`` has length zero (:issue:`19522`) +- Multiplication and division of numeric-dtyped :class:`Index` objects with timedelta-like scalars returns ``TimedeltaIndex`` instead of raising ``TypeError`` (:issue:`19333`) Indexing diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7dfa34bd634ad..59fe4bba649d3 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5,7 +5,7 @@ import numpy as np from pandas._libs import (lib, index as libindex, tslib as libts, algos as libalgos, join as libjoin, - Timestamp) + Timestamp, Timedelta) from pandas._libs.lib import is_datetime_array from pandas.compat import range, u, set_function_name @@ -16,7 +16,7 @@ from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, ABCMultiIndex, - ABCPeriodIndex, + ABCPeriodIndex, ABCTimedeltaIndex, ABCDateOffset) from pandas.core.dtypes.missing import isna, array_equivalent from pandas.core.dtypes.common import ( @@ -3918,7 +3918,21 @@ def dropna(self, how='any'): return self._shallow_copy() def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): - raise TypeError("can only perform ops with timedelta like values") + # Timedelta knows how to operate with np.array, so dispatch to that + # operation and then wrap the results + other = Timedelta(other) + values = self.values + if reversed: + values, other = other, values + + with np.errstate(all='ignore'): + result = op(values, other) + + attrs = self._get_attributes_dict() + attrs = self._maybe_update_attributes(attrs) + if op == divmod: + return Index(result[0], **attrs), Index(result[1], **attrs) + return Index(result, **attrs) def _evaluate_with_datetime_like(self, other, op, opstr): raise TypeError("can only perform ops with datetime like values") @@ -4061,6 +4075,9 @@ def _make_evaluate_binop(op, opstr, 
reversed=False, constructor=Index): def _evaluate_numeric_binop(self, other): if isinstance(other, (ABCSeries, ABCDataFrame)): return NotImplemented + elif isinstance(other, ABCTimedeltaIndex): + # Defer to subclass implementation + return NotImplemented other = self._validate_for_numeric_binop(other, op, opstr) diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 0ed92a67c7e14..0ac415ee0b701 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -1,5 +1,6 @@ from sys import getsizeof import operator +from datetime import timedelta import numpy as np from pandas._libs import index as libindex @@ -8,7 +9,7 @@ is_integer, is_scalar, is_int64_dtype) -from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.generic import ABCSeries, ABCTimedeltaIndex from pandas import compat from pandas.compat import lrange, range, get_range_parameters @@ -587,6 +588,15 @@ def _make_evaluate_binop(op, opstr, reversed=False, step=False): def _evaluate_numeric_binop(self, other): if isinstance(other, ABCSeries): return NotImplemented + elif isinstance(other, ABCTimedeltaIndex): + # Defer to TimedeltaIndex implementation + return NotImplemented + elif isinstance(other, (timedelta, np.timedelta64)): + # GH#19333 is_integer evaluated True on timedelta64, + # so we need to catch these explicitly + if reversed: + return op(other, self._int64index) + return op(self._int64index, other) other = self._validate_for_numeric_binop(other, op, opstr) attrs = self._get_attributes_dict() diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index c6883df7ee91a..bafb6ae2e45f4 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -13,7 +13,7 @@ import pandas.util.testing as tm import pandas as pd -from pandas._libs.tslib import Timestamp +from pandas._libs.tslib import Timestamp, Timedelta from pandas.tests.indexes.common import Base @@ -26,6 +26,42 @@ def 
full_like(array, value): return ret +class TestIndexArithmeticWithTimedeltaScalar(object): + + @pytest.mark.parametrize('index', [ + Int64Index(range(1, 11)), + UInt64Index(range(1, 11)), + Float64Index(range(1, 11)), + RangeIndex(1, 11)]) + @pytest.mark.parametrize('scalar_td', [Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta()]) + def test_index_mul_timedelta(self, scalar_td, index): + # GH#19333 + expected = pd.timedelta_range('1 days', '10 days') + + result = index * scalar_td + tm.assert_index_equal(result, expected) + commute = scalar_td * index + tm.assert_index_equal(commute, expected) + + @pytest.mark.parametrize('index', [Int64Index(range(1, 3)), + UInt64Index(range(1, 3)), + Float64Index(range(1, 3)), + RangeIndex(1, 3)]) + @pytest.mark.parametrize('scalar_td', [Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta()]) + def test_index_rdiv_timedelta(self, scalar_td, index): + expected = pd.TimedeltaIndex(['1 Day', '12 Hours']) + + result = scalar_td / index + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError): + index / scalar_td + + class Numeric(Base): def test_numeric_compat(self): diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 3dc60ed33b958..c6e5b477a2a06 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -351,7 +351,7 @@ def test_dti_mul_dti_raises(self): def test_dti_mul_too_short_raises(self): idx = self._holder(np.arange(5, dtype='int64')) - with pytest.raises(ValueError): + with pytest.raises(TypeError): idx * self._holder(np.arange(3)) with pytest.raises(ValueError): idx * np.array([1, 2]) @@ -527,6 +527,20 @@ def test_tdi_div_tdlike_scalar_with_nat(self, delta): result = rng / delta tm.assert_index_equal(result, expected) + @pytest.mark.parametrize('other', [np.arange(1, 11), + 
pd.Int64Index(range(1, 11)), + pd.UInt64Index(range(1, 11)), + pd.Float64Index(range(1, 11)), + pd.RangeIndex(1, 11)]) + def test_tdi_rmul_arraylike(self, other): + tdi = TimedeltaIndex(['1 Day'] * 10) + expected = timedelta_range('1 days', '10 days') + + result = other * tdi + tm.assert_index_equal(result, expected) + commute = tdi * other + tm.assert_index_equal(commute, expected) + def test_subtraction_ops(self): # with datetimes/timedelta and tdi/dti tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
Fixes the following current behavior for each of the numeric index classes and each of the basic timedelta-like scalars: ``` idx = pd.Index(range(3)) td = pd.Timedelta(days=1) >>> idx * td TypeError: can only perform ops with timedelta like values >>> td * idx TypeError: can only perform ops with timedelta like values >>> td / idx TypeError: can only perform ops with timedelta like values >>> td // idx TypeError: can only perform ops with timedelta like values ``` - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19333
2018-01-21T21:46:55Z
2018-02-22T01:37:34Z
2018-02-22T01:37:34Z
2018-02-22T05:45:23Z
Refactor test_parquet.py to use check_round_trip at module level
diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index d472a5ed23c75..8a6a22abe23fa 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -110,48 +110,79 @@ def df_full(): pd.Timestamp('20130103')]}) -def test_invalid_engine(df_compat): +def check_round_trip(df, engine=None, path=None, + write_kwargs=None, read_kwargs=None, + expected=None, check_names=True, + repeat=2): + """Verify parquet serializer and deserializer produce the same results. + + Performs a pandas to disk and disk to pandas round trip, + then compares the 2 resulting DataFrames to verify equality. + + Parameters + ---------- + df: Dataframe + engine: str, optional + 'pyarrow' or 'fastparquet' + path: str, optional + write_kwargs: dict of str:str, optional + read_kwargs: dict of str:str, optional + expected: DataFrame, optional + Expected deserialization result, otherwise will be equal to `df` + check_names: list of str, optional + Closed set of column names to be compared + repeat: int, optional + How many times to repeat the test + """ + + write_kwargs = write_kwargs or {'compression': None} + read_kwargs = read_kwargs or {} + + if expected is None: + expected = df + + if engine: + write_kwargs['engine'] = engine + read_kwargs['engine'] = engine + + def compare(repeat): + for _ in range(repeat): + df.to_parquet(path, **write_kwargs) + actual = read_parquet(path, **read_kwargs) + tm.assert_frame_equal(expected, actual, + check_names=check_names) + + if path is None: + with tm.ensure_clean() as path: + compare(repeat) + else: + compare(repeat) + +def test_invalid_engine(df_compat): with pytest.raises(ValueError): - df_compat.to_parquet('foo', 'bar') + check_round_trip(df_compat, 'foo', 'bar') def test_options_py(df_compat, pa): # use the set option - df = df_compat - with tm.ensure_clean() as path: - - with pd.option_context('io.parquet.engine', 'pyarrow'): - df.to_parquet(path) - - result = read_parquet(path) - 
tm.assert_frame_equal(result, df) + with pd.option_context('io.parquet.engine', 'pyarrow'): + check_round_trip(df_compat) def test_options_fp(df_compat, fp): # use the set option - df = df_compat - with tm.ensure_clean() as path: - - with pd.option_context('io.parquet.engine', 'fastparquet'): - df.to_parquet(path, compression=None) - - result = read_parquet(path) - tm.assert_frame_equal(result, df) + with pd.option_context('io.parquet.engine', 'fastparquet'): + check_round_trip(df_compat) def test_options_auto(df_compat, fp, pa): + # use the set option - df = df_compat - with tm.ensure_clean() as path: - - with pd.option_context('io.parquet.engine', 'auto'): - df.to_parquet(path) - - result = read_parquet(path) - tm.assert_frame_equal(result, df) + with pd.option_context('io.parquet.engine', 'auto'): + check_round_trip(df_compat) def test_options_get_engine(fp, pa): @@ -228,53 +259,23 @@ def check_error_on_write(self, df, engine, exc): with tm.ensure_clean() as path: to_parquet(df, path, engine, compression=None) - def check_round_trip(self, df, engine, expected=None, path=None, - write_kwargs=None, read_kwargs=None, - check_names=True): - - if write_kwargs is None: - write_kwargs = {'compression': None} - - if read_kwargs is None: - read_kwargs = {} - - if expected is None: - expected = df - - if path is None: - with tm.ensure_clean() as path: - check_round_trip_equals(df, path, engine, - write_kwargs=write_kwargs, - read_kwargs=read_kwargs, - expected=expected, - check_names=check_names) - else: - check_round_trip_equals(df, path, engine, - write_kwargs=write_kwargs, - read_kwargs=read_kwargs, - expected=expected, - check_names=check_names) - class TestBasic(Base): def test_error(self, engine): - for obj in [pd.Series([1, 2, 3]), 1, 'foo', pd.Timestamp('20130101'), np.array([1, 2, 3])]: self.check_error_on_write(obj, engine, ValueError) def test_columns_dtypes(self, engine): - df = pd.DataFrame({'string': list('abc'), 'int': list(range(1, 4))}) # unicode 
df.columns = [u'foo', u'bar'] - self.check_round_trip(df, engine) + check_round_trip(df, engine) def test_columns_dtypes_invalid(self, engine): - df = pd.DataFrame({'string': list('abc'), 'int': list(range(1, 4))}) @@ -302,8 +303,7 @@ def test_compression(self, engine, compression): pytest.importorskip('brotli') df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, - write_kwargs={'compression': compression}) + check_round_trip(df, engine, write_kwargs={'compression': compression}) def test_read_columns(self, engine): # GH18154 @@ -311,8 +311,8 @@ def test_read_columns(self, engine): 'int': list(range(1, 4))}) expected = pd.DataFrame({'string': list('abc')}) - self.check_round_trip(df, engine, expected=expected, - read_kwargs={'columns': ['string']}) + check_round_trip(df, engine, expected=expected, + read_kwargs={'columns': ['string']}) def test_write_index(self, engine): check_names = engine != 'fastparquet' @@ -323,7 +323,7 @@ def test_write_index(self, engine): pytest.skip("pyarrow is < 0.7.0") df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine) + check_round_trip(df, engine) indexes = [ [2, 3, 4], @@ -334,12 +334,12 @@ def test_write_index(self, engine): # non-default index for index in indexes: df.index = index - self.check_round_trip(df, engine, check_names=check_names) + check_round_trip(df, engine, check_names=check_names) # index with meta-data df.index = [0, 1, 2] df.index.name = 'foo' - self.check_round_trip(df, engine) + check_round_trip(df, engine) def test_write_multiindex(self, pa_ge_070): # Not suppoprted in fastparquet as of 0.1.3 or older pyarrow version @@ -348,7 +348,7 @@ def test_write_multiindex(self, pa_ge_070): df = pd.DataFrame({'A': [1, 2, 3]}) index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]) df.index = index - self.check_round_trip(df, engine) + check_round_trip(df, engine) def test_write_column_multiindex(self, engine): # column multi-index @@ -357,7 +357,6 @@ def 
test_write_column_multiindex(self, engine): self.check_error_on_write(df, engine, ValueError) def test_multiindex_with_columns(self, pa_ge_070): - engine = pa_ge_070 dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS') df = pd.DataFrame(np.random.randn(2 * len(dates), 3), @@ -368,14 +367,10 @@ def test_multiindex_with_columns(self, pa_ge_070): index2 = index1.copy(names=None) for index in [index1, index2]: df.index = index - with tm.ensure_clean() as path: - df.to_parquet(path, engine) - result = read_parquet(path, engine) - expected = df - tm.assert_frame_equal(result, expected) - result = read_parquet(path, engine, columns=['A', 'B']) - expected = df[['A', 'B']] - tm.assert_frame_equal(result, expected) + + check_round_trip(df, engine) + check_round_trip(df, engine, read_kwargs={'columns': ['A', 'B']}, + expected=df[['A', 'B']]) class TestParquetPyArrow(Base): @@ -391,7 +386,7 @@ def test_basic(self, pa, df_full): tz='Europe/Brussels') df['bool_with_none'] = [True, None, True] - self.check_round_trip(df, pa) + check_round_trip(df, pa) @pytest.mark.xfail(reason="pyarrow fails on this (ARROW-1883)") def test_basic_subset_columns(self, pa, df_full): @@ -402,8 +397,8 @@ def test_basic_subset_columns(self, pa, df_full): df['datetime_tz'] = pd.date_range('20130101', periods=3, tz='Europe/Brussels') - self.check_round_trip(df, pa, expected=df[['string', 'int']], - read_kwargs={'columns': ['string', 'int']}) + check_round_trip(df, pa, expected=df[['string', 'int']], + read_kwargs={'columns': ['string', 'int']}) def test_duplicate_columns(self, pa): # not currently able to handle duplicate columns @@ -433,7 +428,7 @@ def test_categorical(self, pa_ge_070): # de-serialized as object expected = df.assign(a=df.a.astype(object)) - self.check_round_trip(df, pa, expected) + check_round_trip(df, pa, expected=expected) def test_categorical_unsupported(self, pa_lt_070): pa = pa_lt_070 @@ -444,20 +439,19 @@ def test_categorical_unsupported(self, pa_lt_070): def 
test_s3_roundtrip(self, df_compat, s3_resource, pa): # GH #19134 - self.check_round_trip(df_compat, pa, - path='s3://pandas-test/pyarrow.parquet') + check_round_trip(df_compat, pa, + path='s3://pandas-test/pyarrow.parquet') class TestParquetFastParquet(Base): def test_basic(self, fp, df_full): - df = df_full # additional supported types for fastparquet df['timedelta'] = pd.timedelta_range('1 day', periods=3) - self.check_round_trip(df, fp) + check_round_trip(df, fp) @pytest.mark.skip(reason="not supported") def test_duplicate_columns(self, fp): @@ -470,7 +464,7 @@ def test_duplicate_columns(self, fp): def test_bool_with_none(self, fp): df = pd.DataFrame({'a': [True, None, False]}) expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16') - self.check_round_trip(df, fp, expected=expected) + check_round_trip(df, fp, expected=expected) def test_unsupported(self, fp): @@ -486,7 +480,7 @@ def test_categorical(self, fp): if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"): pytest.skip("CategoricalDtype not supported for older fp") df = pd.DataFrame({'a': pd.Categorical(list('abc'))}) - self.check_round_trip(df, fp) + check_round_trip(df, fp) def test_datetime_tz(self, fp): # doesn't preserve tz @@ -495,7 +489,7 @@ def test_datetime_tz(self, fp): # warns on the coercion with catch_warnings(record=True): - self.check_round_trip(df, fp, df.astype('datetime64[ns]')) + check_round_trip(df, fp, expected=df.astype('datetime64[ns]')) def test_filter_row_groups(self, fp): d = {'a': list(range(0, 3))} @@ -508,5 +502,5 @@ def test_filter_row_groups(self, fp): def test_s3_roundtrip(self, df_compat, s3_resource, fp): # GH #19134 - self.check_round_trip(df_compat, fp, - path='s3://pandas-test/fastparquet.parquet') + check_round_trip(df_compat, fp, + path='s3://pandas-test/fastparquet.parquet')
Refactoring and unification of testing approach in `test_parquet.py` module. Iteration upon work that was done for https://github.com/pandas-dev/pandas/pull/19135#issuecomment-358161165 - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] tests added / passed
https://api.github.com/repos/pandas-dev/pandas/pulls/19332
2018-01-21T19:39:38Z
2018-01-23T11:36:51Z
2018-01-23T11:36:51Z
2018-05-16T05:07:38Z
TST: Clean up json/test_compression.py
diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index a83ec53904b28..2cf4c435bdc12 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -1,38 +1,10 @@ import pytest import pandas as pd -from pandas import compat import pandas.util.testing as tm -import pandas.util._test_decorators as td from pandas.util.testing import assert_frame_equal, assert_raises_regex -COMPRESSION_TYPES = [None, 'bz2', 'gzip', - pytest.param('xz', marks=td.skip_if_no_lzma)] - - -def decompress_file(path, compression): - if compression is None: - f = open(path, 'rb') - elif compression == 'gzip': - import gzip - f = gzip.GzipFile(path, 'rb') - elif compression == 'bz2': - import bz2 - f = bz2.BZ2File(path, 'rb') - elif compression == 'xz': - lzma = compat.import_lzma() - f = lzma.open(path, 'rb') - else: - msg = 'Unrecognized compression type: {}'.format(compression) - raise ValueError(msg) - - result = f.read().decode('utf8') - f.close() - return result - - -@pytest.mark.parametrize('compression', COMPRESSION_TYPES) def test_compression_roundtrip(compression): df = pd.DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], @@ -43,8 +15,9 @@ def test_compression_roundtrip(compression): assert_frame_equal(df, pd.read_json(path, compression=compression)) # explicitly ensure file was compressed. 
- uncompressed_content = decompress_file(path, compression) - assert_frame_equal(df, pd.read_json(uncompressed_content)) + with tm.decompress_file(path, compression) as fh: + result = fh.read().decode('utf8') + assert_frame_equal(df, pd.read_json(result)) def test_compress_zip_value_error(): @@ -67,7 +40,6 @@ def test_read_zipped_json(): assert_frame_equal(uncompressed_df, compressed_df) -@pytest.mark.parametrize('compression', COMPRESSION_TYPES) def test_with_s3_url(compression): boto3 = pytest.importorskip('boto3') pytest.importorskip('s3fs') @@ -88,7 +60,6 @@ def test_with_s3_url(compression): assert_frame_equal(df, roundtripped_df) -@pytest.mark.parametrize('compression', COMPRESSION_TYPES) def test_lines_with_compression(compression): with tm.ensure_clean() as path: df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') @@ -98,7 +69,6 @@ def test_lines_with_compression(compression): assert_frame_equal(df, roundtripped_df) -@pytest.mark.parametrize('compression', COMPRESSION_TYPES) def test_chunksize_with_compression(compression): with tm.ensure_clean() as path: df = pd.read_json('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}')
xref #19226 Use new ``decompress_file`` utility and compression fixture to clean up json compression tests
https://api.github.com/repos/pandas-dev/pandas/pulls/19331
2018-01-21T15:50:00Z
2018-01-21T17:50:06Z
2018-01-21T17:50:06Z
2018-01-22T19:51:27Z
BUG: DatetimeIndex(tz) & single column name, return empty df (GH19157)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 4dde76dee46a5..3016b0490873b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -431,6 +431,7 @@ Timezones - :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`) - Bug in tz-aware :class:`DatetimeIndex` where addition/subtraction with a :class:`TimedeltaIndex` or array with ``dtype='timedelta64[ns]'`` was incorrect (:issue:`17558`) - Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) +- Bug in the :class:`DataFrame` constructor, where tz-aware Datetimeindex and a given column name will result in an empty ``DataFrame`` (:issue:`19157`) Offsets ^^^^^^^ diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 847779b1747cf..7328cd336babf 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -512,7 +512,11 @@ def _get_axes(N, K, index=index, columns=columns): return _arrays_to_mgr([values], columns, index, columns, dtype=dtype) elif is_datetimetz(values): - return self._init_dict({0: values}, index, columns, dtype=dtype) + # GH19157 + if columns is None: + columns = [0] + return _arrays_to_mgr([values], columns, index, columns, + dtype=dtype) # by definition an array here # the dtypes will be coerced to a single dtype diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index b7d3a60ecf6e4..8b57e96e6fa06 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -2092,3 +2092,14 @@ def test_frame_timeseries_to_records(self): result['index'].dtype == 'M8[ns]' result = df.to_records(index=False) + + def test_frame_timeseries_column(self): + # GH19157 + dr = date_range(start='20130101T10:00:00', periods=3, freq='T', + tz='US/Eastern') + result = DataFrame(dr, columns=['timestamps']) + expected = DataFrame({'timestamps': [ + 
Timestamp('20130101T10:00:00', tz='US/Eastern'), + Timestamp('20130101T10:01:00', tz='US/Eastern'), + Timestamp('20130101T10:02:00', tz='US/Eastern')]}) + tm.assert_frame_equal(result, expected)
- [x] closes #19157 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This issue is due to self._init_dict({0: values}, index, columns, dtype=dtype) will call for the filtering if columns passed (`data = {k: v for k, v in compat.iteritems(data) if k in columns}`), see function ``_init_dict`` of [frame.py](https://github.com/pandas-dev/pandas/blob/master/pandas/core/frame.py) So `self._init_dict({0: values}, index, columns, dtype=dtype)` expects the column name of values as '0', but since we pass a column with a different name, upon filtering it will result in an empty DataFrame. My solution assumes that the conversion of a series of DatetimeIndex with tz_info. Hence, we will initialize a DataFrame according to the given column name. If no column name specified, index '0' is chosen. I introduced an assertion to warn the users if multiple column names are passed. Update 27-01-2018: The updated PR includes a test, and update on whatsnew entry. The revised solution uses ``_arrays_to_mgr`` instead, such that a default column name 0 is specified if ``columns`` not specified.
https://api.github.com/repos/pandas-dev/pandas/pulls/19330
2018-01-21T13:57:58Z
2018-01-27T16:55:24Z
2018-01-27T16:55:23Z
2018-01-27T16:55:48Z
Change IntervalIndex set-ops error code type
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 86fc47dee09fc..cf3c3089750f8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -312,6 +312,7 @@ Other API Changes - Addition or subtraction of ``NaT`` from :class:`TimedeltaIndex` will return ``TimedeltaIndex`` instead of ``DatetimeIndex`` (:issue:`19124`) - :func:`DatetimeIndex.shift` and :func:`TimedeltaIndex.shift` will now raise ``NullFrequencyError`` (which subclasses ``ValueError``, which was raised in older versions) when the index object frequency is ``None`` (:issue:`19147`) - Addition and subtraction of ``NaN`` from a :class:`Series` with ``dtype='timedelta64[ns]'`` will raise a ``TypeError` instead of treating the ``NaN`` as ``NaT`` (:issue:`19274`) +- Set operations (union, difference...) on :class:`IntervalIndex` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError`` (:issue:`19329`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 58b1bdb3f55ea..68145ebaed7e7 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -1152,12 +1152,17 @@ def insert(self, loc, item): new_right = self.right.insert(loc, right_insert) return self._shallow_copy(new_left, new_right) - def _as_like_interval_index(self, other, error_msg): + def _as_like_interval_index(self, other): self._assert_can_do_setop(other) other = _ensure_index(other) - if (not isinstance(other, IntervalIndex) or - self.closed != other.closed): - raise ValueError(error_msg) + if not isinstance(other, IntervalIndex): + msg = ('the other index needs to be an IntervalIndex too, but ' + 'was type {}').format(other.__class__.__name__) + raise TypeError(msg) + elif self.closed != other.closed: + msg = ('can only do set operations between two IntervalIndex ' + 'objects that are closed on the same side') + raise ValueError(msg) return other def 
_concat_same_dtype(self, to_concat, name): @@ -1296,9 +1301,7 @@ def equals(self, other): def _setop(op_name): def func(self, other): - msg = ('can only do set operations between two IntervalIndex ' - 'objects that are closed on the same side') - other = self._as_like_interval_index(other, msg) + other = self._as_like_interval_index(other) # GH 19016: ensure set op will not return a prohibited dtype subtypes = [self.dtype.subtype, other.dtype.subtype] diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 9895ee06a22c0..345d3a9a0878b 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -934,12 +934,14 @@ def test_set_operation_errors(self, closed, op_name): set_op = getattr(index, op_name) # non-IntervalIndex - msg = ('can only do set operations between two IntervalIndex objects ' - 'that are closed on the same side') - with tm.assert_raises_regex(ValueError, msg): + msg = ('the other index needs to be an IntervalIndex too, but ' + 'was type Int64Index') + with tm.assert_raises_regex(TypeError, msg): set_op(Index([1, 2, 3])) # mixed closed + msg = ('can only do set operations between two IntervalIndex objects ' + 'that are closed on the same side') for other_closed in {'right', 'left', 'both', 'neither'} - {closed}: other = self.create_index(closed=other_closed) with tm.assert_raises_regex(ValueError, msg):
- [x] xref #19021 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Set operations (union, difference...) on ``IntervalIndex`` with incompatible index types will now raise a ``TypeError`` rather than a ``ValueError``. This PR is needed to make the changes requested in #19021. EDIT: I've improved the error message also. Previously, you'd get: ```python >>> pd.IntervalIndex.from_breaks([0,1,2,3]).union(pd.RangeIndex(3)) ValueError: can only do set operations between two IntervalIndex objects that are closed on the same side ``` Which made no sense in this case. Now we get: ```python >>> pd.IntervalIndex.from_breaks([0,1,2,3]).union(pd.RangeIndex(3)) TypeError: the other index needs to be an IntervalIndex too, but was type RangeIndex ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19329
2018-01-21T06:00:52Z
2018-01-21T15:50:03Z
2018-01-21T15:50:03Z
2018-01-22T07:04:50Z
small cleanups aggregated
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6d0a415f5b420..105fe9622a93f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -5,7 +5,7 @@ import numpy as np from pandas._libs import (lib, index as libindex, tslib as libts, algos as libalgos, join as libjoin, - Timestamp, Timedelta, ) + Timestamp) from pandas._libs.lib import is_datetime_array from pandas.compat import range, u, set_function_name @@ -3979,7 +3979,7 @@ def _validate_for_numeric_binop(self, other, op, opstr): internal method called by ops """ # if we are an inheritor of numeric, - # but not actually numeric (e.g. DatetimeIndex/PeriodInde) + # but not actually numeric (e.g. DatetimeIndex/PeriodIndex) if not self._is_numeric_dtype: raise TypeError("cannot evaluate a numeric op {opstr} " "for type: {typ}".format( @@ -4006,7 +4006,7 @@ def _validate_for_numeric_binop(self, other, op, opstr): raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") elif isinstance(other, (ABCDateOffset, np.timedelta64, - Timedelta, datetime.timedelta)): + datetime.timedelta)): # higher up to handle pass elif isinstance(other, (Timestamp, np.datetime64)): @@ -4031,13 +4031,13 @@ def _evaluate_numeric_binop(self, other): # handle time-based others if isinstance(other, (ABCDateOffset, np.timedelta64, - Timedelta, datetime.timedelta)): + datetime.timedelta)): return self._evaluate_with_timedelta_like(other, op, opstr, reversed) elif isinstance(other, (Timestamp, np.datetime64)): return self._evaluate_with_datetime_like(other, op, opstr) - # if we are a reversed non-communative op + # if we are a reversed non-commutative op values = self.values if reversed: values, other = other, values @@ -4081,11 +4081,8 @@ def _evaluate_numeric_binop(self, other): cls.__divmod__ = _make_evaluate_binop( divmod, '__divmod__', - constructor=lambda result, **attrs: ( - Index(result[0], **attrs), - Index(result[1], **attrs), - ), - ) + constructor=lambda result, 
**attrs: (Index(result[0], **attrs), + Index(result[1], **attrs))) @classmethod def _add_numeric_methods_unary(cls): @@ -4275,8 +4272,7 @@ def _ensure_index(index_like, copy=False): def _get_na_value(dtype): if is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype): return libts.NaT - return {np.datetime64: libts.NaT, - np.timedelta64: libts.NaT}.get(dtype, np.nan) + return np.nan def _ensure_has_len(seq): diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 10a923c056be2..bafccbf35dae3 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -595,7 +595,7 @@ def _evaluate_numeric_binop(self, other): self, other = other, self try: - # alppy if we have an override + # apply if we have an override if step: with np.errstate(all='ignore'): rstep = step(self._step, other) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index fc04d9d291bf9..d6922182e47c7 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -150,22 +150,7 @@ def names(x): return new_methods -def add_methods(cls, new_methods, force, select, exclude): - if select and exclude: - raise TypeError("May only pass either select or exclude") - - if select: - select = set(select) - methods = {} - for key, method in new_methods.items(): - if key in select: - methods[key] = method - new_methods = methods - - if exclude: - for k in exclude: - new_methods.pop(k, None) - +def add_methods(cls, new_methods, force): for name, method in new_methods.items(): if force or name not in cls.__dict__: bind_method(cls, name, method) @@ -175,8 +160,8 @@ def add_methods(cls, new_methods, force, select, exclude): # Arithmetic def add_special_arithmetic_methods(cls, arith_method=None, comp_method=None, bool_method=None, - use_numexpr=True, force=False, select=None, - exclude=None, have_divmod=False): + use_numexpr=True, force=False, + have_divmod=False): """ Adds the full suite of special arithmetic methods (``__add__``, ``__sub__``, etc.) to the class. 
@@ -195,10 +180,6 @@ def add_special_arithmetic_methods(cls, arith_method=None, force : bool, default False if False, checks whether function is defined **on ``cls.__dict__``** before defining if True, always defines functions on class base - select : iterable of strings (optional) - if passed, only sets functions with names in select - exclude : iterable of strings (optional) - if passed, will not set functions with names in exclude have_divmod : bool, (optional) should a divmod method be added? this method is special because it returns a tuple of cls instead of a single element of type cls @@ -247,14 +228,12 @@ def f(self, other): __ior__=_wrap_inplace_method(new_methods["__or__"]), __ixor__=_wrap_inplace_method(new_methods["__xor__"]))) - add_methods(cls, new_methods=new_methods, force=force, select=select, - exclude=exclude) + add_methods(cls, new_methods=new_methods, force=force) def add_flex_arithmetic_methods(cls, flex_arith_method, flex_comp_method=None, flex_bool_method=None, - use_numexpr=True, force=False, select=None, - exclude=None): + use_numexpr=True, force=False): """ Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``) to the class. 
@@ -271,10 +250,6 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, force : bool, default False if False, checks whether function is defined **on ``cls.__dict__``** before defining if True, always defines functions on class base - select : iterable of strings (optional) - if passed, only sets functions with names in select - exclude : iterable of strings (optional) - if passed, will not set functions with names in exclude """ # in frame, default axis is 'columns', doesn't matter for series and panel new_methods = _create_methods(flex_arith_method, @@ -289,8 +264,7 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, if k in new_methods: new_methods.pop(k) - add_methods(cls, new_methods=new_methods, force=force, select=select, - exclude=exclude) + add_methods(cls, new_methods=new_methods, force=force) def _align_method_SERIES(left, right, align_asobject=False): @@ -389,16 +363,16 @@ def wrapper(left, right, name=name, na_op=na_op): return NotImplemented left, right = _align_method_SERIES(left, right) + res_name = _get_series_op_result_name(left, right) + if is_datetime64_dtype(left) or is_datetime64tz_dtype(left): result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) - res_name = _get_series_op_result_name(left, right) return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) elif is_timedelta64_dtype(left): result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex) - res_name = _get_series_op_result_name(left, right) return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) @@ -409,7 +383,6 @@ def wrapper(left, right, name=name, na_op=na_op): rvalues = getattr(rvalues, 'values', rvalues) result = safe_na_op(lvalues, rvalues) - res_name = _get_series_op_result_name(left, right) return construct_result(left, result, index=left.index, name=res_name, dtype=None)
A few cleanups that I've found myself making repeatedly in WIP branches, figured it was worth pushing them to avoid cluttering diffs down the road. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19328
2018-01-21T00:58:00Z
2018-01-21T15:10:46Z
2018-01-21T15:10:46Z
2018-01-23T04:40:02Z
BUG: Concatentation of TZ-aware dataframes (#12396) (#18447)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index d10d51352d0e4..f51dcf662f593 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -1370,6 +1370,8 @@ Reshaping - Bug in :meth:`DataFrame.astype` where column metadata is lost when converting to categorical or a dictionary of dtypes (:issue:`19920`) - Bug in :func:`cut` and :func:`qcut` where timezone information was dropped (:issue:`19872`) - Bug in :class:`Series` constructor with a ``dtype=str``, previously raised in some cases (:issue:`19853`) +- Bug in :func:`concat` which raises an error when concatenating TZ-aware dataframes and all-NaT dataframes (:issue:`12396`) +- Bug in :func:`concat` which raises an error when concatenating empty TZ-aware series (:issue:`18447`) - Bug in :func:`get_dummies`, and :func:`select_dtypes`, where duplicate column names caused incorrect behavior (:issue:`20848`) - Bug in :func:`isna`, which cannot handle ambiguous typed lists (:issue:`20675`) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 4aa74cdbbc2c0..1f4d3069838ba 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -465,8 +465,12 @@ def convert_to_pydatetime(x, axis): if _contains_datetime: if 'datetime' in typs: - new_values = np.concatenate([x.view(np.int64) for x in - to_concat], axis=axis) + to_concat = [np.array(x, copy=False).view(np.int64) + for x in to_concat] + if axis == 1: + to_concat = [np.atleast_2d(x) for x in to_concat] + + new_values = np.concatenate(to_concat, axis=axis) return new_values.view(_NS_DTYPE) else: # when to_concat has different tz, len(typs) > 1. 
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 7837faf5b4c0f..8b1178576c6d8 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -2183,17 +2183,19 @@ def _assert_take_fillable(self, values, indices, allow_fill=True, fill_value=None, na_value=np.nan): """ Internal method to handle NA filling of take """ indices = _ensure_platform_int(indices) - # only fill if we are passing a non-None fill_value if allow_fill and fill_value is not None: if (indices < -1).any(): msg = ('When allow_fill=True and fill_value is not None, ' 'all indices must be >= -1') raise ValueError(msg) - taken = values.take(indices) mask = indices == -1 - if mask.any(): - taken[mask] = na_value + if mask.all(): + taken = np.full(indices.shape, fill_value=na_value) + else: + taken = values.take(indices) + if mask.any(): + taken[mask] = na_value else: taken = values.take(indices) return taken diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 474894aba65df..34f8e36f338ea 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -5835,8 +5835,10 @@ def get_reindexed_values(self, empty_dtype, upcasted_na): if len(values) and values[0] is None: fill_value = None - if getattr(self.block, 'is_datetimetz', False): - pass + if getattr(self.block, 'is_datetimetz', False) or \ + is_datetimetz(empty_dtype): + missing_arr = np.full(np.prod(self.shape), fill_value) + return DatetimeIndex(missing_arr, dtype=empty_dtype) elif getattr(self.block, 'is_categorical', False): pass elif getattr(self.block, 'is_sparse', False): diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 57af67422d65f..7d4ffc964c7af 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1917,6 +1917,92 @@ def test_concat_tz_series_tzlocal(self): tm.assert_series_equal(result, pd.Series(x + y)) assert result.dtype == 'datetime64[ns, tzlocal()]' + @pytest.mark.parametrize('tz1', 
[None, 'UTC']) + @pytest.mark.parametrize('tz2', [None, 'UTC']) + @pytest.mark.parametrize('s', [pd.NaT, pd.Timestamp('20150101')]) + def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, s): + # GH 12396 + + # tz-naive + first = pd.DataFrame([[pd.NaT], [pd.NaT]]).apply( + lambda x: x.dt.tz_localize(tz1)) + second = pd.DataFrame([s]).apply(lambda x: x.dt.tz_localize(tz2)) + + # we are all NaT so this is ok + if tz1 is None: + tz = tz2 + elif tz2 is None: + tz = tz1 + elif tz1 == tz2: + tz = tz1 + else: + tz = None + + result = pd.concat([first, second], axis=0) + expected = pd.DataFrame(pd.Series( + [pd.NaT, pd.NaT, s], index=[0, 1, 0])) + expected = expected.apply(lambda x: x.dt.tz_localize(tz)) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('tz1', [None, 'UTC']) + @pytest.mark.parametrize('tz2', [None, 'UTC']) + def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2): + # GH 12396 + + first = pd.DataFrame(pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)) + second = pd.DataFrame(pd.Series( + [pd.NaT]).dt.tz_localize(tz2), columns=[1]) + expected = pd.DataFrame( + {0: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1), + 1: pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2)} + ) + result = pd.concat([first, second], axis=1) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('tz1', [None, 'UTC']) + @pytest.mark.parametrize('tz2', [None, 'UTC']) + def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2): + # GH 12396 + + # tz-naive + first = pd.Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1) + second = pd.DataFrame([[pd.Timestamp('2015/01/01', tz=tz2)], + [pd.Timestamp('2016/01/01', tz=tz2)]], + index=[2, 3]) + + if tz1 is None and tz2 is None: + tz = None + + # we are all NaT so this is ok + elif tz1 is None: + tz = tz2 + elif tz1 == tz2: + tz = tz1 + else: + tz = None + expected = pd.DataFrame([pd.NaT, pd.NaT, + pd.Timestamp('2015/01/01', tz=tz), + pd.Timestamp('2016/01/01', tz=tz)]) + + result = pd.concat([first, 
second]) + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('tz', [None, 'UTC']) + def test_concat_NaT_dataframes(self, tz): + # GH 12396 + + first = pd.DataFrame([[pd.NaT], [pd.NaT]]) + first = first.apply(lambda x: x.dt.tz_localize(tz)) + second = pd.DataFrame([[pd.Timestamp('2015/01/01', tz=tz)], + [pd.Timestamp('2016/01/01', tz=tz)]], + index=[2, 3]) + expected = pd.DataFrame([pd.NaT, pd.NaT, + pd.Timestamp('2015/01/01', tz=tz), + pd.Timestamp('2016/01/01', tz=tz)]) + + result = pd.concat([first, second], axis=0) + assert_frame_equal(result, expected) + def test_concat_period_series(self): x = Series(pd.PeriodIndex(['2015-11-01', '2015-12-01'], freq='D')) y = Series(pd.PeriodIndex(['2015-10-01', '2016-01-01'], freq='D')) @@ -1978,6 +2064,21 @@ def test_concat_empty_series(self): columns=['x', 0]) tm.assert_frame_equal(res, exp) + @pytest.mark.parametrize('tz', [None, 'UTC']) + @pytest.mark.parametrize('values', [[], [1, 2, 3]]) + def test_concat_empty_series_timelike(self, tz, values): + # GH 18447 + + first = Series([], dtype='M8[ns]').dt.tz_localize(tz) + second = Series(values) + expected = DataFrame( + {0: pd.Series([pd.NaT] * len(values), + dtype='M8[ns]' + ).dt.tz_localize(tz), + 1: values}) + result = concat([first, second], axis=1) + assert_frame_equal(result, expected) + def test_default_index(self): # is_series and ignore_index s1 = pd.Series([1, 2, 3], name='x')
closes #12396 closes #18447 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19327
2018-01-20T23:30:49Z
2018-05-11T21:52:24Z
null
2018-05-11T21:52:25Z
BUG: Crosstab bug in #18321
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 86fc47dee09fc..6ffafe2fae9c7 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -509,6 +509,7 @@ Reshaping - Bug in :func:`DataFrame.merge` in which merging using ``Index`` objects as vectors raised an Exception (:issue:`19038`) - Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`) - Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`) +- Bug in :func:`crosstab` when performing crosstab operation on two series with tupple name, the resulting data frame has incorrectly named output columns (:issue:`18321`) - Numeric diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 0e92fc4edce85..4a2f39fcab4a3 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -455,6 +455,9 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, from pandas import DataFrame df = DataFrame(data, index=common_idx) + common_cols_idx = df.columns + + # adding dummy column for calculation of pivot table if values is None: df['__dummy__'] = 0 kwargs = {'aggfunc': len, 'fill_value': 0} @@ -462,10 +465,15 @@ def crosstab(index, columns, values=None, rownames=None, colnames=None, df['__dummy__'] = values kwargs = {'aggfunc': aggfunc} - table = df.pivot_table('__dummy__', index=rownames, columns=colnames, + table = df.pivot_table(['__dummy__'], index=rownames, columns=colnames, margins=margins, margins_name=margins_name, dropna=dropna, **kwargs) + # since column dummy is before computing pivot table, it has to be removed + if not table.empty: + added_cols_idx = list(df.columns.difference(common_cols_idx))[0] + table = table[added_cols_idx] + # Post-process if normalize is not False: table = _normalize(table, normalize=normalize, margins=margins, diff --git 
a/pandas/tests/reshape/test_pivot.py b/pandas/tests/reshape/test_pivot.py index 786c57a4a82df..5a6e7cd4d9bb3 100644 --- a/pandas/tests/reshape/test_pivot.py +++ b/pandas/tests/reshape/test_pivot.py @@ -1628,7 +1628,8 @@ def test_crosstab_dup_index_names(self): pytest.raises(ValueError, pd.crosstab, s, s) @pytest.mark.parametrize("names", [['a', ('b', 'c')], - [('a', 'b'), 'c']]) + [('a', 'b'), 'c'], + [('a', 'b'), ('c', 'd')]]) def test_crosstab_tuple_name(self, names): s1 = pd.Series(range(3), name=names[0]) s2 = pd.Series(range(1, 4), name=names[1]) @@ -1638,3 +1639,21 @@ def test_crosstab_tuple_name(self, names): result = pd.crosstab(s1, s2) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("names, input_data, expected_data_out", [ + (['a', 'b'], [[1, 2, 3], [1, 1, 1]], [1, 1, 1]), + ([('a', 'b'), 'c'], [[1, 2, 2], [1, 1, 1]], [1, 2]), + ([('a', 'b'), ('c', 'd')], [[1, 2, 3], [1, 2, 3]], + np.eye(3, dtype=int))]) + def test_crosstab_cols_output(self, names, input_data, expected_data_out): + row_series = pd.Series(input_data[0], name=names[0]) + col_series = pd.Series(input_data[1], name=names[1]) + expected_crosstab = pd.DataFrame( + expected_data_out, + index=pd.Index(set(input_data[0]), name=names[0]), + columns=pd.Index(set(input_data[1]), name=names[1]) + ) + tm.assert_frame_equal( + pd.crosstab(row_series, col_series), expected_crosstab, + check_exact=True + )
- [x] closes #18321 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Fixing bug in #18321 The bug is because the function `df.pivot_table` tries to remove only the top level of the column in crosstab data frame. When the series names are tupples, the added column `__dummy__` in function `crosstab` has several levels, meaning removing only the top layer in the resulting crosstab as done in the current `pivot_table` function won't be enough. The fix keeps track of the exact added column, then remove the extra levels in resulting crosstab data frame it in the same function rather than delegate the removal to `df.pivot_table`. Based on my investigation, `df.pivot_table` removes the dummy layers after all the calculation, so moving the removal of the extra layers outside won't affect the current behavior. Also, by add the columns and removing the resulting extra layers in the crosstab in the same function, the logic is easier to read.
https://api.github.com/repos/pandas-dev/pandas/pulls/19326
2018-01-20T23:28:34Z
2018-01-24T12:15:03Z
null
2018-01-24T12:15:14Z
DOC: Improve docs (GH19312) for Series.nonzero()
diff --git a/pandas/core/series.py b/pandas/core/series.py index be40f65186d2d..fc512d23a05ba 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -490,7 +490,7 @@ def compress(self, condition, *args, **kwargs): def nonzero(self): """ - Return the indices of the elements that are non-zero + Return the *integer* indices of the elements that are non-zero This method is equivalent to calling `numpy.nonzero` on the series data. For compatibility with NumPy, the return value is @@ -508,6 +508,15 @@ def nonzero(self): 3 4 dtype: int64 + >>> s = pd.Series([0, 3, 0, 4], index=['a', 'b', 'c', 'd']) + # same return although index of s is different + >>> s.nonzero() + (array([1, 3]),) + >>> s.iloc[s.nonzero()[0]] + b 3 + d 4 + dtype: int64 + See Also -------- numpy.nonzero
- [x] closes #19312 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19324
2018-01-20T15:19:06Z
2018-01-23T00:10:50Z
2018-01-23T00:10:50Z
2018-01-23T14:00:21Z
0.22.x
diff --git a/.gitignore b/.gitignore index ff0a6aef47163..b1748ae72b8ba 100644 --- a/.gitignore +++ b/.gitignore @@ -106,3 +106,4 @@ doc/build/html/index.html doc/tmp.sv doc/source/styled.xlsx doc/source/templates/ +env/ diff --git a/.travis.yml b/.travis.yml index fe1a2950dbf08..42b4ef0396fc8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -102,8 +102,6 @@ before_install: - uname -a - git --version - git tag - - ci/before_install_travis.sh - - export DISPLAY=":99.0" install: - echo "install start" @@ -114,6 +112,8 @@ install: before_script: - ci/install_db_travis.sh + - export DISPLAY=":99.0" + - ci/before_script_travis.sh script: - echo "script start" diff --git a/appveyor.yml b/appveyor.yml index a1f8886f6d068..44af73b498aa8 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -22,7 +22,7 @@ environment: PYTHON_VERSION: "3.6" PYTHON_ARCH: "64" CONDA_PY: "36" - CONDA_NPY: "112" + CONDA_NPY: "113" - CONDA_ROOT: "C:\\Miniconda3_64" PYTHON_VERSION: "2.7" diff --git a/asv_bench/benchmarks/plotting.py b/asv_bench/benchmarks/plotting.py index dda684b35e301..16889b2f19e89 100644 --- a/asv_bench/benchmarks/plotting.py +++ b/asv_bench/benchmarks/plotting.py @@ -10,15 +10,37 @@ def date_range(start=None, end=None, periods=None, freq=None): from pandas.tools.plotting import andrews_curves +class Plotting(object): + goal_time = 0.2 + + def setup(self): + import matplotlib + matplotlib.use('Agg') + self.s = Series(np.random.randn(1000000)) + self.df = DataFrame({'col': self.s}) + + def time_series_plot(self): + self.s.plot() + + def time_frame_plot(self): + self.df.plot() + + class TimeseriesPlotting(object): goal_time = 0.2 def setup(self): import matplotlib matplotlib.use('Agg') - self.N = 2000 - self.M = 5 - self.df = DataFrame(np.random.randn(self.N, self.M), index=date_range('1/1/1975', periods=self.N)) + N = 2000 + M = 5 + idx = date_range('1/1/1975', periods=N) + self.df = DataFrame(np.random.randn(N, M), index=idx) + + idx_irregular = 
pd.DatetimeIndex(np.concatenate((idx.values[0:10], + idx.values[12:]))) + self.df2 = DataFrame(np.random.randn(len(idx_irregular), M), + index=idx_irregular) def time_plot_regular(self): self.df.plot() @@ -26,6 +48,9 @@ def time_plot_regular(self): def time_plot_regular_compat(self): self.df.plot(x_compat=True) + def time_plot_irregular(self): + self.df2.plot() + class Misc(object): goal_time = 0.6 diff --git a/ci/before_install_travis.sh b/ci/before_script_travis.sh similarity index 93% rename from ci/before_install_travis.sh rename to ci/before_script_travis.sh index 2d0b4da6120dc..0b3939b1906a2 100755 --- a/ci/before_install_travis.sh +++ b/ci/before_script_travis.sh @@ -4,6 +4,7 @@ echo "inside $0" if [ "${TRAVIS_OS_NAME}" == "linux" ]; then sh -e /etc/init.d/xvfb start + sleep 3 fi # Never fail because bad things happened here. diff --git a/ci/check_imports.py b/ci/check_imports.py index a83436e7d258c..d6f24ebcc4d3e 100644 --- a/ci/check_imports.py +++ b/ci/check_imports.py @@ -9,7 +9,6 @@ 'ipython', 'jinja2' 'lxml', - 'matplotlib', 'numexpr', 'openpyxl', 'py', diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml new file mode 100644 index 0000000000000..c3d3d59f895c6 --- /dev/null +++ b/ci/environment-dev.yaml @@ -0,0 +1,14 @@ +name: pandas-dev +channels: + - defaults + - conda-forge +dependencies: + - Cython + - NumPy + - moto + - pytest + - python-dateutil + - python=3 + - pytz + - setuptools + - sphinx diff --git a/ci/install_travis.sh b/ci/install_travis.sh index b85263daa1eac..dac3625cba4ba 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -34,9 +34,9 @@ fi # install miniconda if [ "${TRAVIS_OS_NAME}" == "osx" ]; then - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -q -O miniconda.sh || exit 1 else - time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O 
miniconda.sh || exit 1 + time wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -q -O miniconda.sh || exit 1 fi time bash miniconda.sh -b -p "$MINICONDA_DIR" || exit 1 @@ -107,7 +107,7 @@ time conda install -n pandas pytest>=3.1.0 time pip install pytest-xdist moto if [ "$LINT" ]; then - conda install flake8 + conda install flake8=3.4.1 pip install cpplint fi diff --git a/ci/requirements-2.7_BUILD_TEST.pip b/ci/requirements-2.7_BUILD_TEST.pip index a0fc77c40bc00..f4617133cad5b 100644 --- a/ci/requirements-2.7_BUILD_TEST.pip +++ b/ci/requirements-2.7_BUILD_TEST.pip @@ -1,7 +1,6 @@ xarray geopandas seaborn -pandas_gbq pandas_datareader statsmodels scikit-learn diff --git a/ci/requirements-3.5.pip b/ci/requirements-3.5.pip index 6e4f7b65f9728..c9565f2173070 100644 --- a/ci/requirements-3.5.pip +++ b/ci/requirements-3.5.pip @@ -1,2 +1,2 @@ xarray==0.9.1 -pandas-gbq +pandas_gbq diff --git a/ci/requirements-3.6.sh b/ci/requirements-3.6.sh new file mode 100644 index 0000000000000..f5c3dbf59a29d --- /dev/null +++ b/ci/requirements-3.6.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +source activate pandas + +echo "[install 3.6 downstream deps]" + +conda install -n pandas -c conda-forge pandas-datareader xarray geopandas seaborn statsmodels scikit-learn dask diff --git a/ci/requirements-3.6_WIN.run b/ci/requirements-3.6_WIN.run index af7a90b126f22..db2d429a2a4ff 100644 --- a/ci/requirements-3.6_WIN.run +++ b/ci/requirements-3.6_WIN.run @@ -1,6 +1,6 @@ python-dateutil pytz -numpy=1.12* +numpy=1.13* bottleneck openpyxl xlsxwriter diff --git a/ci/requirements_all.txt b/ci/requirements-optional-conda.txt similarity index 68% rename from ci/requirements_all.txt rename to ci/requirements-optional-conda.txt index e13afd619f105..6edb8d17337e4 100644 --- a/ci/requirements_all.txt +++ b/ci/requirements-optional-conda.txt @@ -1,28 +1,27 @@ -pytest>=3.1.0 -pytest-cov -pytest-xdist -flake8 -sphinx=1.5* -nbsphinx -ipython -python-dateutil -pytz -openpyxl -xlsxwriter -xlrd -xlwt 
-html5lib -patsy beautifulsoup4 -numpy -cython -scipy +blosc +bottleneck +fastparquet +feather-format +html5lib +ipython +ipykernel +jinja2 +lxml +matplotlib +nbsphinx numexpr +openpyxl +pyarrow +pymysql pytables -matplotlib +pytest-cov +pytest-xdist +s3fs +scipy seaborn -lxml sqlalchemy -bottleneck -pymysql -Jinja2 +xarray +xlrd +xlsxwriter +xlwt diff --git a/ci/requirements-optional-pip.txt b/ci/requirements-optional-pip.txt new file mode 100644 index 0000000000000..06b22bd8f2c63 --- /dev/null +++ b/ci/requirements-optional-pip.txt @@ -0,0 +1,28 @@ +# This file was autogenerated by scripts/convert_deps.py +# Do not modify directly +beautifulsoup4 +blosc +bottleneck +fastparquet +feather-format +html5lib +ipython +jinja2 +lxml +matplotlib +nbsphinx +numexpr +openpyxl +pyarrow +pymysql +tables +pytest-cov +pytest-xdist +s3fs +scipy +seaborn +sqlalchemy +xarray +xlrd +xlsxwriter +xlwt \ No newline at end of file diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index dbc4f6cbd6509..2fb36b7cd70d8 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -1,8 +1,10 @@ +# This file was autogenerated by scripts/convert_deps.py +# Do not modify directly +Cython +NumPy +moto +pytest python-dateutil pytz -numpy -cython -pytest>=3.1.0 -pytest-cov -flake8 -moto +setuptools +sphinx \ No newline at end of file diff --git a/ci/script_multi.sh b/ci/script_multi.sh index ee9fbcaad5ef5..ae8f030b92d66 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/bin/bash -e echo "[script multi]" diff --git a/doc/source/api.rst b/doc/source/api.rst index 80f8d42be8ed6..a9766b5c04496 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -1794,6 +1794,7 @@ Methods Timestamp.strftime Timestamp.strptime Timestamp.time + Timestamp.timestamp Timestamp.timetuple Timestamp.timetz Timestamp.to_datetime64 @@ -2173,6 +2174,17 @@ Style Export and Import Styler.export Styler.use +Plotting +~~~~~~~~ + +.. currentmodule:: pandas + +.. 
autosummary:: + :toctree: generated/ + + plotting.register_matplotlib_converters + plotting.deregister_matplotlib_converters + .. currentmodule:: pandas General utility functions diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 466ac3c9cbf51..cd3cc282a8010 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -346,7 +346,9 @@ The following methods are available: :meth:`~Window.sum`, Sum of values :meth:`~Window.mean`, Mean of values -The weights used in the window are specified by the ``win_type`` keyword. The list of recognized types are: +The weights used in the window are specified by the ``win_type`` keyword. +The list of recognized types are the `scipy.signal window functions +<https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__: - ``boxcar`` - ``triang`` diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index d8d57a8bfffdd..4426d3fb0165e 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -11,32 +11,32 @@ Where to start? =============== All contributions, bug reports, bug fixes, documentation improvements, -enhancements and ideas are welcome. +enhancements, and ideas are welcome. -If you are simply looking to start working with the *pandas* codebase, navigate to the -`GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ and start looking through -interesting issues. There are a number of issues listed under `Docs +If you are brand new to pandas or open-source development, we recommend going +through the `GitHub "issues" tab <https://github.com/pandas-dev/pandas/issues>`_ +to find issues that interest you. There are a number of issues listed under `Docs <https://github.com/pandas-dev/pandas/issues?labels=Docs&sort=updated&state=open>`_ and `Difficulty Novice <https://github.com/pandas-dev/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22>`_ -where you could start out. 
- -Or maybe through using *pandas* you have an idea of your own or are looking for something -in the documentation and thinking 'this can be improved'...you can do something -about it! +where you could start out. Once you've found an interesting issue, you can +return here to get your development environment setup. Feel free to ask questions on the `mailing list -<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter -<https://gitter.im/pydata/pandas>`_. +<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter`_. + +.. _contributing.bug_reports: Bug reports and enhancement requests ==================================== -Bug reports are an important part of making *pandas* more stable. Having a complete bug report -will allow others to reproduce the bug and provide insight into fixing. Because many versions of -*pandas* are supported, knowing version information will also identify improvements made since -previous versions. Trying the bug-producing code out on the *master* branch is often a worthwhile exercise -to confirm the bug still exists. It is also worth searching existing bug reports and pull requests +Bug reports are an important part of making *pandas* more stable. Having a complete bug report +will allow others to reproduce the bug and provide insight into fixing. See +`this stackoverflow article <https://stackoverflow.com/help/mcve>`_ for tips on +writing a good bug report. + +Trying the bug-producing code out on the *master* branch is often a worthwhile exercise +to confirm the bug still exists. It is also worth searching existing bug reports and pull requests to see if the issue has already been reported and/or fixed. Bug reports must: @@ -60,12 +60,16 @@ Bug reports must: The issue will then show up to the *pandas* community and be open to comments/ideas from others. +.. 
_contributing.github: + Working with the code ===================== Now that you have an issue you want to fix, enhancement to add, or documentation to improve, you need to learn how to work with GitHub and the *pandas* code base. +.. _contributing.version_control: + Version control, Git, and GitHub -------------------------------- @@ -103,167 +107,164 @@ want to clone your fork to your machine:: git clone https://github.com/your-user-name/pandas.git pandas-yourname cd pandas-yourname - git remote add upstream git://github.com/pandas-dev/pandas.git + git remote add upstream https://github.com/pandas-dev/pandas.git This creates the directory `pandas-yourname` and connects your repository to the upstream (main project) *pandas* repository. -Creating a branch ------------------ +.. _contributing.dev_env: -You want your master branch to reflect only production-ready code, so create a -feature branch for making your changes. For example:: +Creating a development environment +---------------------------------- - git branch shiny-new-feature - git checkout shiny-new-feature +To test out code changes, you'll need to build pandas from source, which +requires a C compiler and python environment. If you're making documentation +changes, you can skip to :ref:`contributing.documentation` but you won't be able +to build the documentation locally before pushing your changes. -The above can be simplified to:: +.. _contributing.dev_c: - git checkout -b shiny-new-feature +Installing a C Compiler +~~~~~~~~~~~~~~~~~~~~~~~ -This changes your working directory to the shiny-new-feature branch. Keep any -changes in this branch specific to one bug or feature so it is clear -what the branch brings to *pandas*. You can have many shiny-new-features -and switch in between them using the git checkout command. +Pandas uses C extensions (mostly written using Cython) to speed up certain +operations.
To install pandas from source, you need to compile these C +extensions, which means you need a C compiler. This process depends on which +platform you're using. Follow the `CPython contributing guidelines +<https://docs.python.org/devguide/setup.html#build-dependencies>`_ for getting a +compiler installed. You don't need to do any of the ``./configure`` or ``make`` +steps; you only need to install the compiler. -To update this branch, you need to retrieve the changes from the master branch:: +For Windows developers, the following links may be helpful. - git fetch upstream - git rebase upstream/master +- https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/ +- https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit +- https://cowboyprogrammer.org/building-python-wheels-for-windows/ +- https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ +- https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy -This will replay your commits on top of the latest pandas git master. If this -leads to merge conflicts, you must resolve these before submitting your pull -request. If you have uncommitted changes, you will need to ``stash`` them prior -to updating. This will effectively store your changes and they can be reapplied -after updating. +Let us know if you have any difficulties by opening an issue or reaching out on +`Gitter`_. -.. _contributing.dev_env: +.. _contributing.dev_python: -Creating a development environment ----------------------------------- +Creating a Python Environment +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -An easy way to create a *pandas* development environment is as follows.
+Now that you have a C compiler, create an isolated pandas development +environment: -- Install either :ref:`Anaconda <install.anaconda>` or :ref:`miniconda <install.miniconda>` +- Install either `Anaconda <https://www.anaconda.com/download/>`_ or `miniconda + <https://conda.io/miniconda.html>`_ +- Make sure your conda is up to date (``conda update conda``) - Make sure that you have :ref:`cloned the repository <contributing.forking>` - ``cd`` to the *pandas* source directory -Tell conda to create a new environment, named ``pandas_dev``, or any other name you would like -for this environment, by running:: - - conda create -n pandas_dev --file ci/requirements_dev.txt - - -For a python 3 environment:: - - conda create -n pandas_dev python=3 --file ci/requirements_dev.txt - -.. warning:: - - If you are on Windows, see :ref:`here for a fully compliant Windows environment <contributing.windows>`. - -This will create the new environment, and not touch any of your existing environments, -nor any existing python installation. It will install all of the basic dependencies of -*pandas*, as well as the development and testing tools. If you would like to install -other dependencies, you can install them as follows:: +We'll now kick off a three-step process: - conda install -n pandas_dev -c pandas pytables scipy +1. Install the build dependencies +2. Build and install pandas +3. Install the optional dependencies -To install *all* pandas dependencies you can do the following:: +.. code-block:: none - conda install -n pandas_dev -c conda-forge --file ci/requirements_all.txt + # Create and activate the build environment + conda env create -f ci/environment-dev.yaml + conda activate pandas-dev -To work in this environment, Windows users should ``activate`` it as follows:: + # Build and install pandas + python setup.py build_ext --inplace -j 4 + python -m pip install -e .
- activate pandas_dev + # Install the rest of the optional dependencies + conda install -c defaults -c conda-forge --file=ci/requirements-optional-conda.txt -Mac OSX / Linux users should use:: +At this point you should be able to import pandas from your locally built version:: - source activate pandas_dev + $ python # start an interpreter + >>> import pandas + >>> print(pandas.__version__) + 0.22.0.dev0+29.g4ad6d4d74 -You will then see a confirmation message to indicate you are in the new development environment. +This will create the new environment, and not touch any of your existing environments, +nor any existing python installation. To view your environments:: conda info -e -To return to your home root environment in Windows:: - - deactivate +To return to your root environment:: -To return to your home root environment in OSX / Linux:: - - source deactivate + conda deactivate See the full conda docs `here <http://conda.pydata.org/docs>`__. -At this point you can easily do an *in-place* install, as detailed in the next section. - -.. _contributing.windows: - -Creating a Windows development environment ------------------------------------------- +.. _contributing.pip: -To build on Windows, you need to have compilers installed to build the extensions. You will need to install the appropriate Visual Studio compilers, VS 2008 for Python 2.7, VS 2010 for 3.4, and VS 2015 for Python 3.5 and 3.6. +Creating a Python Environment (pip) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For Python 2.7, you can install the ``mingw`` compiler which will work equivalently to VS 2008:: +If you aren't using conda for your development environment, follow these instructions. +You'll need to have at least python3.5 installed on your system. - conda install -n pandas_dev libpython +.. code-block:: none -or use the `Microsoft Visual Studio VC++ compiler for Python <https://www.microsoft.com/en-us/download/details.aspx?id=44266>`__.
Note that you have to check the ``x64`` box to install the ``x64`` extension building capability as this is not installed by default. + # Create a virtual environment + # Use an ENV_DIR of your choice. We'll use ~/virtualenvs/pandas-dev + # Any parent directories should already exist + python3 -m venv ~/virtualenvs/pandas-dev + # Activate the virtualenv + . ~/virtualenvs/pandas-dev/bin/activate -For Python 3.4, you can download and install the `Windows 7.1 SDK <https://www.microsoft.com/en-us/download/details.aspx?id=8279>`__. Read the references below as there may be various gotchas during the installation. - -For Python 3.5 and 3.6, you can download and install the `Visual Studio 2015 Community Edition <https://www.visualstudio.com/en-us/downloads/visual-studio-2015-downloads-vs.aspx>`__. - -Here are some references and blogs: - -- https://blogs.msdn.microsoft.com/pythonengineering/2016/04/11/unable-to-find-vcvarsall-bat/ -- https://github.com/conda/conda-recipes/wiki/Building-from-Source-on-Windows-32-bit-and-64-bit -- https://cowboyprogrammer.org/building-python-wheels-for-windows/ -- https://blog.ionelmc.ro/2014/12/21/compiling-python-extensions-on-windows/ -- https://support.enthought.com/hc/en-us/articles/204469260-Building-Python-extensions-with-Canopy + # Install the build dependencies + python -m pip install -r ci/requirements_dev.txt + # Build and install pandas + python setup.py build_ext --inplace -j 4 + python -m pip install -e . -.. _contributing.getting_source: + # Install additional dependencies + python -m pip install -r ci/requirements-optional-pip.txt -Making changes --------------- +Creating a branch +----------------- -Before making your code changes, it is often necessary to build the code that was -just checked out. There are two primary methods of doing this. +You want your master branch to reflect only production-ready code, so create a +feature branch for making your changes. For example:: -#.
The best way to develop *pandas* is to build the C extensions in-place by - running:: + git branch shiny-new-feature + git checkout shiny-new-feature - python setup.py build_ext --inplace +The above can be simplified to:: - If you startup the Python interpreter in the *pandas* source directory you - will call the built C extensions + git checkout -b shiny-new-feature -#. Another very common option is to do a ``develop`` install of *pandas*:: +This changes your working directory to the shiny-new-feature branch. Keep any +changes in this branch specific to one bug or feature so it is clear +what the branch brings to *pandas*. You can have many shiny-new-features +and switch in between them using the git checkout command. - python setup.py develop +To update this branch, you need to retrieve the changes from the master branch:: - This makes a symbolic link that tells the Python interpreter to import *pandas* - from your development directory. Thus, you can always be using the development - version on your system without being inside the clone directory. + git fetch upstream + git rebase upstream/master +This will replay your commits on top of the latest pandas git master. If this +leads to merge conflicts, you must resolve these before submitting your pull +request. If you have uncommitted changes, you will need to ``stash`` them prior +to updating. This will effectively store your changes and they can be reapplied +after updating. .. _contributing.documentation: Contributing to the documentation ================================= -If you're not the developer type, contributing to the documentation is still -of huge value. You don't even have to be an expert on -*pandas* to do so! Something as simple as rewriting small passages for clarity -as you reference the docs is a simple but effective way to contribute. The -next person to read that passage will be in your debt! - -In fact, there are sections of the docs that are worse off after being written -by experts. 
If something in the docs doesn't make sense to you, updating the -relevant section after you figure it out is a simple way to ensure it will -help the next person. +If you're not the developer type, contributing to the documentation is still of +huge value. You don't even have to be an expert on *pandas* to do so! In fact, +there are sections of the docs that are worse off after being written by +experts. If something in the docs doesn't make sense to you, updating the +relevant section after you figure it out is a great way to ensure it will help +the next person. .. contents:: Documentation: :local: @@ -330,7 +331,7 @@ The utility script ``scripts/api_rst_coverage.py`` can be used to compare the list of methods documented in ``doc/source/api.rst`` (which is used to generate the `API Reference <http://pandas.pydata.org/pandas-docs/stable/api.html>`_ page) and the actual public methods. -This will identify methods documented in in ``doc/source/api.rst`` that are not actually +This will identify methods documented in ``doc/source/api.rst`` that are not actually class methods, and existing methods that are not documented in ``doc/source/api.rst``. @@ -342,30 +343,6 @@ Requirements First, you need to have a development environment to be able to build pandas (see the docs on :ref:`creating a development environment above <contributing.dev_env>`). -Further, to build the docs, there are some extra requirements: you will need to -have ``sphinx`` and ``ipython`` installed. `numpydoc -<https://github.com/numpy/numpydoc>`_ is used to parse the docstrings that -follow the Numpy Docstring Standard (see above), but you don't need to install -this because a local copy of numpydoc is included in the *pandas* source -code. `nbsphinx <https://nbsphinx.readthedocs.io/>`_ is required to build -the Jupyter notebooks included in the documentation. 
- -If you have a conda environment named ``pandas_dev``, you can install the extra -requirements with:: - - conda install -n pandas_dev sphinx ipython nbconvert nbformat - conda install -n pandas_dev -c conda-forge nbsphinx - -Furthermore, it is recommended to have all :ref:`optional dependencies <install.optional_dependencies>`. -installed. This is not strictly necessary, but be aware that you will see some error -messages when building the docs. This happens because all the code in the documentation -is executed during the doc build, and so code examples using optional dependencies -will generate errors. Run ``pd.show_versions()`` to get an overview of the installed -version of all dependencies. - -.. warning:: - - You need to have ``sphinx`` version >= 1.3.2. Building the documentation ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -386,10 +363,10 @@ If you want to do a full clean build, do:: python make.py clean python make.py html -Starting with *pandas* 0.13.1 you can tell ``make.py`` to compile only a single section -of the docs, greatly reducing the turn-around time for checking your changes. -You will be prompted to delete ``.rst`` files that aren't required. This is okay because -the prior versions of these files can be checked out from git. However, you must make sure +You can tell ``make.py`` to compile only a single section of the docs, greatly +reducing the turn-around time for checking your changes. You will be prompted to +delete ``.rst`` files that aren't required. This is okay because the prior +versions of these files can be checked out from git. However, you must make sure not to commit the file deletions to your Git repository! :: @@ -422,6 +399,8 @@ the documentation are also built by Travis-CI. These docs are then hosted `here <http://pandas-docs.github.io/pandas-docs-travis>`__, see also the :ref:`Continuous Integration <contributing.ci>` section. +.. 
_contributing.code: + Contributing to the code base ============================= @@ -480,7 +459,7 @@ Once configured, you can run the tool as follows:: clang-format modified-c-file This will output what your file will look like if the changes are made, and to apply -them, just run the following command:: +them, run the following command:: clang-format -i modified-c-file @@ -1033,7 +1012,7 @@ delete your branch:: git checkout master git merge upstream/master -Then you can just do:: +Then you can do:: git branch -d shiny-new-feature @@ -1043,3 +1022,6 @@ branch has not actually been merged. The branch will still exist on GitHub, so to delete it there do:: git push origin --delete shiny-new-feature + + +.. _Gitter: https://gitter.im/pydata/pandas diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index 2348a3d10c54f..69913b2c1fbd8 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -53,6 +53,18 @@ the latest web technologies. Its goal is to provide elegant, concise constructio graphics in the style of Protovis/D3, while delivering high-performance interactivity over large data to thin clients. +`seaborn <https://seaborn.pydata.org>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Seaborn is a Python visualization library based on `matplotlib +<http://matplotlib.org>`__. It provides a high-level, dataset-oriented +interface for creating attractive statistical graphics. The plotting functions +in seaborn understand pandas objects and leverage pandas grouping operations +internally to support concise specification of complex visualizations. Seaborn +also goes beyond matplotlib and pandas with the option to perform statistical +estimation while plotting, aggregating across observations and visualizing the +fit of statistical models to emphasize patterns in a dataset. 
+ `yhat/ggplot <https://github.com/yhat/ggplot>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -64,15 +76,6 @@ but a faithful implementation for python users has long been missing. Although s (as of Jan-2014), the `yhat/ggplot <https://github.com/yhat/ggplot>`__ project has been progressing quickly in that direction. -`Seaborn <https://github.com/mwaskom/seaborn>`__ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Although pandas has quite a bit of "just plot it" functionality built-in, visualization and -in particular statistical graphics is a vast field with a long tradition and lots of ground -to cover. The `Seaborn <https://github.com/mwaskom/seaborn>`__ project builds on top of pandas -and `matplotlib <http://matplotlib.org>`__ to provide easy plotting of data which extends to -more advanced types of plots then those offered by pandas. - `Vincent <https://github.com/wrobstory/vincent>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -222,7 +225,13 @@ Out-of-core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dask is a flexible parallel computing library for analytics. Dask -allow a familiar ``DataFrame`` interface to out-of-core, parallel and distributed computing. +provides a familiar ``DataFrame`` interface for out-of-core, parallel and distributed computing. + +`Dask-ML <https://dask-ml.readthedocs.io/en/latest/>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Dask-ML enables parallel and distributed machine learning using Dask alongside existing machine learning libraries like Scikit-Learn, XGBoost, and TensorFlow. + `Blaze <http://blaze.pydata.org/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index fdb002a642d62..b329fac969343 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1835,15 +1835,27 @@ that you've done this: Yikes! +.. 
_indexing.evaluation_order: + Evaluation order matters ~~~~~~~~~~~~~~~~~~~~~~~~ -Furthermore, in chained expressions, the order may determine whether a copy is returned or not. -If an expression will set values on a copy of a slice, then a ``SettingWithCopy`` -warning will be issued. +When you use chained indexing, the order and type of the indexing operation +partially determine whether the result is a slice into the original object, or +a copy of the slice. + +Pandas has the ``SettingWithCopyWarning`` because assigning to a copy of a +slice is frequently not intentional, but a mistake caused by chained indexing +returning a copy where a slice was expected. + +If you would like pandas to be more or less trusting about assignment to a +chained indexing expression, you can set the :ref:`option <options>` +``mode.chained_assignment`` to one of these values: -You can control the action of a chained assignment via the option ``mode.chained_assignment``, -which can take the values ``['raise','warn',None]``, where showing a warning is the default. +* ``'warn'``, the default, means a ``SettingWithCopyWarning`` is printed. +* ``'raise'`` means pandas will raise a ``SettingWithCopyException`` + you have to deal with. +* ``None`` will suppress the warnings entirely. .. ipython:: python :okwarning: diff --git a/doc/source/install.rst b/doc/source/install.rst index c805f84d0faaa..27dde005e5a87 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -141,28 +141,24 @@ and can take a few minutes to complete. Installing using your Linux distribution's package manager. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The commands in this table will install pandas for Python 2 from your distribution. -To install pandas for Python 3 you may need to use the package ``python3-pandas``. +The commands in this table will install pandas for Python 3 from your distribution. +To install pandas for Python 2 you may need to use the package ``python-pandas``. .. 
csv-table:: :header: "Distribution", "Status", "Download / Repository Link", "Install method" :widths: 10, 10, 20, 50 - Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python-pandas`` - Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python-pandas`` - Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python-pandas`` - Ubuntu, unstable (daily builds), `PythonXY PPA <https://code.launchpad.net/~pythonxy/+archive/pythonxy-devel>`__; activate by: ``sudo add-apt-repository ppa:pythonxy/pythonxy-devel && sudo apt-get update``, ``sudo apt-get install python-pandas`` - OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper in python-pandas`` - Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python-pandas`` - Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python-pandas`` - - - - - - + Debian, stable, `official Debian repository <http://packages.debian.org/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python3-pandas`` + Debian & Ubuntu, unstable (latest packages), `NeuroDebian <http://neuro.debian.net/index.html#how-to-use-this-repository>`__ , ``sudo apt-get install python3-pandas`` + Ubuntu, stable, `official Ubuntu repository <http://packages.ubuntu.com/search?keywords=pandas&searchon=names&suite=all&section=all>`__ , ``sudo apt-get install python3-pandas`` + OpenSuse, stable, `OpenSuse Repository <http://software.opensuse.org/package/python-pandas?search_term=pandas>`__ , ``zypper 
in python3-pandas`` + Fedora, stable, `official Fedora repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``dnf install python3-pandas`` + Centos/RHEL, stable, `EPEL repository <https://admin.fedoraproject.org/pkgdb/package/rpms/python-pandas/>`__ , ``yum install python3-pandas`` +**However**, the packages in the linux package managers are often a few versions behind, so +to get the newest version of pandas, it's recommended to install using the ``pip`` or ``conda`` +methods described above. Installing from source @@ -258,7 +254,8 @@ Optional Dependencies <http://www.vergenet.net/~conrad/software/xsel/>`__, or `xclip <https://github.com/astrand/xclip/>`__: necessary to use :func:`~pandas.read_clipboard`. Most package managers on Linux distributions will have ``xclip`` and/or ``xsel`` immediately available for installation. -* For Google BigQuery I/O - see `here <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__ +* `pandas-gbq <https://pandas-gbq.readthedocs.io/en/latest/install.html#dependencies>`__: for Google BigQuery I/O. + * `Backports.lzma <https://pypi.python.org/pypi/backports.lzma/>`__: Only for Python 2, for writing to and/or reading from an xz compressed DataFrame in CSV; Python 3 support is built into the standard library. * One of the following combinations of libraries is needed to use the diff --git a/doc/source/io.rst b/doc/source/io.rst index 82cb83c168b22..ba33c449e701f 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -103,15 +103,20 @@ Column and Index Locations and Names ++++++++++++++++++++++++++++++++++++ header : int or list of ints, default ``'infer'`` - Row number(s) to use as the column names, and the start of the data. Default - behavior is as if ``header=0`` if no ``names`` passed, otherwise as if - ``header=None``. Explicitly pass ``header=0`` to be able to replace existing - names. 
The header can be a list of ints that specify row locations for a - multi-index on the columns e.g. ``[0,1,3]``. Intervening rows that are not - specified will be skipped (e.g. 2 in this example is skipped). Note that - this parameter ignores commented lines and empty lines if - ``skip_blank_lines=True``, so header=0 denotes the first line of data - rather than the first line of the file. + Row number(s) to use as the column names, and the start of the + data. Default behavior is to infer the column names: if no names are + passed the behavior is identical to ``header=0`` and column names + are inferred from the first line of the file, if column names are + passed explicitly then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to replace + existing names. + + The header can be a list of ints that specify row locations + for a multi-index on the columns e.g. ``[0,1,3]``. Intervening rows + that are not specified will be skipped (e.g. 2 in this example is + skipped). Note that this parameter ignores commented lines and empty + lines if ``skip_blank_lines=True``, so header=0 denotes the first + line of data rather than the first line of the file. names : array-like, default ``None`` List of column names to use. If file contains no header row, then you should explicitly pass ``header=None``. Duplicates in this list will cause @@ -553,6 +558,14 @@ If the header is in a row other than the first, pass the row number to data = 'skip this skip it\na,b,c\n1,2,3\n4,5,6\n7,8,9' pd.read_csv(StringIO(data), header=1) +.. note:: + + Default behavior is to infer the column names: if no names are + passed the behavior is identical to ``header=0`` and column names + are inferred from the first nonblank line of the file, if column + names are passed explicitly then the behavior is identical to + ``header=None``. + .. _io.dupe_names: Duplicate names parsing @@ -4469,8 +4482,10 @@ Several caveats. 
- This is a newer library, and the format, though stable, is not guaranteed to be backward compatible to the earlier versions. -- The format will NOT write an ``Index``, or ``MultiIndex`` for the ``DataFrame`` and will raise an - error if a non-default one is provided. You can simply ``.reset_index()`` in order to store the index. +- The format will NOT write an ``Index``, or ``MultiIndex`` for the + ``DataFrame`` and will raise an error if a non-default one is provided. You + can ``.reset_index()`` to store the index or ``.reset_index(drop=True)`` to + ignore it. - Duplicate column names and non-string columns names are not supported - Non supported types include ``Period`` and actual python object types. These will raise a helpful error message on an attempt at serialization. @@ -4533,9 +4548,8 @@ dtypes, including extension dtypes such as datetime with tz. Several caveats. -- The format will NOT write an ``Index``, or ``MultiIndex`` for the ``DataFrame`` and will raise an - error if a non-default one is provided. You can simply ``.reset_index(drop=True)`` in order to store the index. - Duplicate column names and non-string columns names are not supported +- Index level names, if specified, must be strings - Categorical dtypes can be serialized to parquet, but will de-serialize as ``object`` dtype. - Non supported types include ``Period`` and actual python object types. These will raise a helpful error message on an attempt at serialization. @@ -4580,6 +4594,15 @@ Read from a parquet file. result.dtypes +Read only certain columns of a parquet file. + +.. ipython:: python + + result = pd.read_parquet('example_fp.parquet', engine='fastparquet', columns=['a', 'b']) + + result.dtypes + + .. ipython:: python :suppress: diff --git a/doc/source/options.rst b/doc/source/options.rst index 2da55a5a658a4..505a5ade68de0 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -273,164 +273,167 @@ Options are 'right', and 'left'. 
Available Options ----------------- -=================================== ============ ================================== -Option Default Function -=================================== ============ ================================== -display.chop_threshold None If set to a float value, all float - values smaller then the given - threshold will be displayed as - exactly 0 by repr and friends. -display.colheader_justify right Controls the justification of - column headers. used by DataFrameFormatter. -display.column_space 12 No description available. -display.date_dayfirst False When True, prints and parses dates - with the day first, eg 20/01/2005 -display.date_yearfirst False When True, prints and parses dates - with the year first, eg 2005/01/20 -display.encoding UTF-8 Defaults to the detected encoding - of the console. Specifies the encoding - to be used for strings returned by - to_string, these are generally strings - meant to be displayed on the console. -display.expand_frame_repr True Whether to print out the full DataFrame - repr for wide DataFrames across - multiple lines, `max_columns` is - still respected, but the output will - wrap-around across multiple "pages" - if its width exceeds `display.width`. -display.float_format None The callable should accept a floating - point number and return a string with - the desired format of the number. - This is used in some places like - SeriesFormatter. - See core.format.EngFormatter for an example. -display.large_repr truncate For DataFrames exceeding max_rows/max_cols, - the repr (and HTML repr) can show - a truncated table (the default), - or switch to the view from df.info() - (the behaviour in earlier versions of pandas). - allowable settings, ['truncate', 'info'] -display.latex.repr False Whether to produce a latex DataFrame - representation for jupyter frontends - that support it. -display.latex.escape True Escapes special characters in DataFrames, when - using the to_latex method. 
-display.latex.longtable False Specifies if the to_latex method of a DataFrame - uses the longtable format. -display.latex.multicolumn True Combines columns when using a MultiIndex -display.latex.multicolumn_format 'l' Alignment of multicolumn labels -display.latex.multirow False Combines rows when using a MultiIndex. - Centered instead of top-aligned, - separated by clines. -display.max_columns 20 max_rows and max_columns are used - in __repr__() methods to decide if - to_string() or info() is used to - render an object to a string. In - case python/IPython is running in - a terminal this can be set to 0 and - pandas will correctly auto-detect - the width the terminal and swap to - a smaller format in case all columns - would not fit vertically. The IPython - notebook, IPython qtconsole, or IDLE - do not run in a terminal and hence - it is not possible to do correct - auto-detection. 'None' value means - unlimited. -display.max_colwidth 50 The maximum width in characters of - a column in the repr of a pandas - data structure. When the column overflows, - a "..." placeholder is embedded in - the output. -display.max_info_columns 100 max_info_columns is used in DataFrame.info - method to decide if per column information - will be printed. -display.max_info_rows 1690785 df.info() will usually show null-counts - for each column. For large frames - this can be quite slow. max_info_rows - and max_info_cols limit this null - check only to frames with smaller - dimensions then specified. -display.max_rows 60 This sets the maximum number of rows - pandas should output when printing - out various output. For example, - this value determines whether the - repr() for a dataframe prints out - fully or just a summary repr. - 'None' value means unlimited. -display.max_seq_items 100 when pretty-printing a long sequence, - no more then `max_seq_items` will - be printed. If items are omitted, - they will be denoted by the addition - of "..." to the resulting string. 
- If set to None, the number of items - to be printed is unlimited. -display.memory_usage True This specifies if the memory usage of - a DataFrame should be displayed when the - df.info() method is invoked. -display.multi_sparse True "Sparsify" MultiIndex display (don't - display repeated elements in outer - levels within groups) -display.notebook_repr_html True When True, IPython notebook will - use html representation for - pandas objects (if it is available). -display.pprint_nest_depth 3 Controls the number of nested levels - to process when pretty-printing -display.precision 6 Floating point output precision in - terms of number of places after the - decimal, for regular formatting as well - as scientific notation. Similar to - numpy's ``precision`` print option -display.show_dimensions truncate Whether to print out dimensions - at the end of DataFrame repr. - If 'truncate' is specified, only - print out the dimensions if the - frame is truncated (e.g. not display - all rows and/or columns) -display.width 80 Width of the display in characters. - In case python/IPython is running in - a terminal this can be set to None - and pandas will correctly auto-detect - the width. Note that the IPython notebook, - IPython qtconsole, or IDLE do not run in a - terminal and hence it is not possible - to correctly detect the width. -display.html.table_schema False Whether to publish a Table Schema - representation for frontends that - support it. -display.html.border 1 A ``border=value`` attribute is - inserted in the ``<table>`` tag - for the DataFrame HTML repr. -io.excel.xls.writer xlwt The default Excel writer engine for - 'xls' files. -io.excel.xlsm.writer openpyxl The default Excel writer engine for - 'xlsm' files. Available options: - 'openpyxl' (the default). -io.excel.xlsx.writer openpyxl The default Excel writer engine for - 'xlsx' files. 
-io.hdf.default_format None default format writing format, if - None, then put will default to - 'fixed' and append will default to - 'table' -io.hdf.dropna_table True drop ALL nan rows when appending - to a table -io.parquet.engine None The engine to use as a default for - parquet reading and writing. If None - then try 'pyarrow' and 'fastparquet' -mode.chained_assignment warn Raise an exception, warn, or no - action if trying to use chained - assignment, The default is warn -mode.sim_interactive False Whether to simulate interactive mode - for purposes of testing. -mode.use_inf_as_na False True means treat None, NaN, -INF, - INF as NA (old way), False means - None and NaN are null, but INF, -INF - are not NA (new way). -compute.use_bottleneck True Use the bottleneck library to accelerate - computation if it is installed. -compute.use_numexpr True Use the numexpr library to accelerate - computation if it is installed. -=================================== ============ ================================== +======================================= ============ ================================== +Option Default Function +======================================= ============ ================================== +display.chop_threshold None If set to a float value, all float + values smaller then the given + threshold will be displayed as + exactly 0 by repr and friends. +display.colheader_justify right Controls the justification of + column headers. used by DataFrameFormatter. +display.column_space 12 No description available. +display.date_dayfirst False When True, prints and parses dates + with the day first, eg 20/01/2005 +display.date_yearfirst False When True, prints and parses dates + with the year first, eg 2005/01/20 +display.encoding UTF-8 Defaults to the detected encoding + of the console. Specifies the encoding + to be used for strings returned by + to_string, these are generally strings + meant to be displayed on the console. 
+display.expand_frame_repr True Whether to print out the full DataFrame + repr for wide DataFrames across + multiple lines, `max_columns` is + still respected, but the output will + wrap-around across multiple "pages" + if its width exceeds `display.width`. +display.float_format None The callable should accept a floating + point number and return a string with + the desired format of the number. + This is used in some places like + SeriesFormatter. + See core.format.EngFormatter for an example. +display.large_repr truncate For DataFrames exceeding max_rows/max_cols, + the repr (and HTML repr) can show + a truncated table (the default), + or switch to the view from df.info() + (the behaviour in earlier versions of pandas). + allowable settings, ['truncate', 'info'] +display.latex.repr False Whether to produce a latex DataFrame + representation for jupyter frontends + that support it. +display.latex.escape True Escapes special characters in DataFrames, when + using the to_latex method. +display.latex.longtable False Specifies if the to_latex method of a DataFrame + uses the longtable format. +display.latex.multicolumn True Combines columns when using a MultiIndex +display.latex.multicolumn_format 'l' Alignment of multicolumn labels +display.latex.multirow False Combines rows when using a MultiIndex. + Centered instead of top-aligned, + separated by clines. +display.max_columns 20 max_rows and max_columns are used + in __repr__() methods to decide if + to_string() or info() is used to + render an object to a string. In + case python/IPython is running in + a terminal this can be set to 0 and + pandas will correctly auto-detect + the width the terminal and swap to + a smaller format in case all columns + would not fit vertically. The IPython + notebook, IPython qtconsole, or IDLE + do not run in a terminal and hence + it is not possible to do correct + auto-detection. 'None' value means + unlimited. 
+display.max_colwidth 50 The maximum width in characters of + a column in the repr of a pandas + data structure. When the column overflows, + a "..." placeholder is embedded in + the output. +display.max_info_columns 100 max_info_columns is used in DataFrame.info + method to decide if per column information + will be printed. +display.max_info_rows 1690785 df.info() will usually show null-counts + for each column. For large frames + this can be quite slow. max_info_rows + and max_info_cols limit this null + check only to frames with smaller + dimensions then specified. +display.max_rows 60 This sets the maximum number of rows + pandas should output when printing + out various output. For example, + this value determines whether the + repr() for a dataframe prints out + fully or just a summary repr. + 'None' value means unlimited. +display.max_seq_items 100 when pretty-printing a long sequence, + no more then `max_seq_items` will + be printed. If items are omitted, + they will be denoted by the addition + of "..." to the resulting string. + If set to None, the number of items + to be printed is unlimited. +display.memory_usage True This specifies if the memory usage of + a DataFrame should be displayed when the + df.info() method is invoked. +display.multi_sparse True "Sparsify" MultiIndex display (don't + display repeated elements in outer + levels within groups) +display.notebook_repr_html True When True, IPython notebook will + use html representation for + pandas objects (if it is available). +display.pprint_nest_depth 3 Controls the number of nested levels + to process when pretty-printing +display.precision 6 Floating point output precision in + terms of number of places after the + decimal, for regular formatting as well + as scientific notation. Similar to + numpy's ``precision`` print option +display.show_dimensions truncate Whether to print out dimensions + at the end of DataFrame repr. 
+ If 'truncate' is specified, only + print out the dimensions if the + frame is truncated (e.g. not display + all rows and/or columns) +display.width 80 Width of the display in characters. + In case python/IPython is running in + a terminal this can be set to None + and pandas will correctly auto-detect + the width. Note that the IPython notebook, + IPython qtconsole, or IDLE do not run in a + terminal and hence it is not possible + to correctly detect the width. +display.html.table_schema False Whether to publish a Table Schema + representation for frontends that + support it. +display.html.border 1 A ``border=value`` attribute is + inserted in the ``<table>`` tag + for the DataFrame HTML repr. +io.excel.xls.writer xlwt The default Excel writer engine for + 'xls' files. +io.excel.xlsm.writer openpyxl The default Excel writer engine for + 'xlsm' files. Available options: + 'openpyxl' (the default). +io.excel.xlsx.writer openpyxl The default Excel writer engine for + 'xlsx' files. +io.hdf.default_format None default format writing format, if + None, then put will default to + 'fixed' and append will default to + 'table' +io.hdf.dropna_table True drop ALL nan rows when appending + to a table +io.parquet.engine None The engine to use as a default for + parquet reading and writing. If None + then try 'pyarrow' and 'fastparquet' +mode.chained_assignment warn Controls ``SettingWithCopyWarning``: + 'raise', 'warn', or None. Raise an + exception, warn, or no action if + trying to use :ref:`chained assignment <indexing.evaluation_order>`. +mode.sim_interactive False Whether to simulate interactive mode + for purposes of testing. +mode.use_inf_as_na False True means treat None, NaN, -INF, + INF as NA (old way), False means + None and NaN are null, but INF, -INF + are not NA (new way). +compute.use_bottleneck True Use the bottleneck library to accelerate + computation if it is installed. 
+compute.use_numexpr True Use the numexpr library to accelerate + computation if it is installed. +plotting.matplotlib.register_converters True Register custom converters with + matplotlib. Set to False to de-register. +======================================= ============ ================================== .. _basics.console_output: diff --git a/doc/source/release.rst b/doc/source/release.rst index 6c3e7f847b485..aea6280a490d6 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -37,6 +37,103 @@ analysis / manipulation tool available in any language. * Binary installers on PyPI: http://pypi.python.org/pypi/pandas * Documentation: http://pandas.pydata.org +pandas 0.22.0 +------------- + +**Release date:** December 29, 2017 + +This is a major release from 0.21.1 and includes a single, API-breaking change. +We recommend that all users upgrade to this version after carefully reading the +release note. + +The only changes are: + +- The sum of an empty or all-*NA* ``Series`` is now ``0`` +- The product of an empty or all-*NA* ``Series`` is now ``1`` +- We've added a ``min_count`` parameter to ``.sum()`` and ``.prod()`` controlling + the minimum number of valid values for the result to be valid. If fewer than + ``min_count`` non-*NA* values are present, the result is *NA*. The default is + ``0``. To return ``NaN``, the 0.21 behavior, use ``min_count=1``. + +See the :ref:`v0.22.0 Whatsnew <whatsnew_0220>` overview for further explanation +of all the places in the library this affects. + +pandas 0.21.1 +------------- + +**Release date:** December 12, 2017 + +This is a minor bug-fix release in the 0.21.x series and includes some small +regression fixes, bug fixes and performance improvements. We recommend that all +users upgrade to this version. + +Highlights include: + +- Temporarily restore matplotlib datetime plotting functionality. This should + resolve issues for users who relied implicitly on pandas to plot datetimes + with matplotlib. 
See :ref:`here <whatsnew_0211.special>`. +- Improvements to the Parquet IO functions introduced in 0.21.0. See + :ref:`here <whatsnew_0211.enhancements.parquet>`. + +See the :ref:`v0.21.1 Whatsnew <whatsnew_0211>` overview for an extensive list +of all the changes for 0.21.1. + +Thanks +~~~~~~ + +A total of 46 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +Contributors +============ + +* Aaron Critchley + +* Alex Rychyk +* Alexander Buchkovsky + +* Alexander Michael Schade + +* Chris Mazzullo +* Cornelius Riemenschneider + +* Dave Hirschfeld + +* David Fischer + +* David Stansby + +* Dror Atariah + +* Eric Kisslinger + +* Hans + +* Ingolf Becker + +* Jan Werkmann + +* Jeff Reback +* Joris Van den Bossche +* Jörg Döpfert + +* Kevin Kuhl + +* Krzysztof Chomski + +* Leif Walsh +* Licht Takeuchi +* Manraj Singh + +* Matt Braymer-Hayes + +* Michael Waskom + +* Mie~~~ + +* Peter Hoffmann + +* Robert Meyer + +* Sam Cohan + +* Sietse Brouwer + +* Sven + +* Tim Swast +* Tom Augspurger +* Wes Turner +* William Ayd + +* Yee Mey + +* bolkedebruin + +* cgohlke +* derestle-htwg + +* fjdiod + +* gabrielclow + +* gfyoung +* ghasemnaddaf + +* jbrockmendel +* jschendel +* miker985 + +* topper-123 + pandas 0.21.0 ------------- @@ -52,7 +149,7 @@ Highlights include: - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` function and :meth:`DataFrame.to_parquet` method, see :ref:`here <whatsnew_0210.enhancements.parquet>`. - New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`. -- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. 
+- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and ``sum`` and ``prod`` on empty Series now return NaN instead of 0, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. - Compatibility fixes for pypy, see :ref:`here <whatsnew_0210.pypy>`. - Additions to the ``drop``, ``reindex`` and ``rename`` API to make them more consistent, see :ref:`here <whatsnew_0210.enhancements.drop_api>`. - Addition of the new methods ``DataFrame.infer_objects`` (see :ref:`here <whatsnew_0210.enhancements.infer_objects>`) and ``GroupBy.pipe`` (see :ref:`here <whatsnew_0210.enhancements.GroupBy_pipe>`). diff --git a/doc/source/whatsnew.rst b/doc/source/whatsnew.rst index 3385bafc26467..64cbe0b050a61 100644 --- a/doc/source/whatsnew.rst +++ b/doc/source/whatsnew.rst @@ -18,6 +18,10 @@ What's New These are new features and improvements of note in each release. +.. include:: whatsnew/v0.22.0.txt + +.. include:: whatsnew/v0.21.1.txt + .. include:: whatsnew/v0.21.0.txt .. include:: whatsnew/v0.20.3.txt diff --git a/doc/source/whatsnew/v0.21.0.txt b/doc/source/whatsnew/v0.21.0.txt index 4c460eeb85b82..3e673bd4cbc28 100644 --- a/doc/source/whatsnew/v0.21.0.txt +++ b/doc/source/whatsnew/v0.21.0.txt @@ -12,7 +12,7 @@ Highlights include: - Integration with `Apache Parquet <https://parquet.apache.org/>`__, including a new top-level :func:`read_parquet` function and :meth:`DataFrame.to_parquet` method, see :ref:`here <whatsnew_0210.enhancements.parquet>`. - New user-facing :class:`pandas.api.types.CategoricalDtype` for specifying categoricals independent of the data, see :ref:`here <whatsnew_0210.enhancements.categorical_dtype>`. 
-- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. +- The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames is now consistent and no longer depends on whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and ``sum`` and ``prod`` on empty Series now return NaN instead of 0, see :ref:`here <whatsnew_0210.api_breaking.bottleneck>`. - Compatibility fixes for pypy, see :ref:`here <whatsnew_0210.pypy>`. - Additions to the ``drop``, ``reindex`` and ``rename`` API to make them more consistent, see :ref:`here <whatsnew_0210.enhancements.drop_api>`. - Addition of the new methods ``DataFrame.infer_objects`` (see :ref:`here <whatsnew_0210.enhancements.infer_objects>`) and ``GroupBy.pipe`` (see :ref:`here <whatsnew_0210.enhancements.GroupBy_pipe>`). @@ -369,11 +369,17 @@ Additionally, support has been dropped for Python 3.4 (:issue:`15251`). .. _whatsnew_0210.api_breaking.bottleneck: -Sum/Prod of all-NaN Series/DataFrames is now consistently NaN -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Sum/Prod of all-NaN or empty Series/DataFrames is now consistently NaN +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. note:: + + The changes described here have been partially reverted. See + the :ref:`v0.22.0 Whatsnew <whatsnew_0220>` for more. + The behavior of ``sum`` and ``prod`` on all-NaN Series/DataFrames no longer depends on -whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed. (:issue:`9422`, :issue:`15507`). +whether `bottleneck <http://berkeleyanalytics.com/bottleneck>`__ is installed, and return value of ``sum`` and ``prod`` on an empty Series has changed (:issue:`9422`, :issue:`15507`). 
Calling ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of a ``DataFrame``, will result in ``NaN``. See the :ref:`docs <missing_data.numeric_sum>`. @@ -381,35 +387,35 @@ Calling ``sum`` or ``prod`` on an empty or all-``NaN`` ``Series``, or columns of s = Series([np.nan]) -Previously NO ``bottleneck`` +Previously WITHOUT ``bottleneck`` installed: .. code-block:: ipython In [2]: s.sum() Out[2]: np.nan -Previously WITH ``bottleneck`` +Previously WITH ``bottleneck``: .. code-block:: ipython In [2]: s.sum() Out[2]: 0.0 -New Behavior, without regard to the bottleneck installation. +New Behavior, without regard to the bottleneck installation: .. ipython:: python s.sum() -Note that this also changes the sum of an empty ``Series`` - -Previously regardless of ``bottlenck`` +Note that this also changes the sum of an empty ``Series``. Previously this always returned 0 regardless of a ``bottleneck`` installation: .. code-block:: ipython In [1]: pd.Series([]).sum() Out[1]: 0 +but for consistency with the all-NaN case, this was changed to return NaN as well: + .. ipython:: python pd.Series([]).sum() @@ -877,6 +883,28 @@ New Behavior: pd.interval_range(start=0, end=4) +.. _whatsnew_0210.api.mpl_converters: + +No Automatic Matplotlib Converters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pandas no longer registers our ``date``, ``time``, ``datetime``, +``datetime64``, and ``Period`` converters with matplotlib when pandas is +imported. Matplotlib plot methods (``plt.plot``, ``ax.plot``, ...), will not +nicely format the x-axis for ``DatetimeIndex`` or ``PeriodIndex`` values. You +must explicitly register these methods: + +.. ipython:: python + + from pandas.tseries import converter + converter.register() + + fig, ax = plt.subplots() + plt.plot(pd.date_range('2017', periods=6), range(6)) + +Pandas built-in ``Series.plot`` and ``DataFrame.plot`` *will* register these +converters on first-use (:issue:`17710`). + ..
_whatsnew_0210.api: Other API Changes @@ -900,8 +928,6 @@ Other API Changes - Renamed non-functional ``index`` to ``index_col`` in :func:`read_stata` to improve API consistency (:issue:`16342`) - Bug in :func:`DataFrame.drop` caused boolean labels ``False`` and ``True`` to be treated as labels 0 and 1 respectively when dropping indices from a numeric index. This will now raise a ValueError (:issue:`16877`) - Restricted DateOffset keyword arguments. Previously, ``DateOffset`` subclasses allowed arbitrary keyword arguments which could lead to unexpected behavior. Now, only valid arguments will be accepted. (:issue:`17176`). -- Pandas no longer registers matplotlib converters on import. The converters - will be registered and used when the first plot is draw (:issue:`17710`) .. _whatsnew_0210.deprecations: diff --git a/doc/source/whatsnew/v0.21.1.txt b/doc/source/whatsnew/v0.21.1.txt index 422a239e86ece..9d065d71a4801 100644 --- a/doc/source/whatsnew/v0.21.1.txt +++ b/doc/source/whatsnew/v0.21.1.txt @@ -1,56 +1,94 @@ .. _whatsnew_0211: -v0.21.1 -------- +v0.21.1 (December 12, 2017) +--------------------------- -This is a minor release from 0.21.1 and includes a number of deprecations, new -features, enhancements, and performance improvements along with a large number -of bug fixes. We recommend that all users upgrade to this version. +This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes, +bug fixes and performance improvements. +We recommend that all users upgrade to this version. + +Highlights include: + +- Temporarily restore matplotlib datetime plotting functionality. This should + resolve issues for users who implicitly relied on pandas to plot datetimes + with matplotlib. See :ref:`here <whatsnew_0211.converters>`. +- Improvements to the Parquet IO functions introduced in 0.21.0. See + :ref:`here <whatsnew_0211.enhancements.parquet>`. + + +.. contents:: What's new in v0.21.1 + :local: + :backlinks: none + + +.. 
_whatsnew_0211.converters: + +Restore Matplotlib datetime Converter Registration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Pandas implements some matplotlib converters for nicely formatting the axis +labels on plots with ``datetime`` or ``Period`` values. Prior to pandas 0.21.0, +these were implicitly registered with matplotlib, as a side effect of ``import +pandas``. + +In pandas 0.21.0, we required users to explicitly register the +converter. This caused problems for some users who relied on those converters +being present for regular ``matplotlib.pyplot`` plotting methods, so we're +temporarily reverting that change; pandas 0.21.1 again registers the converters on +import, just like before 0.21.0. + +We've added a new option to control the converters: +``pd.options.plotting.matplotlib.register_converters``. By default, they are +registered. Toggling this to ``False`` removes pandas' formatters and restores +any converters we overwrote when registering them (:issue:`18301`). + +We're working with the matplotlib developers to make this easier. We're trying +to balance user convenience (automatically registering the converters) with +import performance and best practices (importing pandas shouldn't have the side +effect of overwriting any custom converters you've already set). In the future +we hope to have most of the datetime formatting functionality in matplotlib, +with just the pandas-specific converters in pandas. We'll then gracefully +deprecate the automatic registration of converters in favor of users explicitly +registering them when they want them. .. _whatsnew_0211.enhancements: New features ~~~~~~~~~~~~ -- -- -- +.. _whatsnew_0211.enhancements.parquet: + +Improvements to the Parquet IO functionality +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- :func:`DataFrame.to_parquet` will now write non-default indexes when the + underlying engine supports it.
The indexes will be preserved when reading + back in with :func:`read_parquet` (:issue:`18581`). +- :func:`read_parquet` now allows to specify the columns to read from a parquet file (:issue:`18154`) +- :func:`read_parquet` now allows to specify kwargs which are passed to the respective engine (:issue:`18216`) .. _whatsnew_0211.enhancements.other: Other Enhancements ^^^^^^^^^^^^^^^^^^ -- -- -- +- :meth:`Timestamp.timestamp` is now available in Python 2.7. (:issue:`17329`) +- :class:`Grouper` and :class:`TimeGrouper` now have a friendly repr output (:issue:`18203`). .. _whatsnew_0211.deprecations: Deprecations ~~~~~~~~~~~~ -- -- -- +- ``pandas.tseries.register`` has been renamed to + :func:`pandas.plotting.register_matplotlib_converters` (:issue:`18301`) .. _whatsnew_0211.performance: Performance Improvements ~~~~~~~~~~~~~~~~~~~~~~~~ -- -- -- - -.. _whatsnew_0211.docs: - -Documentation Changes -~~~~~~~~~~~~~~~~~~~~~ - -- -- -- +- Improved performance of plotting large series/dataframes (:issue:`18236`). .. _whatsnew_0211.bug_fixes: @@ -60,65 +98,78 @@ Bug Fixes Conversion ^^^^^^^^^^ -- -- -- +- Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) +- Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) +- Bug in :meth:`IntervalIndex.copy` when copying an ``IntervalIndex`` with non-default ``closed`` (:issue:`18339`) +- Bug in :func:`DataFrame.to_dict` where columns of datetime that are tz-aware were not converted to required arrays when used with ``orient='records'``, raising ``TypeError`` (:issue:`18372`) +- Bug in :class:`DateTimeIndex` and :meth:`date_range` where mismatching tz-aware ``start`` and ``end`` timezones would not raise an error if ``end.tzinfo`` is None (:issue:`18431`) +- Bug in :meth:`Series.fillna` which raised when passed a long integer on Python 2 (:issue:`18159`).
Indexing ^^^^^^^^ -- -- -- +- Bug in a boolean comparison of a ``datetime.datetime`` and a ``datetime64[ns]`` dtype Series (:issue:`17965`) +- Bug where a ``MultiIndex`` with more than a million records was not raising ``AttributeError`` when trying to access a missing attribute (:issue:`18165`) +- Bug in :class:`IntervalIndex` constructor when a list of intervals is passed with non-default ``closed`` (:issue:`18334`) +- Bug in ``Index.putmask`` when an invalid mask is passed (:issue:`18368`) +- Bug in masked assignment of a ``timedelta64[ns]`` dtype ``Series``, incorrectly coerced to float (:issue:`18493`) I/O ^^^ +- Bug in :class:`~pandas.io.stata.StataReader` not converting date/time columns with display formatting addressed (:issue:`17990`). Previously columns with display formatting were normally left as ordinal numbers and not converted to datetime objects. +- Bug in :func:`read_csv` when reading a compressed UTF-16 encoded file (:issue:`18071`) +- Bug in :func:`read_csv` for handling null values in index columns when specifying ``na_filter=False`` (:issue:`5239`) +- Bug in :func:`read_csv` when reading numeric category fields with high cardinality (:issue:`18186`) +- Bug in :meth:`DataFrame.to_csv` when the table had ``MultiIndex`` columns, and a list of strings was passed in for ``header`` (:issue:`5539`) +- Bug in parsing integer datetime-like columns with specified format in ``read_sql`` (:issue:`17855`).
+- Bug in :meth:`DataFrame.to_msgpack` when serializing data of the numpy.bool_ datatype (:issue:`18390`) +- Bug in :func:`read_json` not decoding when reading line delimited JSON from S3 (:issue:`17200`) +- Bug in :func:`pandas.io.json.json_normalize` to avoid modification of ``meta`` (:issue:`18610`) +- Bug in :func:`to_latex` where repeated multi-index values were not printed even though a higher level index differed from the previous row (:issue:`14484`) +- Bug when reading NaN-only categorical columns in :class:`HDFStore` (:issue:`18413`) +- Bug in :meth:`DataFrame.to_latex` with ``longtable=True`` where a latex multicolumn always spanned over three columns (:issue:`17959`) + + Plotting ^^^^^^^^ -- -- -- +- Bug in ``DataFrame.plot()`` and ``Series.plot()`` with :class:`DatetimeIndex` where a figure generated by them is not pickleable in Python 3 (:issue:`18439`) Groupby/Resample/Rolling ^^^^^^^^^^^^^^^^^^^^^^^^ -- -- -- - -Sparse -^^^^^^ - -- -- -- +- Bug in ``DataFrame.resample(...).apply(...)`` when there is a callable that returns different columns (:issue:`15169`) +- Bug in ``DataFrame.resample(...)`` when there is a time change (DST) and resampling frequency is 12h or higher (:issue:`15549`) +- Bug in ``pd.DataFrameGroupBy.count()`` when counting over a datetimelike column (:issue:`13393`) +- Bug in ``rolling.var`` where calculation is inaccurate with a zero-valued array (:issue:`18430`) Reshaping ^^^^^^^^^ -- -- -- +- Error message in ``pd.merge_asof()`` for key datatype mismatch now includes datatype of left and right key (:issue:`18068`) +- Bug in ``pd.concat`` when empty and non-empty DataFrames or Series are concatenated (:issue:`18178` :issue:`18187`) +- Bug in ``DataFrame.filter(...)`` when :class:`unicode` is passed as a condition in Python 2 (:issue:`13101`) +- Bug when merging empty DataFrames when ``np.seterr(divide='raise')`` is set (:issue:`17776`) Numeric ^^^^^^^ -- -- -- +- Bug in ``pd.Series.rolling.skew()`` and ``rolling.kurt()`` with all
equal values has floating issue (:issue:`18044`) +- Bug in :class:`TimedeltaIndex` subtraction could incorrectly overflow when ``NaT`` is present (:issue:`17791`) +- Bug in :class:`DatetimeIndex` subtracting datetimelike from DatetimeIndex could fail to overflow (:issue:`18020`) Categorical ^^^^^^^^^^^ -- -- -- +- Bug in :meth:`DataFrame.astype` where casting to 'category' on an empty ``DataFrame`` causes a segmentation fault (:issue:`18004`) +- Error messages in the testing module have been improved when items have + different ``CategoricalDtype`` (:issue:`18069`) +- ``CategoricalIndex`` can now correctly take a ``pd.api.types.CategoricalDtype`` as its dtype (:issue:`18116`) +- Bug in ``Categorical.unique()`` returning read-only ``codes`` array when all categories were ``NaN`` (:issue:`18051`) +- Bug in ``DataFrame.groupby(axis=1)`` with a ``CategoricalIndex`` (:issue:`18432`) -Other -^^^^^ +String +^^^^^^ -- -- -- +- :meth:`Series.str.split()` will now propagate ``NaN`` values across all expanded columns instead of ``None`` (:issue:`18450`) diff --git a/doc/source/whatsnew/v0.22.0.txt b/doc/source/whatsnew/v0.22.0.txt index 53b052a955b45..d165339cb0de9 100644 --- a/doc/source/whatsnew/v0.22.0.txt +++ b/doc/source/whatsnew/v0.22.0.txt @@ -1,156 +1,243 @@ .. _whatsnew_0220: -v0.22.0 ------- +v0.22.0 (December 29, 2017) +--------------------------- -This is a major release from 0.21.1 and includes a number of API changes, -deprecations, new features, enhancements, and performance improvements along -with a large number of bug fixes. We recommend that all users upgrade to this -version. +This is a major release from 0.21.1 and includes a single, API-breaking change. +We recommend that all users upgrade to this version after carefully reading the +release note (singular!). -..
_whatsnew_0220.api_breaking: -New features -~~~~~~~~~~~~ +Backwards incompatible API changes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- -- -- +Pandas 0.22.0 changes the handling of empty and all-*NA* sums and products. The +summary is that -.. _whatsnew_0220.enhancements.other: +* The sum of an empty or all-*NA* ``Series`` is now ``0`` +* The product of an empty or all-*NA* ``Series`` is now ``1`` +* We've added a ``min_count`` parameter to ``.sum()`` and ``.prod()`` controlling + the minimum number of valid values for the result to be valid. If fewer than + ``min_count`` non-*NA* values are present, the result is *NA*. The default is + ``0``. To return ``NaN``, the 0.21 behavior, use ``min_count=1``. -Other Enhancements -^^^^^^^^^^^^^^^^^^ +Some background: In pandas 0.21, we fixed a long-standing inconsistency +in the return value of all-*NA* series depending on whether or not bottleneck +was installed. See :ref:`whatsnew_0210.api_breaking.bottleneck`. At the same +time, we changed the sum and prod of an empty ``Series`` to also be ``NaN``. -- -- -- +Based on feedback, we've partially reverted those changes. -.. _whatsnew_0220.api_breaking: +Arithmetic Operations +^^^^^^^^^^^^^^^^^^^^^ -Backwards incompatible API changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The default sum for empty or all-*NA* ``Series`` is now ``0``. -- -- -- +*pandas 0.21.x* -.. _whatsnew_0220.api: +.. code-block:: ipython -Other API Changes -^^^^^^^^^^^^^^^^^ + In [1]: pd.Series([]).sum() + Out[1]: nan -- -- -- + In [2]: pd.Series([np.nan]).sum() + Out[2]: nan -.. _whatsnew_0220.deprecations: +*pandas 0.22.0* -Deprecations -~~~~~~~~~~~~ +.. ipython:: python -- -- -- + pd.Series([]).sum() + pd.Series([np.nan]).sum() -.. _whatsnew_0220.prior_deprecations: +The default behavior is the same as pandas 0.20.3 with bottleneck installed. It +also matches the behavior of NumPy's ``np.nansum`` on empty and all-*NA* arrays. 
-Removal of prior version deprecations/changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To have the sum of an empty series return ``NaN`` (the default behavior of +pandas 0.20.3 without bottleneck, or pandas 0.21.x), use the ``min_count`` +keyword. -- -- -- +.. ipython:: python -.. _whatsnew_0220.performance: + pd.Series([]).sum(min_count=1) -Performance Improvements -~~~~~~~~~~~~~~~~~~~~~~~~ +Thanks to the ``skipna`` parameter, the ``.sum`` on an all-*NA* +series is conceptually the same as the ``.sum`` of an empty one with +``skipna=True`` (the default). -- -- -- +.. ipython:: python -.. _whatsnew_0220.docs: + pd.Series([np.nan]).sum(min_count=1) # skipna=True by default -Documentation Changes -~~~~~~~~~~~~~~~~~~~~~ +The ``min_count`` parameter refers to the minimum number of *non-null* values +required for a non-NA sum or product. -- -- -- +:meth:`Series.prod` has been updated to behave the same as :meth:`Series.sum`, +returning ``1`` instead. -.. _whatsnew_0220.bug_fixes: +.. ipython:: python -Bug Fixes -~~~~~~~~~ + pd.Series([]).prod() + pd.Series([np.nan]).prod() + pd.Series([]).prod(min_count=1) -Conversion -^^^^^^^^^^ +These changes affect :meth:`DataFrame.sum` and :meth:`DataFrame.prod` as well. +Finally, a few less obvious places in pandas are affected by this change. -- -- -- +Grouping by a Categorical +^^^^^^^^^^^^^^^^^^^^^^^^^ -Indexing -^^^^^^^^ +Grouping by a ``Categorical`` and summing now returns ``0`` instead of +``NaN`` for categories with no observations. The product now returns ``1`` +instead of ``NaN``. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [8]: grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + + In [9]: pd.Series([1, 2]).groupby(grouper).sum() + Out[9]: + a 3.0 + b NaN + dtype: float64 -- -- -- +*pandas 0.22* -I/O -^^^ +.. 
ipython:: python -- -- -- + grouper = pd.Categorical(['a', 'a'], categories=['a', 'b']) + pd.Series([1, 2]).groupby(grouper).sum() -Plotting +To restore the 0.21 behavior of returning ``NaN`` for unobserved groups, +use ``min_count>=1``. + +.. ipython:: python + + pd.Series([1, 2]).groupby(grouper).sum(min_count=1) + +Resample ^^^^^^^^ -- -- -- +The sum and product of all-*NA* bins has changed from ``NaN`` to ``0`` for +sum and ``1`` for product. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [11]: s = pd.Series([1, 1, np.nan, np.nan], + ...: index=pd.date_range('2017', periods=4)) + ...: s + Out[11]: + 2017-01-01 1.0 + 2017-01-02 1.0 + 2017-01-03 NaN + 2017-01-04 NaN + Freq: D, dtype: float64 + + In [12]: s.resample('2d').sum() + Out[12]: + 2017-01-01 2.0 + 2017-01-03 NaN + Freq: 2D, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + s = pd.Series([1, 1, np.nan, np.nan], + index=pd.date_range('2017', periods=4)) + s.resample('2d').sum() + +To restore the 0.21 behavior of returning ``NaN``, use ``min_count>=1``. + +.. ipython:: python + + s.resample('2d').sum(min_count=1) + +In particular, upsampling and taking the sum or product is affected, as +upsampling introduces missing values even if the original series was +entirely valid. + +*pandas 0.21.x* + +.. code-block:: ipython + + In [14]: idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + + In [15]: pd.Series([1, 2], index=idx).resample('12H').sum() + Out[15]: + 2017-01-01 00:00:00 1.0 + 2017-01-01 12:00:00 NaN + 2017-01-02 00:00:00 2.0 + Freq: 12H, dtype: float64 + +*pandas 0.22.0* + +.. ipython:: python + + idx = pd.DatetimeIndex(['2017-01-01', '2017-01-02']) + pd.Series([1, 2], index=idx).resample("12H").sum() + +Once again, the ``min_count`` keyword is available to restore the 0.21 behavior. + +.. 
ipython:: python + + pd.Series([1, 2], index=idx).resample("12H").sum(min_count=1) + +Rolling and Expanding +^^^^^^^^^^^^^^^^^^^^^ + +Rolling and expanding already have a ``min_periods`` keyword that behaves +similar to ``min_count``. The only case that changes is when doing a rolling +or expanding sum with ``min_periods=0``. Previously this returned ``NaN``, +when fewer than ``min_periods`` non-*NA* values were in the window. Now it +returns ``0``. + +*pandas 0.21.1* + +.. code-block:: ipython + + In [17]: s = pd.Series([np.nan, np.nan]) + + In [18]: s.rolling(2, min_periods=0).sum() + Out[18]: + 0 NaN + 1 NaN + dtype: float64 -Groupby/Resample/Rolling -^^^^^^^^^^^^^^^^^^^^^^^^ +*pandas 0.22.0* -- -- -- +.. ipython:: python -Sparse -^^^^^^ + s = pd.Series([np.nan, np.nan]) + s.rolling(2, min_periods=0).sum() -- -- -- +The default behavior of ``min_periods=None``, implying that ``min_periods`` +equals the window size, is unchanged. -Reshaping -^^^^^^^^^ +Compatibility +~~~~~~~~~~~~~ -- -- -- +If you maintain a library that should work across pandas versions, it +may be easiest to exclude pandas 0.21 from your requirements. Otherwise, all your +``sum()`` calls would need to check if the ``Series`` is empty before summing. -Numeric -^^^^^^^ +With setuptools, in your ``setup.py`` use:: -- -- -- + install_requires=['pandas!=0.21.*', ...] -Categorical -^^^^^^^^^^^ +With conda, use -- -- -- +.. code-block:: yaml -Other -^^^^^ + requirements: + run: + - pandas !=0.21.0,!=0.21.1 -- -- -- +Note that the inconsistency in the return value for all-*NA* series is still +there for pandas 0.20.3 and earlier. Avoiding pandas 0.21 will only help with +the empty case. 
diff --git a/pandas/_libs/algos.pyx b/pandas/_libs/algos.pyx index d159761c3f5e6..a44a7288bda45 100644 --- a/pandas/_libs/algos.pyx +++ b/pandas/_libs/algos.pyx @@ -87,7 +87,7 @@ class NegInfinity(object): @cython.boundscheck(False) def is_lexsorted(list list_of_arrays): cdef: - int i + Py_ssize_t i Py_ssize_t n, nlevels int64_t k, cur, pre ndarray arr @@ -99,11 +99,12 @@ def is_lexsorted(list list_of_arrays): cdef int64_t **vecs = <int64_t**> malloc(nlevels * sizeof(int64_t*)) for i in range(nlevels): arr = list_of_arrays[i] + assert arr.dtype.name == 'int64' vecs[i] = <int64_t*> arr.data # Assume uniqueness?? with nogil: - for i in range(n): + for i in range(1, n): for k in range(nlevels): cur = vecs[k][i] pre = vecs[k][i -1] diff --git a/pandas/_libs/groupby_helper.pxi.in b/pandas/_libs/groupby_helper.pxi.in index d38b677df321c..14d47398ac1df 100644 --- a/pandas/_libs/groupby_helper.pxi.in +++ b/pandas/_libs/groupby_helper.pxi.in @@ -36,7 +36,8 @@ def get_dispatch(dtypes): def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ @@ -88,7 +89,7 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): - if nobs[i, j] == 0: + if nobs[i, j] < min_count: out[i, j] = NAN else: out[i, j] = sumx[i, j] @@ -99,7 +100,8 @@ def group_add_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=0): """ Only aggregates on axis=0 """ @@ -147,7 +149,7 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, for i in range(ncounts): for j in range(K): - if nobs[i, j] == 0: + if nobs[i, j] < min_count: out[i, j] = NAN else: out[i, j] = prodx[i, j] @@ -159,12 
+161,15 @@ def group_prod_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) {{dest_type2}} val, ct, oldmean ndarray[{{dest_type2}}, ndim=2] nobs, mean + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -208,12 +213,15 @@ def group_var_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): cdef: Py_ssize_t i, j, N, K, lab, ncounts = len(counts) {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] sumx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -263,7 +271,8 @@ def group_mean_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -272,6 +281,8 @@ def group_ohlc_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count Py_ssize_t ngroups = len(counts) + assert min_count == -1, "'min_count' only used in add and prod" + if len(labels) == 0: return @@ -332,7 +343,8 @@ def get_dispatch(dtypes): def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -342,6 +354,8 @@ 
def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -382,7 +396,8 @@ def group_last_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{c_type}}, ndim=2] values, - ndarray[int64_t] labels, int64_t rank): + ndarray[int64_t] labels, int64_t rank, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -392,6 +407,8 @@ def group_nth_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[{{dest_type2}}, ndim=2] resx ndarray[int64_t, ndim=2] nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -455,7 +472,8 @@ def get_dispatch(dtypes): def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -464,6 +482,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] maxx, nobs + assert min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -526,7 +546,8 @@ def group_max_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, ndarray[int64_t] counts, ndarray[{{dest_type2}}, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -535,6 +556,8 @@ def group_min_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, {{dest_type2}} val, count ndarray[{{dest_type2}}, ndim=2] minx, nobs + assert 
min_count == -1, "'min_count' only used in add and prod" + if not len(values) == len(labels): raise AssertionError("len(index) != len(labels)") @@ -686,7 +709,8 @@ def group_cummax_{{name}}(ndarray[{{dest_type2}}, ndim=2] out, def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] counts, ndarray[float64_t, ndim=2] values, - ndarray[int64_t] labels): + ndarray[int64_t] labels, + Py_ssize_t min_count=-1): """ Only aggregates on axis=0 """ @@ -695,6 +719,9 @@ def group_median_float64(ndarray[float64_t, ndim=2] out, ndarray[int64_t] _counts ndarray data float64_t* ptr + + assert min_count == -1, "'min_count' only used in add and prod" + ngroups = len(counts) N, K = (<object> values).shape diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index c96251a0293d6..65e99f5f46fc2 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -19,7 +19,7 @@ from hashtable cimport HashTable from pandas._libs import algos, hashtable as _hash from pandas._libs.tslib import Timestamp, Timedelta -from datetime import datetime, timedelta +from datetime import datetime, timedelta, date from cpython cimport PyTuple_Check, PyList_Check @@ -500,7 +500,7 @@ cpdef convert_scalar(ndarray arr, object value): if arr.descr.type_num == NPY_DATETIME: if isinstance(value, np.ndarray): pass - elif isinstance(value, datetime): + elif isinstance(value, (datetime, np.datetime64, date)): return Timestamp(value).value elif value is None or value != value: return iNaT diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index 0dacdf70a71d5..a90039d789972 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -374,6 +374,17 @@ cdef class TextReader: float_precision=None, skip_blank_lines=True): + # set encoding for native Python and C library + if encoding is not None: + if not isinstance(encoding, bytes): + encoding = encoding.encode('utf-8') + encoding = encoding.lower() + self.c_encoding = <char*> encoding + else: + self.c_encoding = 
NULL + + self.encoding = encoding + self.parser = parser_new() self.parser.chunksize = tokenize_chunksize @@ -495,17 +506,6 @@ cdef class TextReader: self.parser.double_converter_nogil = NULL self.parser.double_converter_withgil = round_trip - # encoding - if encoding is not None: - if not isinstance(encoding, bytes): - encoding = encoding.encode('utf-8') - encoding = encoding.lower() - self.c_encoding = <char*> encoding - else: - self.c_encoding = NULL - - self.encoding = encoding - if isinstance(dtype, dict): dtype = {k: pandas_dtype(dtype[k]) for k in dtype} @@ -684,6 +684,14 @@ cdef class TextReader: else: raise ValueError('Unrecognized compression type: %s' % self.compression) + + if b'utf-16' in (self.encoding or b''): + # we need to read utf-16 through UTF8Recoder. + # if source is utf-16, convert source to utf-8 by UTF8Recoder. + source = com.UTF8Recoder(source, self.encoding.decode('utf-8')) + self.encoding = b'utf-8' + self.c_encoding = <char*> self.encoding + self.handle = source if isinstance(source, basestring): @@ -2213,9 +2221,10 @@ def _concatenate_chunks(list chunks): for name in names: arrs = [chunk.pop(name) for chunk in chunks] # Check each arr for consistent types. 
- dtypes = set([a.dtype for a in arrs]) - if len(dtypes) > 1: - common_type = np.find_common_type(dtypes, []) + dtypes = {a.dtype for a in arrs} + numpy_dtypes = {x for x in dtypes if not is_categorical_dtype(x)} + if len(numpy_dtypes) > 1: + common_type = np.find_common_type(numpy_dtypes, []) if common_type == np.object: warning_columns.append(str(name)) diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index b0a64e1ccc225..c340e870e9722 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -349,13 +349,13 @@ def infer_dtype(object value, bint skipna=False): if values.dtype != np.object_: values = values.astype('O') + # make contiguous + values = values.ravel() + n = len(values) if n == 0: return 'empty' - # make contiguous - values = values.ravel() - # try to use a valid value for i in range(n): val = util.get_value_1d(values, i) diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index a0aae6a5de707..20b974ce5a659 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -961,8 +961,7 @@ class NaTType(_NaT): combine = _make_error_func('combine', None) utcnow = _make_error_func('utcnow', None) - if PY3: - timestamp = _make_error_func('timestamp', datetime) + timestamp = _make_error_func('timestamp', Timestamp) # GH9513 NaT methods (except to_datetime64) to raise, return np.nan, or # return NaT create functions that raise, for binding to NaTType @@ -1409,6 +1408,11 @@ cdef class _Timestamp(datetime): def __get__(self): return np.datetime64(self.value, 'ns') + def timestamp(self): + """Return POSIX timestamp as float.""" + # py27 compat, see GH#17329 + return round(self.value / 1e9, 6) + cdef PyTypeObject* ts_type = <PyTypeObject*> Timestamp @@ -3366,7 +3370,7 @@ cpdef int64_t tz_convert_single(int64_t val, object tz1, object tz2): """ Convert the val (in i8) from timezone1 to timezone2 - This is a single timezone versoin of tz_convert + This is a single timezone version of tz_convert 
Parameters ---------- diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 7f778dde86e23..ba7031bc382b1 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -283,10 +283,9 @@ cdef object get_dst_info(object tz): def infer_tzinfo(start, end): if start is not None and end is not None: tz = start.tzinfo - if end.tzinfo: - if not (get_timezone(tz) == get_timezone(end.tzinfo)): - msg = 'Inputs must both have the same timezone, {tz1} != {tz2}' - raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo)) + if not (get_timezone(tz) == get_timezone(end.tzinfo)): + msg = 'Inputs must both have the same timezone, {tz1} != {tz2}' + raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo)) elif start is not None: tz = start.tzinfo elif end is not None: diff --git a/pandas/_libs/window.pyx b/pandas/_libs/window.pyx index b6bd6f92f6199..3a7a6d54d3851 100644 --- a/pandas/_libs/window.pyx +++ b/pandas/_libs/window.pyx @@ -225,14 +225,16 @@ cdef class VariableWindowIndexer(WindowIndexer): right_closed: bint right endpoint closedness True if the right endpoint is closed, False if open - + floor: optional + unit for flooring the unit """ def __init__(self, ndarray input, int64_t win, int64_t minp, - bint left_closed, bint right_closed, ndarray index): + bint left_closed, bint right_closed, ndarray index, + object floor=None): self.is_variable = 1 self.N = len(index) - self.minp = _check_minp(win, minp, self.N) + self.minp = _check_minp(win, minp, self.N, floor=floor) self.start = np.empty(self.N, dtype='int64') self.start.fill(-1) @@ -347,7 +349,7 @@ def get_window_indexer(input, win, minp, index, closed, if index is not None: indexer = VariableWindowIndexer(input, win, minp, left_closed, - right_closed, index) + right_closed, index, floor) elif use_mock: indexer = MockFixedWindowIndexer(input, win, minp, left_closed, right_closed, index, floor) @@ -446,7 +448,7 @@ def roll_sum(ndarray[double_t] input, int64_t win, 
int64_t minp, object index, object closed): cdef: double val, prev_x, sum_x = 0 - int64_t s, e + int64_t s, e, range_endpoint int64_t nobs = 0, i, j, N bint is_variable ndarray[int64_t] start, end @@ -454,7 +456,8 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, start, end, N, win, minp, is_variable = get_window_indexer(input, win, minp, index, - closed) + closed, + floor=0) output = np.empty(N, dtype=float) # for performance we are going to iterate @@ -494,13 +497,15 @@ def roll_sum(ndarray[double_t] input, int64_t win, int64_t minp, # fixed window + range_endpoint = int_max(minp, 1) - 1 + with nogil: - for i in range(0, minp - 1): + for i in range(0, range_endpoint): add_sum(input[i], &nobs, &sum_x) output[i] = NaN - for i in range(minp - 1, N): + for i in range(range_endpoint, N): val = input[i] add_sum(val, &nobs, &sum_x) @@ -661,9 +666,11 @@ cdef inline void add_var(double val, double *nobs, double *mean_x, if val == val: nobs[0] = nobs[0] + 1 - delta = (val - mean_x[0]) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + delta = val - mean_x[0] mean_x[0] = mean_x[0] + delta / nobs[0] - ssqdm_x[0] = ssqdm_x[0] + delta * (val - mean_x[0]) + ssqdm_x[0] = ssqdm_x[0] + ((nobs[0] - 1) * delta ** 2) / nobs[0] cdef inline void remove_var(double val, double *nobs, double *mean_x, @@ -675,9 +682,11 @@ cdef inline void remove_var(double val, double *nobs, double *mean_x, if val == val: nobs[0] = nobs[0] - 1 if nobs[0]: - delta = (val - mean_x[0]) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + delta = val - mean_x[0] mean_x[0] = mean_x[0] - delta / nobs[0] - ssqdm_x[0] = ssqdm_x[0] - delta * (val - mean_x[0]) + ssqdm_x[0] = ssqdm_x[0] - ((nobs[0] + 1) * delta ** 2) / nobs[0] else: mean_x[0] = 0 ssqdm_x[0] = 0 @@ -689,7 +698,7 @@ def roll_var(ndarray[double_t] input, int64_t 
win, int64_t minp, Numerically stable implementation using Welford's method. """ cdef: - double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta + double val, prev, mean_x = 0, ssqdm_x = 0, nobs = 0, delta, mean_x_old int64_t s, e bint is_variable Py_ssize_t i, j, N @@ -749,6 +758,9 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, add_var(input[i], &nobs, &mean_x, &ssqdm_x) output[i] = calc_var(minp, ddof, nobs, ssqdm_x) + # a part of Welford's method for the online variance-calculation + # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + # After the first window, observations can both be added and # removed for i from win <= i < N: @@ -760,10 +772,12 @@ def roll_var(ndarray[double_t] input, int64_t win, int64_t minp, # Adding one observation and removing another one delta = val - prev - prev -= mean_x + mean_x_old = mean_x + mean_x += delta / nobs - val -= mean_x - ssqdm_x += (val + prev) * delta + ssqdm_x += ((nobs - 1) * val + + (nobs + 1) * prev + - 2 * nobs * mean_x_old) * delta / nobs else: add_var(val, &nobs, &mean_x, &ssqdm_x) @@ -788,7 +802,17 @@ cdef inline double calc_skew(int64_t minp, int64_t nobs, double x, double xx, A = x / dnobs B = xx / dnobs - A * A C = xxx / dnobs - A * A * A - 3 * A * B - if B <= 0 or nobs < 3: + + # #18044: with uniform distribution, floating issue will + # cause B != 0. and cause the result is a very + # large number. + # + # in core/nanops.py nanskew/nankurt call the function + # _zero_out_fperr(m2) to fix floating error. + # if the variance is less than 1e-14, it could be + # treat as zero, here we follow the original + # skew/kurt behaviour to check B <= 1e-14 + if B <= 1e-14 or nobs < 3: result = NaN else: R = sqrt(B) @@ -915,7 +939,16 @@ cdef inline double calc_kurt(int64_t minp, int64_t nobs, double x, double xx, R = R * A D = xxxx / dnobs - R - 6 * B * A * A - 4 * C * A - if B == 0 or nobs < 4: + # #18044: with uniform distribution, floating issue will + # cause B != 0. 
and cause the result is a very + # large number. + # + # in core/nanops.py nanskew/nankurt call the function + # _zero_out_fperr(m2) to fix floating error. + # if the variance is less than 1e-14, it could be + # treat as zero, here we follow the original + # skew/kurt behaviour to check B <= 1e-14 + if B <= 1e-14 or nobs < 4: result = NaN else: K = (dnobs * dnobs - 1.) * D / (B * B) - 3 * ((dnobs - 1.) ** 2) diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 3853ac017044c..288d9d7742daf 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -257,6 +257,16 @@ def u(s): def u_safe(s): return s + def to_str(s): + """ + Convert bytes and non-string into Python 3 str + """ + if isinstance(s, binary_type): + s = bytes_to_str(s) + elif not isinstance(s, string_types): + s = str(s) + return s + def strlen(data, encoding=None): # encoding is for compat with PY2 return len(data) @@ -302,6 +312,14 @@ def u_safe(s): except: return s + def to_str(s): + """ + Convert unicode and non-string into Python 2 str + """ + if not isinstance(s, string_types): + s = str(s) + return s + def strlen(data, encoding=None): try: data = data.decode(encoding) @@ -381,17 +399,20 @@ def raise_with_traceback(exc, traceback=Ellipsis): # http://stackoverflow.com/questions/4126348 # Thanks to @martineau at SO -from dateutil import parser as _date_parser import dateutil + +if PY2 and LooseVersion(dateutil.__version__) == '2.0': + # dateutil brokenness + raise Exception('dateutil 2.0 incompatible with Python 2.x, you must ' + 'install version 1.5 or 2.1+!') + +from dateutil import parser as _date_parser if LooseVersion(dateutil.__version__) < '2.0': + @functools.wraps(_date_parser.parse) def parse_date(timestr, *args, **kwargs): timestr = bytes(timestr) return _date_parser.parse(timestr, *args, **kwargs) -elif PY2 and LooseVersion(dateutil.__version__) == '2.0': - # dateutil brokenness - raise Exception('dateutil 2.0 incompatible with Python 2.x, you must ' - 
'install version 1.5 or 2.1+!') else: parse_date = _date_parser.parse diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index e709c771b7d18..c574e6d56916b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -2268,7 +2268,7 @@ def _recode_for_categories(codes, old_categories, new_categories): if len(old_categories) == 0: # All null anyway, so just retain the nulls - return codes + return codes.copy() indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), new_categories) new_codes = take_1d(indexer, codes.copy(), fill_value=-1) diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index 33531e80449d8..94208a61a4377 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -479,3 +479,29 @@ def use_inf_as_na_cb(key): cf.register_option( 'engine', 'auto', parquet_engine_doc, validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet'])) + +# -------- +# Plotting +# --------- + +register_converter_doc = """ +: bool + Whether to register converters with matplotlib's units registry for + dates, times, datetimes, and Periods. Toggling to False will remove + the converters, restoring any converters that pandas overwrote. 
+""" + + +def register_converter_cb(key): + from pandas.plotting import register_matplotlib_converters + from pandas.plotting import deregister_matplotlib_converters + + if cf.get_option(key): + register_matplotlib_converters() + else: + deregister_matplotlib_converters() + + +with cf.config_prefix("plotting.matplotlib"): + cf.register_option("register_converters", True, register_converter_doc, + validator=bool, cb=register_converter_cb) diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index f3b11e52cdd7a..eae283e9bc00d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -136,7 +136,7 @@ def trans(x): # noqa try: if np.allclose(new_result, result, rtol=0): return new_result - except: + except Exception: # comparison of an object dtype with a number type could # hit here @@ -151,14 +151,14 @@ def trans(x): # noqa elif dtype.kind in ['M', 'm'] and result.dtype.kind in ['i', 'f']: try: result = result.astype(dtype) - except: + except Exception: if dtype.tz: # convert to datetime and change timezone from pandas import to_datetime result = to_datetime(result).tz_localize('utc') result = result.tz_convert(dtype.tz) - except: + except Exception: pass return result @@ -210,7 +210,7 @@ def changeit(): new_result[mask] = om_at result[:] = new_result return result, False - except: + except Exception: pass # we are forced to change the dtype of the result as the input @@ -243,7 +243,7 @@ def changeit(): try: np.place(result, mask, other) - except: + except Exception: return changeit() return result, False @@ -274,14 +274,14 @@ def maybe_promote(dtype, fill_value=np.nan): if issubclass(dtype.type, np.datetime64): try: fill_value = tslib.Timestamp(fill_value).value - except: + except Exception: # the proper thing to do here would probably be to upcast # to object (but numpy 1.6.1 doesn't do this properly) fill_value = iNaT elif issubclass(dtype.type, np.timedelta64): try: fill_value = lib.Timedelta(fill_value).value - except: + except 
Exception: # as for datetimes, cannot upcast to object fill_value = iNaT else: @@ -592,12 +592,12 @@ def maybe_convert_scalar(values): def coerce_indexer_dtype(indexer, categories): """ coerce the indexer input array to the smallest dtype possible """ - l = len(categories) - if l < _int8_max: + length = len(categories) + if length < _int8_max: return _ensure_int8(indexer) - elif l < _int16_max: + elif length < _int16_max: return _ensure_int16(indexer) - elif l < _int32_max: + elif length < _int32_max: return _ensure_int32(indexer) return _ensure_int64(indexer) @@ -629,7 +629,7 @@ def conv(r, dtype): r = float(r) elif dtype.kind == 'i': r = int(r) - except: + except Exception: pass return r @@ -756,7 +756,7 @@ def maybe_convert_objects(values, convert_dates=True, convert_numeric=True, if not isna(new_values).all(): values = new_values - except: + except Exception: pass else: # soft-conversion @@ -817,7 +817,7 @@ def soft_convert_objects(values, datetime=True, numeric=True, timedelta=True, # If all NaNs, then do not-alter values = converted if not isna(converted).all() else values values = values.copy() if copy else values - except: + except Exception: pass return values @@ -888,10 +888,10 @@ def try_datetime(v): try: from pandas import to_datetime return to_datetime(v) - except: + except Exception: pass - except: + except Exception: pass return v.reshape(shape) @@ -903,7 +903,7 @@ def try_timedelta(v): from pandas import to_timedelta try: return to_timedelta(v)._values.reshape(shape) - except: + except Exception: return v.reshape(shape) inferred_type = lib.infer_datetimelike_array(_ensure_object(v)) diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 93993fd0a0cab..bca5847f3a6cc 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -569,9 +569,10 @@ def _concat_rangeindex_same_dtype(indexes): start = step = next = None - for obj in indexes: - if not len(obj): - continue + # Filter the empty indexes + 
non_empty_indexes = [obj for obj in indexes if len(obj)] + + for obj in non_empty_indexes: if start is None: # This is set by the first non-empty index @@ -595,8 +596,16 @@ def _concat_rangeindex_same_dtype(indexes): if step is not None: next = obj[-1] + step - if start is None: + if non_empty_indexes: + # Get the stop value from "next" or alternatively + # from the last non-empty index + stop = non_empty_indexes[-1]._stop if next is None else next + else: + # Here all "indexes" had 0 length, i.e. were empty. + # Simply take start, stop, and step from the last empty index. + obj = indexes[-1] start = obj._start step = obj._step - stop = obj._stop if next is None else next + stop = obj._stop + return indexes[0].__class__(start, stop, step) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a1af806e5cb9e..ad79001e45b86 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -997,7 +997,7 @@ def to_dict(self, orient='dict', into=dict): for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): return [into_c((k, _maybe_box_datetimelike(v)) - for k, v in zip(self.columns, row)) + for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): return into_c((k, v.to_dict(into)) for k, v in self.iterrows()) @@ -3751,7 +3751,7 @@ def nlargest(self, n, columns, keep='first'): Number of items to retrieve columns : list or str Column name or names to order by - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. @@ -3788,7 +3788,7 @@ def nsmallest(self, n, columns, keep='first'): Number of items to retrieve columns : list or str Column name or names to order by - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. 
- ``last`` : take the last occurrence. @@ -4035,6 +4035,8 @@ def combine(self, other, func, fill_value=None, overwrite=True): ---------- other : DataFrame func : function + Function that takes two series as inputs and return a Series or a + scalar fill_value : scalar value overwrite : boolean, default True If True then overwrite values for common keys in the calling frame @@ -4042,8 +4044,21 @@ def combine(self, other, func, fill_value=None, overwrite=True): Returns ------- result : DataFrame - """ + Examples + -------- + >>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]}) + >>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]}) + >>> df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2) + A B + 0 0 3 + 1 0 3 + + See Also + -------- + DataFrame.combine_first : Combine two DataFrame objects and default to + non-null values in frame calling the method + """ other_idxlen = len(other.index) # save for compare this, other = self.align(other, copy=False) @@ -4131,16 +4146,24 @@ def combine_first(self, other): ---------- other : DataFrame + Returns + ------- + combined : DataFrame + Examples -------- - a's values prioritized, use values from b to fill holes: - - >>> a.combine_first(b) + df1's values prioritized, use values from df2 to fill holes: + >>> df1 = pd.DataFrame([[1, np.nan]]) + >>> df2 = pd.DataFrame([[3, 4]]) + >>> df1.combine_first(df2) + 0 1 + 0 1 4.0 - Returns - ------- - combined : DataFrame + See Also + -------- + DataFrame.combine : Perform series-wise operation on two DataFrames + using a given function """ import pandas.core.computation.expressions as expressions @@ -4283,7 +4306,7 @@ def first_valid_index(self): return valid_indices[0] if len(valid_indices) else None @Appender(_shared_docs['valid_index'] % { - 'position': 'first', 'klass': 'DataFrame'}) + 'position': 'last', 'klass': 'DataFrame'}) def last_valid_index(self): if len(self) == 0: return None @@ -5113,7 +5136,7 @@ def append(self, other, ignore_index=False, verify_integrity=False): >>> 
df = pd.DataFrame(columns=['A']) >>> for i in range(5): - ... df = df.append({'A'}: i}, ignore_index=True) + ... df = df.append({'A': i}, ignore_index=True) >>> df A 0 0 @@ -5790,7 +5813,12 @@ def idxmin(self, axis=0, skipna=True): 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be NA + will be NA. + + Raises + ------ + ValueError + * If the row/column is empty Returns ------- @@ -5821,7 +5849,12 @@ def idxmax(self, axis=0, skipna=True): 0 or 'index' for row-wise, 1 or 'columns' for column-wise skipna : boolean, default True Exclude NA/null values. If an entire row/column is NA, the result - will be first index. + will be NA. + + Raises + ------ + ValueError + * If the row/column is empty Returns ------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 118e7d5cd437b..31bb9df53ad81 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -49,7 +49,7 @@ from pandas.tseries.frequencies import to_offset from pandas import compat from pandas.compat.numpy import function as nv -from pandas.compat import (map, zip, lzip, lrange, string_types, +from pandas.compat import (map, zip, lzip, lrange, string_types, to_str, isidentifier, set_function_name, cPickle as pkl) from pandas.core.ops import _align_method_FRAME import pandas.core.nanops as nanops @@ -3235,14 +3235,14 @@ def filter(self, items=None, like=None, regex=None, axis=None): **{name: [r for r in items if r in labels]}) elif like: def f(x): - if not isinstance(x, string_types): - x = str(x) - return like in x + return like in to_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: + def f(x): + return matcher.search(to_str(x)) is not None matcher = re.compile(regex) - values = labels.map(lambda x: matcher.search(str(x)) is not None) + values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or 
`regex`') @@ -6921,7 +6921,8 @@ def _add_numeric_operations(cls): @Substitution(outname='mad', desc="Return the mean absolute deviation of the values " "for the requested axis", - name1=name, name2=name2, axis_descr=axis_descr) + name1=name, name2=name2, axis_descr=axis_descr, + min_count='', examples='') @Appender(_num_doc) def mad(self, axis=None, skipna=None, level=None): if skipna is None: @@ -6962,7 +6963,8 @@ def mad(self, axis=None, skipna=None, level=None): @Substitution(outname='compounded', desc="Return the compound percentage of the values for " "the requested axis", name1=name, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, + min_count='', examples='') @Appender(_num_doc) def compound(self, axis=None, skipna=None, level=None): if skipna is None: @@ -6986,10 +6988,10 @@ def compound(self, axis=None, skipna=None, level=None): lambda y, axis: np.maximum.accumulate(y, axis), "max", -np.inf, np.nan) - cls.sum = _make_stat_function( + cls.sum = _make_min_count_stat_function( cls, 'sum', name, name2, axis_descr, 'Return the sum of the values for the requested axis', - nanops.nansum) + nanops.nansum, _sum_examples) cls.mean = _make_stat_function( cls, 'mean', name, name2, axis_descr, 'Return the mean of the values for the requested axis', @@ -7005,10 +7007,10 @@ def compound(self, axis=None, skipna=None, level=None): "by N-1\n", nanops.nankurt) cls.kurtosis = cls.kurt - cls.prod = _make_stat_function( + cls.prod = _make_min_count_stat_function( cls, 'prod', name, name2, axis_descr, 'Return the product of the values for the requested axis', - nanops.nanprod) + nanops.nanprod, _prod_examples) cls.product = cls.prod cls.median = _make_stat_function( cls, 'median', name, name2, axis_descr, @@ -7131,18 +7133,20 @@ def _doc_parms(cls): ---------- axis : %(axis_descr)s skipna : boolean, default True - Exclude NA/null values. If an entire row/column is NA or empty, the result - will be NA + Exclude NA/null values when computing the result. 
level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a %(name1)s numeric_only : boolean, default None Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. Not implemented for Series. +%(min_count)s\ Returns ------- -%(outname)s : %(name1)s or %(name2)s (if level specified)\n""" +%(outname)s : %(name1)s or %(name2)s (if level specified) + +%(examples)s""" _num_ddof_doc = """ @@ -7210,9 +7214,92 @@ def _doc_parms(cls): """ +_sum_examples = """\ +Examples +-------- +By default, the sum of an empty or all-NA Series is ``0``. + +>>> pd.Series([]).sum() # min_count=0 is the default +0.0 + +This can be controlled with the ``min_count`` parameter. For example, if +you'd like the sum of an empty series to be NaN, pass ``min_count=1``. + +>>> pd.Series([]).sum(min_count=1) +nan + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).sum() +0.0 + +>>> pd.Series([np.nan]).sum(min_count=1) +nan +""" + +_prod_examples = """\ +Examples +-------- +By default, the product of an empty or all-NA Series is ``1`` + +>>> pd.Series([]).prod() +1.0 + +This can be controlled with the ``min_count`` parameter + +>>> pd.Series([]).prod(min_count=1) +nan + +Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and +empty series identically. + +>>> pd.Series([np.nan]).prod() +1.0 + +>>> pd.Series([np.nan]).prod(min_count=1) +nan +""" + + +_min_count_stub = """\ +min_count : int, default 0 + The required number of valid values to perform the operation. If fewer than + ``min_count`` non-NA values are present the result will be NA. + + .. versionadded :: 0.22.0 + + Added with the default being 1. This means the sum or product + of an all-NA or empty series is ``NaN``. 
+""" + + +def _make_min_count_stat_function(cls, name, name1, name2, axis_descr, desc, + f, examples): + @Substitution(outname=name, desc=desc, name1=name1, name2=name2, + axis_descr=axis_descr, min_count=_min_count_stub, + examples=examples) + @Appender(_num_doc) + def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, + min_count=0, + **kwargs): + nv.validate_stat_func(tuple(), kwargs, fname=name) + if skipna is None: + skipna = True + if axis is None: + axis = self._stat_axis_number + if level is not None: + return self._agg_by_level(name, axis=axis, level=level, + skipna=skipna, min_count=min_count) + return self._reduce(f, name, axis=axis, skipna=skipna, + numeric_only=numeric_only, min_count=min_count) + + return set_function_name(stat_func, name, cls) + + def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, - axis_descr=axis_descr) + axis_descr=axis_descr, min_count='', examples='') @Appender(_num_doc) def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 5c07033f5a68f..aef5ff7ba64d3 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -77,6 +77,119 @@ pandas.Panel.%(name)s """ +_apply_docs = dict( + template=""" + Apply function ``func`` group-wise and combine the results together. + + The function passed to ``apply`` must take a {input} as its first + argument and return a dataframe, a series or a scalar. ``apply`` will + then take care of combining the results back together into a single + dataframe or series. ``apply`` is therefore a highly flexible + grouping method. + + While ``apply`` is a very flexible method, its downside is that + using it can be quite a bit slower than using more specific methods. 
+ Pandas offers a wide range of methods that will be much faster + than using ``apply`` for their specific purposes, so try to use them + before reaching for ``apply``. + + Parameters + ---------- + func : function + A callable that takes a {input} as its first argument, and + returns a dataframe, a series or a scalar. In addition the + callable may take positional and keyword arguments + args, kwargs : tuple and dict + Optional positional and keyword arguments to pass to ``func`` + + Returns + ------- + applied : Series or DataFrame + + Notes + ----- + In the current implementation ``apply`` calls func twice on the + first group to decide whether it can take a fast or slow code + path. This can lead to unexpected behavior if func has + side-effects, as they will take effect twice for the first + group. + + Examples + -------- + {examples} + + See also + -------- + pipe : Apply function to the full GroupBy object instead of to each + group. + aggregate, transform + """, + dataframe_examples=""" + >>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]}) + >>> g = df.groupby('A') + + From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``. + Calling ``apply`` in various ways, we can get different grouping results: + + Example 1: below the function passed to ``apply`` takes a dataframe as + its argument and returns a dataframe. ``apply`` combines the result for + each group together into a new dataframe: + + >>> g.apply(lambda x: x / x.sum()) + B C + 0 0.333333 0.4 + 1 0.666667 0.6 + 2 1.000000 1.0 + + Example 2: The function passed to ``apply`` takes a dataframe as + its argument and returns a series. ``apply`` combines the result for + each group together into a new dataframe: + + >>> g.apply(lambda x: x.max() - x.min()) + B C + A + a 1 2 + b 0 0 + + Example 3: The function passed to ``apply`` takes a dataframe as + its argument and returns a scalar. 
``apply`` combines the result for + each group together into a series, including setting the index as + appropriate: + + >>> g.apply(lambda x: x.C.max() - x.B.min()) + A + a 5 + b 2 + dtype: int64 + """, + series_examples=""" + >>> ser = pd.Series([0, 1, 2], index='a a b'.split()) + >>> g = ser.groupby(ser.index) + + From ``ser`` above we can see that ``g`` has two groups, ``a``, ``b``. + Calling ``apply`` in various ways, we can get different grouping results: + + Example 1: The function passed to ``apply`` takes a series as + its argument and returns a series. ``apply`` combines the result for + each group together into a new series: + + >>> g.apply(lambda x: x*2 if x.name == 'b' else x/2) + 0 0.0 + 1 0.5 + 2 4.0 + dtype: float64 + + Example 2: The function passed to ``apply`` takes a series as + its argument and returns a scalar. ``apply`` combines the result for + each group together into a series, including setting the index as + appropriate: + + >>> g.apply(lambda x: x.max() - x.min()) + a 1 + b 0 + dtype: int64 + """) + _transform_template = """ Call function producing a like-indexed %(klass)s on each group and return a %(klass)s having the same indexes as the original object @@ -144,6 +257,7 @@ """ + # special case to prevent duplicate plots when catching exceptions when # forwarding methods from NDFrames _plotting_methods = frozenset(['plot', 'boxplot', 'hist']) @@ -206,12 +320,13 @@ class Grouper(object): sort : boolean, default to False whether to sort the resulting labels - additional kwargs to control time-like groupers (when freq is passed) + additional kwargs to control time-like groupers (when ``freq`` is passed) - closed : closed end of interval; left or right - label : interval boundary to use for labeling; left or right + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If grouper is PeriodIndex + base, loffset Returns ------- @@ -233,6 
+348,7 @@ class Grouper(object): >>> df.groupby(Grouper(level='date', freq='60s', axis=1)) """ + _attributes = ('key', 'level', 'freq', 'axis', 'sort') def __new__(cls, *args, **kwargs): if kwargs.get('freq') is not None: @@ -333,6 +449,14 @@ def _set_grouper(self, obj, sort=False): def groups(self): return self.grouper.groups + def __repr__(self): + attrs_list = ["{}={!r}".format(attr_name, getattr(self, attr_name)) + for attr_name in self._attributes + if getattr(self, attr_name) is not None] + attrs = ", ".join(attrs_list) + cls_name = self.__class__.__name__ + return "{}({})".format(cls_name, attrs) + class GroupByPlot(PandasObject): """ @@ -653,50 +777,10 @@ def __iter__(self): """ return self.grouper.get_iterator(self.obj, axis=self.axis) - @Substitution(name='groupby') + @Appender(_apply_docs['template'] + .format(input="dataframe", + examples=_apply_docs['dataframe_examples'])) def apply(self, func, *args, **kwargs): - """ - Apply function and combine results together in an intelligent way. - - The split-apply-combine combination rules attempt to be as common - sense based as possible. For example: - - case 1: - group DataFrame - apply aggregation function (f(chunk) -> Series) - yield DataFrame, with group axis having group labels - - case 2: - group DataFrame - apply transform function ((f(chunk) -> DataFrame with same indexes) - yield DataFrame with resulting chunks glued together - - case 3: - group Series - apply function with f(chunk) -> DataFrame - yield DataFrame with result of chunks glued together - - Parameters - ---------- - func : function - - Notes - ----- - See online documentation for full exposition on how to use apply. - - In the current implementation apply calls func twice on the - first group to decide whether it can take a fast or slow code - path. This can lead to unexpected behavior if func has - side-effects, as they will take effect twice for the first - group. 
- - - See also - -------- - pipe : Apply function to the full GroupBy object instead of to each - group. - aggregate, transform - """ func = self._is_builtin_func(func) @@ -824,7 +908,8 @@ def _cython_transform(self, how, numeric_only=True): return self._wrap_transformed_output(output, names) - def _cython_agg_general(self, how, alt=None, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True, + min_count=-1): output = {} for name, obj in self._iterate_slices(): is_numeric = is_numeric_dtype(obj.dtype) @@ -832,7 +917,8 @@ def _cython_agg_general(self, how, alt=None, numeric_only=True): continue try: - result, names = self.grouper.aggregate(obj.values, how) + result, names = self.grouper.aggregate(obj.values, how, + min_count=min_count) except AssertionError as e: raise GroupByError(str(e)) output[name] = self._try_cast(result, obj) @@ -1139,7 +1225,8 @@ def _add_numeric_operations(cls): """ add numeric operations to the GroupBy generically """ def groupby_function(name, alias, npfunc, - numeric_only=True, _convert=False): + numeric_only=True, _convert=False, + min_count=-1): _local_template = "Compute %(f)s of group values" @@ -1149,6 +1236,8 @@ def groupby_function(name, alias, npfunc, def f(self, **kwargs): if 'numeric_only' not in kwargs: kwargs['numeric_only'] = numeric_only + if 'min_count' not in kwargs: + kwargs['min_count'] = min_count self._set_group_selection() try: return self._cython_agg_general( @@ -1196,8 +1285,8 @@ def last(x): else: return last(x) - cls.sum = groupby_function('sum', 'add', np.sum) - cls.prod = groupby_function('prod', 'prod', np.prod) + cls.sum = groupby_function('sum', 'add', np.sum, min_count=0) + cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0) cls.min = groupby_function('min', 'min', np.min, numeric_only=False) cls.max = groupby_function('max', 'max', np.max, numeric_only=False) cls.first = groupby_function('first', 'first', first_compat, @@ -2023,7 +2112,7 @@ def 
get_group_levels(self): 'var': 'group_var', 'first': { 'name': 'group_nth', - 'f': lambda func, a, b, c, d: func(a, b, c, d, 1) + 'f': lambda func, a, b, c, d, e: func(a, b, c, d, 1, -1) }, 'last': 'group_last', 'ohlc': 'group_ohlc', @@ -2093,7 +2182,7 @@ def wrapper(*args, **kwargs): (how, dtype_str)) return func, dtype_str - def _cython_operation(self, kind, values, how, axis): + def _cython_operation(self, kind, values, how, axis, min_count=-1): assert kind in ['transform', 'aggregate'] # can we do this operation with our cython functions @@ -2178,11 +2267,12 @@ def _cython_operation(self, kind, values, how, axis): counts = np.zeros(self.ngroups, dtype=np.int64) result = self._aggregate( result, counts, values, labels, func, is_numeric, - is_datetimelike) + is_datetimelike, min_count) elif kind == 'transform': result = _maybe_fill(np.empty_like(values, dtype=out_dtype), fill_value=np.nan) + # TODO: min_count result = self._transform( result, values, labels, func, is_numeric, is_datetimelike) @@ -2219,14 +2309,15 @@ def _cython_operation(self, kind, values, how, axis): return result, names - def aggregate(self, values, how, axis=0): - return self._cython_operation('aggregate', values, how, axis) + def aggregate(self, values, how, axis=0, min_count=-1): + return self._cython_operation('aggregate', values, how, axis, + min_count=min_count) def transform(self, values, how, axis=0): return self._cython_operation('transform', values, how, axis) def _aggregate(self, result, counts, values, comp_ids, agg_func, - is_numeric, is_datetimelike): + is_numeric, is_datetimelike, min_count=-1): if values.ndim > 3: # punting for now raise NotImplementedError("number of dimensions is currently " @@ -2235,9 +2326,10 @@ def _aggregate(self, result, counts, values, comp_ids, agg_func, for i, chunk in enumerate(values.transpose(2, 0, 1)): chunk = chunk.squeeze() - agg_func(result[:, :, i], counts, chunk, comp_ids) + agg_func(result[:, :, i], counts, chunk, comp_ids, + min_count) 
else: - agg_func(result, counts, values, comp_ids) + agg_func(result, counts, values, comp_ids, min_count) return result @@ -2847,9 +2939,11 @@ def is_in_obj(gpr): else: in_axis, name = False, None - if is_categorical_dtype(gpr) and len(gpr) != len(obj): - raise ValueError("Categorical dtype grouper must " - "have len(grouper) == len(data)") + if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]: + raise ValueError( + ("Length of grouper ({len_gpr}) and axis ({len_axis})" + " must be same length" + .format(len_gpr=len(gpr), len_axis=obj.shape[axis]))) # create the Grouping # allow us to passing the actual Grouping as the gpr @@ -3011,6 +3105,12 @@ def _selection_name(self): """) + @Appender(_apply_docs['template'] + .format(input='series', + examples=_apply_docs['series_examples'])) + def apply(self, func, *args, **kwargs): + return super(SeriesGroupBy, self).apply(func, *args, **kwargs) + @Appender(_agg_doc) @Appender(_shared_docs['aggregate'] % dict( klass='Series', @@ -3503,9 +3603,10 @@ def _iterate_slices(self): continue yield val, slicer(val) - def _cython_agg_general(self, how, alt=None, numeric_only=True): + def _cython_agg_general(self, how, alt=None, numeric_only=True, + min_count=-1): new_items, new_blocks = self._cython_agg_blocks( - how, alt=alt, numeric_only=numeric_only) + how, alt=alt, numeric_only=numeric_only, min_count=min_count) return self._wrap_agged_blocks(new_items, new_blocks) def _wrap_agged_blocks(self, items, blocks): @@ -3531,7 +3632,8 @@ def _wrap_agged_blocks(self, items, blocks): _block_agg_axis = 0 - def _cython_agg_blocks(self, how, alt=None, numeric_only=True): + def _cython_agg_blocks(self, how, alt=None, numeric_only=True, + min_count=-1): # TODO: the actual managing of mgr_locs is a PITA # here, it should happen via BlockManager.combine @@ -3548,7 +3650,7 @@ def _cython_agg_blocks(self, how, alt=None, numeric_only=True): locs = block.mgr_locs.as_array try: result, _ = self.grouper.aggregate( - block.values, how, 
axis=agg_axis) + block.values, how, axis=agg_axis, min_count=min_count) except NotImplementedError: # generally if we have numeric_only=False # and non-applicable functions @@ -4363,7 +4465,8 @@ def count(self): ids, _, ngroups = self.grouper.group_info mask = ids != -1 - val = ((mask & ~isna(blk.get_values())) for blk in data.blocks) + val = ((mask & ~isna(np.atleast_2d(blk.get_values()))) + for blk in data.blocks) loc = (blk.mgr_locs for blk in data.blocks) counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a995fc10a6674..83c78f084a9da 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -1934,7 +1934,10 @@ def putmask(self, mask, value): try: np.putmask(values, mask, self._convert_for_op(value)) return self._shallow_copy(values) - except (ValueError, TypeError): + except (ValueError, TypeError) as err: + if is_object_dtype(self): + raise err + # coerces to object return self.astype(object).putmask(mask, value) @@ -2032,7 +2035,7 @@ def equals(self, other): try: return array_equivalent(_values_from_object(self), _values_from_object(other)) - except: + except Exception: return False def identical(self, other): @@ -2315,7 +2318,7 @@ def intersection(self, other): try: indexer = Index(other._values).get_indexer(self._values) indexer = indexer.take((indexer != -1).nonzero()[0]) - except: + except Exception: # duplicates indexer = algos.unique1d( Index(other._values).get_indexer_non_unique(self._values)[0]) @@ -3024,13 +3027,13 @@ def _reindex_non_unique(self, target): new_indexer = None if len(missing): - l = np.arange(len(indexer)) + length = np.arange(len(indexer)) missing = _ensure_platform_int(missing) missing_labels = target.take(missing) - missing_indexer = _ensure_int64(l[~check]) + missing_indexer = _ensure_int64(length[~check]) cur_labels = self.take(indexer[check]).values - cur_indexer = _ensure_int64(l[check]) + cur_indexer = 
_ensure_int64(length[check]) new_labels = np.empty(tuple([len(indexer)]), dtype=object) new_labels[cur_indexer] = cur_labels diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 8b680127723c3..70b531ffb0ec4 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -79,7 +79,8 @@ def __new__(cls, data=None, categories=None, ordered=None, dtype=None, if data is not None or categories is None: cls._scalar_data_error(data) data = [] - data = cls._create_categorical(cls, data, categories, ordered) + data = cls._create_categorical(cls, data, categories, ordered, + dtype) if copy: data = data.copy() diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 71de6c7c3e8cf..4e9b2b9a2e922 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -681,7 +681,7 @@ def __sub__(self, other): return self._add_delta(-other) elif is_integer(other): return self.shift(-other) - elif isinstance(other, datetime): + elif isinstance(other, (datetime, np.datetime64)): return self._sub_datelike(other) elif isinstance(other, Period): return self._sub_period(other) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 18be6c61abdf7..3c518017a8808 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -2,9 +2,11 @@ from __future__ import division import operator import warnings -from datetime import time, datetime -from datetime import timedelta +from datetime import time, datetime, timedelta + import numpy as np +from pytz import utc + from pandas.core.base import _shared_docs from pandas.core.dtypes.common import ( @@ -29,6 +31,7 @@ import pandas.core.dtypes.concat as _concat from pandas.errors import PerformanceWarning from pandas.core.common import _values_from_object, _maybe_box +from pandas.core.algorithms import checked_add_with_arr from pandas.core.indexes.base import Index, 
_index_shared_docs from pandas.core.indexes.numeric import Int64Index, Float64Index @@ -55,10 +58,6 @@ from pandas._libs.tslibs import timezones -def _utc(): - import pytz - return pytz.utc - # -------- some conversion wrapper functions @@ -66,7 +65,6 @@ def _field_accessor(name, field, docstring=None): def f(self): values = self.asi8 if self.tz is not None: - utc = _utc() if self.tz is not utc: values = self._local_timestamps() @@ -451,7 +449,7 @@ def _generate(cls, start, end, periods, name, offset, try: inferred_tz = timezones.infer_tzinfo(start, end) - except: + except Exception: raise TypeError('Start and end cannot both be tz-aware with ' 'different timezones') @@ -562,8 +560,6 @@ def _convert_for_op(self, value): raise ValueError('Passed item and index have different timezone') def _local_timestamps(self): - utc = _utc() - if self.is_monotonic: return libts.tz_convert(self.asi8, utc, self.tz) else: @@ -767,7 +763,7 @@ def _sub_datelike(self, other): raise TypeError("DatetimeIndex subtraction must have the same " "timezones or no timezones") result = self._sub_datelike_dti(other) - elif isinstance(other, datetime): + elif isinstance(other, (datetime, np.datetime64)): other = Timestamp(other) if other is libts.NaT: result = self._nat_new(box=False) @@ -777,7 +773,8 @@ def _sub_datelike(self, other): "timezones or no timezones") else: i8 = self.asi8 - result = i8 - other.value + result = checked_add_with_arr(i8, -other.value, + arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=libts.iNaT) else: @@ -823,7 +820,6 @@ def _add_delta(self, delta): tz = 'UTC' if self.tz is not None else None result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer') - utc = _utc() if self.tz is not None and self.tz is not utc: result = result.tz_convert(self.tz) return result @@ -877,7 +873,6 @@ def astype(self, dtype, copy=True): raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype) def _get_time_micros(self): - utc = _utc() values = 
self.asi8 if self.tz is not None and self.tz is not utc: values = self._local_timestamps() @@ -1183,12 +1178,12 @@ def __iter__(self): # convert in chunks of 10k for efficiency data = self.asi8 - l = len(self) + length = len(self) chunksize = 10000 - chunks = int(l / chunksize) + 1 + chunks = int(length / chunksize) + 1 for i in range(chunks): start_i = i * chunksize - end_i = min((i + 1) * chunksize, l) + end_i = min((i + 1) * chunksize, length) converted = libts.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, freq=self.freq, box=True) diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 7bf7cfce515a1..9619f5403b761 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -179,7 +179,7 @@ def __new__(cls, data, closed='right', if isinstance(data, IntervalIndex): left = data.left right = data.right - + closed = data.closed else: # don't allow scalars @@ -187,7 +187,7 @@ def __new__(cls, data, closed='right', cls._scalar_data_error(data) data = IntervalIndex.from_intervals(data, name=name) - left, right = data.left, data.right + left, right, closed = data.left, data.right, data.closed return cls._simple_new(left, right, closed, name, copy=copy, verify_integrity=verify_integrity) @@ -569,7 +569,8 @@ def copy(self, deep=False, name=None): left = self.left.copy(deep=True) if deep else self.left right = self.right.copy(deep=True) if deep else self.right name = name if name is not None else self.name - return type(self).from_arrays(left, right, name=name) + closed = self.closed + return type(self).from_arrays(left, right, closed=closed, name=name) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4cc59f5297058..f4acb6862addb 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -446,6 +446,17 @@ def _shallow_copy_with_infer(self, values=None, **kwargs): **kwargs) return 
self._shallow_copy(values, **kwargs) + @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) + def __contains__(self, key): + hash(key) + try: + self.get_loc(key) + return True + except (LookupError, TypeError): + return False + + contains = __contains__ + @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, values=None, **kwargs): if values is not None: @@ -809,9 +820,10 @@ def duplicated(self, keep='first'): return duplicated_int64(ids, keep) - @Appender(ibase._index_shared_docs['fillna']) def fillna(self, value=None, downcast=None): - # isna is not implemented for MultiIndex + """ + fillna is not implemented for MultiIndex + """ raise NotImplementedError('isna is not defined for MultiIndex') @Appender(_index_shared_docs['dropna']) @@ -1370,17 +1382,6 @@ def nlevels(self): def levshape(self): return tuple(len(x) for x in self.levels) - @Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs) - def __contains__(self, key): - hash(key) - try: - self.get_loc(key) - return True - except LookupError: - return False - - contains = __contains__ - def __reduce__(self): """Necessary for making this object picklable""" d = dict(levels=[lev for lev in self.levels], diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 6e08c32f30dcd..0cc35300f0d17 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -36,6 +36,26 @@ join as libjoin, Timedelta, NaT, iNaT) +def _field_accessor(name, alias, docstring=None): + def f(self): + if self.hasnans: + result = np.empty(len(self), dtype='float64') + mask = self._isnan + imask = ~mask + result.flat[imask] = np.array([getattr(Timedelta(val), alias) + for val in self.asi8[imask]]) + result[mask] = np.nan + else: + result = np.array([getattr(Timedelta(val), alias) + for val in self.asi8], dtype='int64') + + return Index(result, name=self.name) + + f.__name__ = name + f.__doc__ = docstring + return property(f) + + def 
_td_index_cmp(opname, nat_result=False): """ Wrap comparison operations to convert timedelta-like to timedelta64 @@ -361,7 +381,8 @@ def _add_datelike(self, other): else: other = Timestamp(other) i8 = self.asi8 - result = checked_add_with_arr(i8, other.value) + result = checked_add_with_arr(i8, other.value, + arr_mask=self._isnan) result = self._maybe_mask_results(result, fill_value=iNaT) return DatetimeIndex(result, name=self.name, copy=False) @@ -380,46 +401,17 @@ def _format_native_types(self, na_rep=u('NaT'), nat_rep=na_rep, justify='all').get_result() - def _get_field(self, m): - - values = self.asi8 - hasnans = self.hasnans - if hasnans: - result = np.empty(len(self), dtype='float64') - mask = self._isnan - imask = ~mask - result.flat[imask] = np.array( - [getattr(Timedelta(val), m) for val in values[imask]]) - result[mask] = np.nan - else: - result = np.array([getattr(Timedelta(val), m) - for val in values], dtype='int64') - return Index(result, name=self.name) - - @property - def days(self): - """ Number of days for each element. """ - return self._get_field('days') - - @property - def seconds(self): - """ Number of seconds (>= 0 and less than 1 day) for each element. """ - return self._get_field('seconds') - - @property - def microseconds(self): - """ - Number of microseconds (>= 0 and less than 1 second) for each - element. """ - return self._get_field('microseconds') - - @property - def nanoseconds(self): - """ - Number of nanoseconds (>= 0 and less than 1 microsecond) for each - element. - """ - return self._get_field('nanoseconds') + days = _field_accessor("days", "days", + " Number of days for each element. ") + seconds = _field_accessor("seconds", "seconds", + " Number of seconds (>= 0 and less than 1 day) " + "for each element. ") + microseconds = _field_accessor("microseconds", "microseconds", + "\nNumber of microseconds (>= 0 and less " + "than 1 second) for each\nelement. 
") + nanoseconds = _field_accessor("nanoseconds", "nanoseconds", + "\nNumber of nanoseconds (>= 0 and less " + "than 1 microsecond) for each\nelement.\n") @property def components(self): @@ -850,7 +842,7 @@ def insert(self, loc, item): if _is_convertible_to_td(item): try: item = Timedelta(item) - except: + except Exception: pass freq = None diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 045580d393b26..3b7cd1d02e1d3 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1837,8 +1837,10 @@ def _can_hold_element(self, element): if tipo is not None: return (issubclass(tipo.type, (np.floating, np.integer)) and not issubclass(tipo.type, (np.datetime64, np.timedelta64))) - return (isinstance(element, (float, int, np.floating, np.int_)) and - not isinstance(element, (bool, np.bool_, datetime, timedelta, + return ( + isinstance( + element, (float, int, np.floating, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))) def to_native_types(self, slicer=None, na_rep='', float_format=None, @@ -1886,9 +1888,11 @@ def _can_hold_element(self, element): if tipo is not None: return issubclass(tipo.type, (np.floating, np.integer, np.complexfloating)) - return (isinstance(element, - (float, int, complex, np.float_, np.int_)) and - not isinstance(element, (bool, np.bool_))) + return ( + isinstance( + element, + (float, int, complex, np.float_, np.int_, compat.long)) + and not isinstance(element, (bool, np.bool_))) def should_store(self, value): return issubclass(value.dtype.type, np.complexfloating) @@ -1946,7 +1950,8 @@ def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: return issubclass(tipo.type, np.timedelta64) - return isinstance(element, (timedelta, np.timedelta64)) + return is_integer(element) or isinstance( + element, (timedelta, np.timedelta64)) def fillna(self, value, **kwargs): diff --git a/pandas/core/nanops.py 
b/pandas/core/nanops.py index baeb869239c1e..d1a355021f388 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -107,21 +107,14 @@ def f(values, axis=None, skipna=True, **kwds): if k not in kwds: kwds[k] = v try: - if values.size == 0: - - # we either return np.nan or pd.NaT - if is_numeric_dtype(values): - values = values.astype('float64') - fill_value = na_value_for_dtype(values.dtype) - - if values.ndim == 1: - return fill_value - else: - result_shape = (values.shape[:axis] + - values.shape[axis + 1:]) - result = np.empty(result_shape, dtype=values.dtype) - result.fill(fill_value) - return result + if values.size == 0 and kwds.get('min_count') is None: + # We are empty, returning NA for our type + # Only applies for the default `min_count` of None + # since that affects how empty arrays are handled. + # TODO(GH-18976) update all the nanops methods to + # correctly handle empty inputs and remove this check. + # It *may* just be `var` + return _na_for_min_count(values, axis) if (_USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name)): @@ -292,6 +285,36 @@ def _wrap_results(result, dtype): return result +def _na_for_min_count(values, axis): + """Return the missing value for `values` + + Parameters + ---------- + values : ndarray + axis : int or None + axis for the reduction + + Returns + ------- + result : scalar or ndarray + For 1-D values, returns a scalar of the correct missing type. + For 2-D values, returns a 1-D array where each element is missing. 
+ """ + # we either return np.nan or pd.NaT + if is_numeric_dtype(values): + values = values.astype('float64') + fill_value = na_value_for_dtype(values.dtype) + + if values.ndim == 1: + return fill_value + else: + result_shape = (values.shape[:axis] + + values.shape[axis + 1:]) + result = np.empty(result_shape, dtype=values.dtype) + result.fill(fill_value) + return result + + def nanany(values, axis=None, skipna=True): values, mask, dtype, _ = _get_values(values, skipna, False, copy=skipna) return values.any(axis) @@ -304,7 +327,7 @@ def nanall(values, axis=None, skipna=True): @disallow('M8') @bottleneck_switch() -def nansum(values, axis=None, skipna=True): +def nansum(values, axis=None, skipna=True, min_count=0): values, mask, dtype, dtype_max = _get_values(values, skipna, 0) dtype_sum = dtype_max if is_float_dtype(dtype): @@ -312,7 +335,7 @@ def nansum(values, axis=None, skipna=True): elif is_timedelta64_dtype(dtype): dtype_sum = np.float64 the_sum = values.sum(axis, dtype=dtype_sum) - the_sum = _maybe_null_out(the_sum, axis, mask) + the_sum = _maybe_null_out(the_sum, axis, mask, min_count=min_count) return _wrap_results(the_sum, dtype) @@ -548,6 +571,9 @@ def nanskew(values, axis=None, skipna=True): m3 = adjusted3.sum(axis, dtype=np.float64) # floating point error + # + # #18044 in _libs/windows.pyx calc_skew follow this behavior + # to fix the fperr to treat m2 <1e-14 as zero m2 = _zero_out_fperr(m2) m3 = _zero_out_fperr(m3) @@ -609,6 +635,9 @@ def nankurt(values, axis=None, skipna=True): result = numer / denom - adj # floating point error + # + # #18044 in _libs/windows.pyx calc_kurt follow this behavior + # to fix the fperr to treat denom <1e-14 as zero numer = _zero_out_fperr(numer) denom = _zero_out_fperr(denom) @@ -635,13 +664,13 @@ def nankurt(values, axis=None, skipna=True): @disallow('M8', 'm8') -def nanprod(values, axis=None, skipna=True): +def nanprod(values, axis=None, skipna=True, min_count=0): mask = isna(values) if skipna and not 
is_any_int_dtype(values): values = values.copy() values[mask] = 1 result = values.prod(axis) - return _maybe_null_out(result, axis, mask) + return _maybe_null_out(result, axis, mask, min_count=min_count) def _maybe_arg_null_out(result, axis, mask, skipna): @@ -677,9 +706,9 @@ def _get_counts(mask, axis, dtype=float): return np.array(count, dtype=dtype) -def _maybe_null_out(result, axis, mask): +def _maybe_null_out(result, axis, mask, min_count=1): if axis is not None and getattr(result, 'ndim', False): - null_mask = (mask.shape[axis] - mask.sum(axis)) == 0 + null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0 if np.any(null_mask): if is_numeric_dtype(result): if np.iscomplexobj(result): @@ -692,13 +721,14 @@ def _maybe_null_out(result, axis, mask): result[null_mask] = None elif result is not tslib.NaT: null_mask = mask.size - mask.sum() - if null_mask == 0: + if null_mask < min_count: result = np.nan return result def _zero_out_fperr(arg): + # #18044 reference this behavior to fix rolling skew/kurt issue if isinstance(arg, np.ndarray): with np.errstate(invalid='ignore'): return np.where(np.abs(arg) < 1e-14, 0, arg) diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 6edbb99641542..db1d3d4c5e31b 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -395,7 +395,11 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis) try: - result = grouped.aggregate(how, *args, **kwargs) + if isinstance(obj, ABCDataFrame) and compat.callable(how): + # Check if the function is reducing or not. 
+ result = grouped._aggregate_item_by_item(how, *args, **kwargs) + else: + result = grouped.aggregate(how, *args, **kwargs) except Exception: # we have a non-reducing function @@ -597,9 +601,20 @@ def size(self): Resampler._deprecated_valids += dir(Resampler) + # downsample methods -for method in ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem', - 'median', 'prod', 'ohlc']: +for method in ['sum', 'prod']: + + def f(self, _method=method, min_count=0, *args, **kwargs): + nv.validate_resampler_func(_method, args, kwargs) + return self._downsample(_method, min_count=min_count) + f.__doc__ = getattr(GroupBy, method).__doc__ + setattr(Resampler, method, f) + + +# downsample methods +for method in ['min', 'max', 'first', 'last', 'mean', 'sem', + 'median', 'ohlc']: def f(self, _method=method, *args, **kwargs): nv.validate_resampler_func(_method, args, kwargs) @@ -1010,22 +1025,18 @@ class TimeGrouper(Grouper): Parameters ---------- freq : pandas date offset or offset alias for identifying bin edges - closed : closed end of interval; left or right - label : interval boundary to use for labeling; left or right - nperiods : optional, integer + closed : closed end of interval; 'left' or 'right' + label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If axis is PeriodIndex - - Notes - ----- - Use begin, end, nperiods to generate intervals that cannot be derived - directly from the associated object """ + _attributes = Grouper._attributes + ('closed', 'label', 'how', + 'loffset', 'kind', 'convention', + 'base') def __init__(self, freq='Min', closed=None, label=None, how='mean', - nperiods=None, axis=0, - fill_method=None, limit=None, loffset=None, kind=None, - convention=None, base=0, **kwargs): + axis=0, fill_method=None, limit=None, loffset=None, + kind=None, convention=None, base=0, **kwargs): freq = to_offset(freq) end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W']) @@ -1044,7 +1055,6 @@ def __init__(self, 
freq='Min', closed=None, label=None, how='mean', self.closed = closed self.label = label - self.nperiods = nperiods self.kind = kind self.convention = convention or 'E' @@ -1137,6 +1147,16 @@ def _get_time_bins(self, ax): tz=tz, name=ax.name) + # GH 15549 + # In edge case of tz-aware resapmling binner last index can be + # less than the last variable in data object, this happens because of + # DST time change + if len(binner) > 1 and binner[-1] < last: + extra_date_range = pd.date_range(binner[-1], last + self.freq, + freq=self.freq, tz=tz, + name=ax.name) + binner = labels = binner.append(extra_date_range[1:]) + # a little hack trimmed = False if (len(binner) > 2 and binner[-2] == last and diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index e409090e76944..bdb7ec00a29fd 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -126,7 +126,7 @@ def _groupby_and_merge(by, on, left, right, _merge_pieces, try: if k in merged: merged[k] = key - except: + except KeyError: pass pieces.append(merged) @@ -1253,10 +1253,12 @@ def _get_merge_keys(self): join_names) = super(_AsOfMerge, self)._get_merge_keys() # validate index types are the same - for lk, rk in zip(left_join_keys, right_join_keys): + for i, (lk, rk) in enumerate(zip(left_join_keys, right_join_keys)): if not is_dtype_equal(lk.dtype, rk.dtype): - raise MergeError("incompatible merge keys, " - "must be the same type") + raise MergeError("incompatible merge keys [{i}] {lkdtype} and " + "{rkdtype}, must be the same type" + .format(i=i, lkdtype=lk.dtype, + rkdtype=rk.dtype)) # validate tolerance; must be a Timedelta if we have a DTI if self.tolerance is not None: @@ -1266,8 +1268,10 @@ def _get_merge_keys(self): else: lt = left_join_keys[-1] - msg = "incompatible tolerance, must be compat " \ - "with type {lt}".format(lt=type(lt)) + msg = ("incompatible tolerance {tolerance}, must be compat " + "with type {lkdtype}".format( + tolerance=type(self.tolerance), + 
lkdtype=lt.dtype)) if is_datetime64_dtype(lt) or is_datetime64tz_dtype(lt): if not isinstance(self.tolerance, Timedelta): @@ -1503,12 +1507,12 @@ def _sort_labels(uniques, left, right): # tuplesafe uniques = Index(uniques).values - l = len(left) + llength = len(left) labels = np.concatenate([left, right]) _, new_labels = sorting.safe_sort(uniques, labels, na_sentinel=-1) new_labels = _ensure_int64(new_labels) - new_left, new_right = new_labels[:l], new_labels[l:] + new_left, new_right = new_labels[:llength], new_labels[llength:] return new_left, new_right @@ -1525,7 +1529,8 @@ def _get_join_keys(llab, rlab, shape, sort): rkey = stride * rlab[0].astype('i8', subok=False, copy=False) for i in range(1, nlev): - stride //= shape[i] + with np.errstate(divide='ignore'): + stride //= shape[i] lkey += llab[i] * stride rkey += rlab[i] * stride diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index fda339aa30461..2adf17a227a59 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -148,7 +148,7 @@ def qcut(x, q, labels=None, retbins=False, precision=3, duplicates='raise'): Parameters ---------- - x : ndarray or Series + x : 1d ndarray or Series q : integer or array of quantiles Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately array of quantiles, e.g. [0, .25, .5, .75, 1.] 
for quartiles diff --git a/pandas/core/series.py b/pandas/core/series.py index 1c92c4b8850ee..2b4f9c4c6f7e3 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -597,7 +597,7 @@ def _ixs(self, i, axis=0): return values[i] except IndexError: raise - except: + except Exception: if isinstance(i, slice): indexer = self.index._convert_slice_indexer(i, kind='iloc') return self._get_values(indexer) @@ -675,7 +675,7 @@ def _get_with(self, key): if isinstance(key, tuple): try: return self._get_values_tuple(key) - except: + except Exception: if len(key) == 1: key = key[0] if isinstance(key, slice): @@ -818,7 +818,7 @@ def _set_with(self, key, value): if not isinstance(key, (list, Series, np.ndarray, Series)): try: key = list(key) - except: + except Exception: key = [key] if isinstance(key, Index): @@ -1306,7 +1306,13 @@ def idxmin(self, axis=None, skipna=True, *args, **kwargs): Parameters ---------- skipna : boolean, default True - Exclude NA/null values + Exclude NA/null values. If the entire Series is NA, the result + will be NA. + + Raises + ------ + ValueError + * If the Series is empty Returns ------- @@ -1336,7 +1342,13 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs): Parameters ---------- skipna : boolean, default True - Exclude NA/null values + Exclude NA/null values. If the entire Series is NA, the result + will be NA. 
+ + Raises + ------ + ValueError + * If the Series is empty Returns ------- @@ -1731,11 +1743,26 @@ def combine(self, other, func, fill_value=np.nan): ---------- other : Series or scalar value func : function + Function that takes two scalars as inputs and return a scalar fill_value : scalar value Returns ------- result : Series + + Examples + -------- + >>> s1 = Series([1, 2]) + >>> s2 = Series([0, 3]) + >>> s1.combine(s2, lambda x1, x2: x1 if x1 < x2 else x2) + 0 0 + 1 2 + dtype: int64 + + See Also + -------- + Series.combine_first : Combine Series values, choosing the calling + Series's values first """ if isinstance(other, Series): new_index = self.index.union(other.index) @@ -1764,7 +1791,21 @@ def combine_first(self, other): Returns ------- - y : Series + combined : Series + + Examples + -------- + >>> s1 = pd.Series([1, np.nan]) + >>> s2 = pd.Series([3, 4]) + >>> s1.combine_first(s2) + 0 1.0 + 1 4.0 + dtype: float64 + + See Also + -------- + Series.combine : Perform elementwise operation on two Series + using a given function """ new_index = self.index.union(other.index) this = self.reindex(new_index, copy=False) @@ -1982,7 +2023,7 @@ def nlargest(self, n=5, keep='first'): ---------- n : int Return this many descending sorted values - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. @@ -2029,7 +2070,7 @@ def nsmallest(self, n=5, keep='first'): ---------- n : int Return this many ascending sorted values - keep : {'first', 'last', False}, default 'first' + keep : {'first', 'last'}, default 'first' Where there are duplicate values: - ``first`` : take the first occurrence. - ``last`` : take the last occurrence. 
diff --git a/pandas/core/strings.py b/pandas/core/strings.py index abef6f6086dbd..9614641aa1abf 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -1423,6 +1423,10 @@ def cons_row(x): return [x] result = [cons_row(x) for x in result] + if result: + # propogate nan values to match longest sequence (GH 18450) + max_len = max(len(x) for x in result) + result = [x * max_len if x[0] is np.nan else x for x in result] if not isinstance(expand, bool): raise ValueError("expand must be True or False") diff --git a/pandas/core/window.py b/pandas/core/window.py index 5143dddc5e866..345f9b035a36b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -503,6 +503,9 @@ class Window(_Window): * ``general_gaussian`` (needs power, width) * ``slepian`` (needs width). + If ``win_type=None`` all points are evenly weighted. To learn more about + different window types see `scipy.signal window functions + <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__. 
""" def validate(self): diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index c5d4a0ecf44ab..bac5ac762400d 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -45,7 +45,6 @@ import pandas as pd import numpy as np -import itertools import csv from functools import partial @@ -891,6 +890,7 @@ def get_col_type(dtype): name = any(self.frame.index.names) cname = any(self.frame.columns.names) lastcol = self.frame.index.nlevels - 1 + previous_lev3 = None for i, lev in enumerate(self.frame.index.levels): lev2 = lev.format() blank = ' ' * len(lev2[0]) @@ -901,11 +901,19 @@ def get_col_type(dtype): lev3 = [blank] * clevels if name: lev3.append(lev.name) - for level_idx, group in itertools.groupby( - self.frame.index.labels[i]): - count = len(list(group)) - lev3.extend([lev2[level_idx]] + [blank] * (count - 1)) + current_idx_val = None + for level_idx in self.frame.index.labels[i]: + if ((previous_lev3 is None or + previous_lev3[len(lev3)].isspace()) and + lev2[level_idx] == current_idx_val): + # same index as above row and left index was the same + lev3.append(blank) + else: + # different value than above or left index different + lev3.append(lev2[level_idx]) + current_idx_val = lev2[level_idx] strcols.insert(i, lev3) + previous_lev3 = lev3 column_format = self.column_format if column_format is None: @@ -942,8 +950,8 @@ def get_col_type(dtype): if self.longtable: buf.write('\\endhead\n') buf.write('\\midrule\n') - buf.write('\\multicolumn{3}{r}{{Continued on next ' - 'page}} \\\\\n') + buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next ' + 'page}}}} \\\\\n'.format(n=len(row))) buf.write('\\midrule\n') buf.write('\\endfoot\n\n') buf.write('\\bottomrule\n') @@ -1695,7 +1703,7 @@ def _save_header(self): else: encoded_labels = [] - if not has_mi_columns: + if not has_mi_columns or has_aliases: encoded_labels += list(write_cols) writer.writerow(encoded_labels) else: diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py index 
b4dc9173f11ba..caa67d1ce6bce 100644 --- a/pandas/io/gbq.py +++ b/pandas/io/gbq.py @@ -29,9 +29,8 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, The main method a user calls to execute a Query in Google BigQuery and read results into a pandas DataFrame. - Google BigQuery API Client Library v2 for Python is used. - Documentation is available `here - <https://developers.google.com/api-client-library/python/apis/bigquery/v2>`__ + This function requires the `pandas-gbq package + <https://pandas-gbq.readthedocs.io>`__. Authentication to the Google BigQuery service is via OAuth 2.0. @@ -70,7 +69,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, dialect : {'legacy', 'standard'}, default 'legacy' 'legacy' : Use BigQuery's legacy SQL dialect. - 'standard' : Use BigQuery's standard SQL (beta), which is + 'standard' : Use BigQuery's standard SQL, which is compliant with the SQL 2011 standard. For more information see `BigQuery SQL Reference <https://cloud.google.com/bigquery/sql-reference/>`__ diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index be39f4baba0fb..203b1d62fcbf3 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -5,7 +5,7 @@ import pandas._libs.json as json from pandas._libs.tslib import iNaT -from pandas.compat import StringIO, long, u +from pandas.compat import StringIO, long, u, to_str from pandas import compat, isna from pandas import Series, DataFrame, to_datetime, MultiIndex from pandas.io.common import (get_filepath_or_buffer, _get_handle, @@ -458,8 +458,10 @@ def read(self): if self.lines and self.chunksize: obj = concat(self) elif self.lines: + + data = to_str(self.data) obj = self._get_object_parser( - self._combine_lines(self.data.split('\n')) + self._combine_lines(data.split('\n')) ) else: obj = self._get_object_parser(self.data) @@ -612,7 +614,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: dtype = np.dtype(dtype) return data.astype(dtype), True - except: 
+ except (TypeError, ValueError): return data, False if convert_dates: @@ -628,7 +630,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('float64') result = True - except: + except (TypeError, ValueError): pass if data.dtype.kind == 'f': @@ -639,7 +641,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('float64') result = True - except: + except (TypeError, ValueError): pass # do't coerce 0-len data @@ -651,7 +653,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, if (new_data == data).all(): data = new_data result = True - except: + except (TypeError, ValueError): pass # coerce ints to 64 @@ -661,7 +663,7 @@ def _try_convert_data(self, name, data, use_dtypes=True, try: data = data.astype('int64') result = True - except: + except (TypeError, ValueError): pass return data, result @@ -680,7 +682,7 @@ def _try_convert_to_date(self, data): if new_data.dtype == 'object': try: new_data = data.astype('int64') - except: + except (TypeError, ValueError): pass # ignore numbers that are out of range @@ -697,7 +699,7 @@ def _try_convert_to_date(self, data): unit=date_unit) except ValueError: continue - except: + except Exception: break return new_data, True return data, False diff --git a/pandas/io/json/normalize.py b/pandas/io/json/normalize.py index e811dd1eab142..23d2f730d070c 100644 --- a/pandas/io/json/normalize.py +++ b/pandas/io/json/normalize.py @@ -181,7 +181,7 @@ def _pull_field(js, spec): return result - if isinstance(data, list) and len(data) is 0: + if isinstance(data, list) and not data: return DataFrame() # A bit of a hackjob @@ -207,9 +207,7 @@ def _pull_field(js, spec): elif not isinstance(meta, list): meta = [meta] - for i, x in enumerate(meta): - if not isinstance(x, list): - meta[i] = [x] + meta = [m if isinstance(m, list) else [m] for m in meta] # Disastrously inefficient for now records = [] diff --git a/pandas/io/msgpack/_packer.pyx b/pandas/io/msgpack/_packer.pyx index 
fd3f4612fb432..f175a6743f44b 100644 --- a/pandas/io/msgpack/_packer.pyx +++ b/pandas/io/msgpack/_packer.pyx @@ -8,6 +8,7 @@ from libc.limits cimport * from pandas.io.msgpack.exceptions import PackValueError from pandas.io.msgpack import ExtType +import numpy as np cdef extern from "../../src/msgpack/pack.h": @@ -133,7 +134,7 @@ cdef class Packer(object): while True: if o is None: ret = msgpack_pack_nil(&self.pk) - elif isinstance(o, bool): + elif isinstance(o, (bool, np.bool_)): if o: ret = msgpack_pack_true(&self.pk) else: diff --git a/pandas/io/packers.py b/pandas/io/packers.py index 92270b39f56ef..abd258034af99 100644 --- a/pandas/io/packers.py +++ b/pandas/io/packers.py @@ -350,8 +350,11 @@ def unconvert(values, dtype, compress=None): ) # fall through to copying `np.fromstring` - # Copy the string into a numpy array. - return np.fromstring(values, dtype=dtype) + # Copy the bytes into a numpy array. + buf = np.frombuffer(values, dtype=dtype) + buf = buf.copy() # required to not mutate the original data + buf.flags.writeable = True + return buf def encode(obj): diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 4b507b7f5df6f..eaaa14e756e22 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -3,7 +3,8 @@ from warnings import catch_warnings from distutils.version import LooseVersion from pandas import DataFrame, RangeIndex, Int64Index, get_option -from pandas.compat import range +from pandas.compat import string_types +from pandas.core.common import AbstractMethodError from pandas.io.common import get_filepath_or_buffer @@ -25,6 +26,11 @@ def get_engine(engine): except ImportError: pass + raise ImportError("Unable to find a usable engine; " + "tried using: 'pyarrow', 'fastparquet'.\n" + "pyarrow or fastparquet is required for parquet " + "support") + if engine not in ['pyarrow', 'fastparquet']: raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") @@ -34,37 +40,75 @@ def get_engine(engine): return FastParquetImpl() -class 
PyArrowImpl(object): +class BaseImpl(object): + + api = None # module + + @staticmethod + def validate_dataframe(df): + + if not isinstance(df, DataFrame): + raise ValueError("to_parquet only supports IO with DataFrames") + + # must have value column names (strings only) + if df.columns.inferred_type not in {'string', 'unicode'}: + raise ValueError("parquet must have string column names") + + # index level names must be strings + valid_names = all( + isinstance(name, string_types) + for name in df.index.names + if name is not None + ) + if not valid_names: + raise ValueError("Index level names must be strings") + + def write(self, df, path, compression, **kwargs): + raise AbstractMethodError(self) + + def read(self, path, columns=None, **kwargs): + raise AbstractMethodError(self) + + +class PyArrowImpl(BaseImpl): def __init__(self): # since pandas is a dependency of pyarrow # we need to import on first use - try: import pyarrow import pyarrow.parquet except ImportError: - raise ImportError("pyarrow is required for parquet support\n\n" - "you can install via conda\n" - "conda install pyarrow -c conda-forge\n" - "\nor via pip\n" - "pip install -U pyarrow\n") - + raise ImportError( + "pyarrow is required for parquet support\n\n" + "you can install via conda\n" + "conda install pyarrow -c conda-forge\n" + "\nor via pip\n" + "pip install -U pyarrow\n" + ) if LooseVersion(pyarrow.__version__) < '0.4.1': - raise ImportError("pyarrow >= 0.4.1 is required for parquet" - "support\n\n" - "you can install via conda\n" - "conda install pyarrow -c conda-forge\n" - "\nor via pip\n" - "pip install -U pyarrow\n") - - self._pyarrow_lt_050 = LooseVersion(pyarrow.__version__) < '0.5.0' - self._pyarrow_lt_060 = LooseVersion(pyarrow.__version__) < '0.6.0' + raise ImportError( + "pyarrow >= 0.4.1 is required for parquet support\n\n" + "you can install via conda\n" + "conda install pyarrow -c conda-forge\n" + "\nor via pip\n" + "pip install -U pyarrow\n" + ) + + self._pyarrow_lt_060 = ( + 
LooseVersion(pyarrow.__version__) < LooseVersion('0.6.0')) + self._pyarrow_lt_070 = ( + LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0')) + self.api = pyarrow def write(self, df, path, compression='snappy', coerce_timestamps='ms', **kwargs): + self.validate_dataframe(df) + if self._pyarrow_lt_070: + self._validate_write_lt_070(df) path, _, _ = get_filepath_or_buffer(path) + if self._pyarrow_lt_060: table = self.api.Table.from_pandas(df, timestamps_to_ms=True) self.api.parquet.write_table( @@ -76,37 +120,77 @@ def write(self, df, path, compression='snappy', table, path, compression=compression, coerce_timestamps=coerce_timestamps, **kwargs) - def read(self, path): + def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.parquet.read_table(path).to_pandas() - - -class FastParquetImpl(object): + if self._pyarrow_lt_070: + return self.api.parquet.read_pandas(path, columns=columns, + **kwargs).to_pandas() + kwargs['use_pandas_metadata'] = True + return self.api.parquet.read_table(path, columns=columns, + **kwargs).to_pandas() + + def _validate_write_lt_070(self, df): + # Compatibility shim for pyarrow < 0.7.0 + # TODO: Remove in pandas 0.22.0 + from pandas.core.indexes.multi import MultiIndex + if isinstance(df.index, MultiIndex): + msg = ( + "Multi-index DataFrames are only supported " + "with pyarrow >= 0.7.0" + ) + raise ValueError(msg) + # Validate index + if not isinstance(df.index, Int64Index): + msg = ( + "pyarrow < 0.7.0 does not support serializing {} for the " + "index; you can .reset_index() to make the index into " + "column(s), or install the latest version of pyarrow or " + "fastparquet." 
+ ) + raise ValueError(msg.format(type(df.index))) + if not df.index.equals(RangeIndex(len(df))): + raise ValueError( + "pyarrow < 0.7.0 does not support serializing a non-default " + "index; you can .reset_index() to make the index into " + "column(s), or install the latest version of pyarrow or " + "fastparquet." + ) + if df.index.name is not None: + raise ValueError( + "pyarrow < 0.7.0 does not serialize indexes with a name; you " + "can set the index.name to None or install the latest version " + "of pyarrow or fastparquet." + ) + + +class FastParquetImpl(BaseImpl): def __init__(self): # since pandas is a dependency of fastparquet # we need to import on first use - try: import fastparquet except ImportError: - raise ImportError("fastparquet is required for parquet support\n\n" - "you can install via conda\n" - "conda install fastparquet -c conda-forge\n" - "\nor via pip\n" - "pip install -U fastparquet") - + raise ImportError( + "fastparquet is required for parquet support\n\n" + "you can install via conda\n" + "conda install fastparquet -c conda-forge\n" + "\nor via pip\n" + "pip install -U fastparquet" + ) if LooseVersion(fastparquet.__version__) < '0.1.0': - raise ImportError("fastparquet >= 0.1.0 is required for parquet " - "support\n\n" - "you can install via conda\n" - "conda install fastparquet -c conda-forge\n" - "\nor via pip\n" - "pip install -U fastparquet") - + raise ImportError( + "fastparquet >= 0.1.0 is required for parquet " + "support\n\n" + "you can install via conda\n" + "conda install fastparquet -c conda-forge\n" + "\nor via pip\n" + "pip install -U fastparquet" + ) self.api = fastparquet def write(self, df, path, compression='snappy', **kwargs): + self.validate_dataframe(df) # thriftpy/protocol/compact.py:339: # DeprecationWarning: tostring() is deprecated. # Use tobytes() instead. 
@@ -115,9 +199,10 @@ def write(self, df, path, compression='snappy', **kwargs): self.api.write(path, df, compression=compression, **kwargs) - def read(self, path): + def read(self, path, columns=None, **kwargs): path, _, _ = get_filepath_or_buffer(path) - return self.api.ParquetFile(path).to_pandas() + parquet_file = self.api.ParquetFile(path) + return parquet_file.to_pandas(columns=columns, **kwargs) def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): @@ -138,47 +223,11 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): kwargs Additional keyword arguments passed to the engine """ - impl = get_engine(engine) + return impl.write(df, path, compression=compression, **kwargs) - if not isinstance(df, DataFrame): - raise ValueError("to_parquet only support IO with DataFrames") - - valid_types = {'string', 'unicode'} - - # validate index - # -------------- - - # validate that we have only a default index - # raise on anything else as we don't serialize the index - - if not isinstance(df.index, Int64Index): - raise ValueError("parquet does not support serializing {} " - "for the index; you can .reset_index()" - "to make the index into column(s)".format( - type(df.index))) - if not df.index.equals(RangeIndex.from_range(range(len(df)))): - raise ValueError("parquet does not support serializing a " - "non-default index for the index; you " - "can .reset_index() to make the index " - "into column(s)") - - if df.index.name is not None: - raise ValueError("parquet does not serialize index meta-data on a " - "default index") - - # validate columns - # ---------------- - - # must have value column names (strings only) - if df.columns.inferred_type not in valid_types: - raise ValueError("parquet must have string column names") - - return impl.write(df, path, compression=compression) - - -def read_parquet(path, engine='auto', **kwargs): +def read_parquet(path, engine='auto', columns=None, **kwargs): """ Load a parquet object from the 
file path, returning a DataFrame. @@ -188,6 +237,10 @@ def read_parquet(path, engine='auto', **kwargs): ---------- path : string File path + columns : list, default=None + If not None, only these columns will be read from the file. + + .. versionadded:: 0.21.1 engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto' Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first @@ -201,4 +254,4 @@ def read_parquet(path, engine='auto', **kwargs): """ impl = get_engine(engine) - return impl.read(path) + return impl.read(path, columns=columns, **kwargs) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 1b6414ea974fa..df8b1b5cca1d3 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -74,15 +74,19 @@ .. versionadded:: 0.18.1 support for the Python parser. header : int or list of ints, default 'infer' - Row number(s) to use as the column names, and the start of the data. - Default behavior is as if set to 0 if no ``names`` passed, otherwise - ``None``. Explicitly pass ``header=0`` to be able to replace existing - names. The header can be a list of integers that specify row locations for - a multi-index on the columns e.g. [0,1,3]. Intervening rows that are not - specified will be skipped (e.g. 2 in this example is skipped). Note that - this parameter ignores commented lines and empty lines if - ``skip_blank_lines=True``, so header=0 denotes the first line of data - rather than the first line of the file. + Row number(s) to use as the column names, and the start of the + data. Default behavior is to infer the column names: if no names + are passed the behavior is identical to ``header=0`` and column + names are inferred from the first line of the file, if column + names are passed explicitly then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to + replace existing names. 
The header can be a list of integers that + specify row locations for a multi-index on the columns + e.g. [0,1,3]. Intervening rows that are not specified will be + skipped (e.g. 2 in this example is skipped). Note that this + parameter ignores commented lines and empty lines if + ``skip_blank_lines=True``, so header=0 denotes the first line of + data rather than the first line of the file. names : array-like, default None List of column names to use. If file contains no header row, then you should explicitly pass header=None. Duplicates in this list will cause @@ -1231,6 +1235,8 @@ def __init__(self, kwds): self.na_values = kwds.get('na_values') self.na_fvalues = kwds.get('na_fvalues') + self.na_filter = kwds.get('na_filter', False) + self.true_values = kwds.get('true_values') self.false_values = kwds.get('false_values') self.as_recarray = kwds.get('as_recarray', False) @@ -1404,7 +1410,6 @@ def _make_index(self, data, alldata, columns, indexnamerow=False): elif not self._has_complex_date_col: index = self._get_simple_index(alldata, columns) index = self._agg_index(index) - elif self._has_complex_date_col: if not self._name_processed: (self.index_names, _, @@ -1487,8 +1492,12 @@ def _agg_index(self, index, try_parse_dates=True): if (try_parse_dates and self._should_parse_dates(i)): arr = self._date_conv(arr) - col_na_values = self.na_values - col_na_fvalues = self.na_fvalues + if self.na_filter: + col_na_values = self.na_values + col_na_fvalues = self.na_fvalues + else: + col_na_values = set() + col_na_fvalues = set() if isinstance(self.na_values, dict): col_name = self.index_names[i] @@ -1671,7 +1680,9 @@ def __init__(self, src, **kwds): ParserBase.__init__(self, kwds) - if 'utf-16' in (kwds.get('encoding') or ''): + if (kwds.get('compression') is None and + 'utf-16' in (kwds.get('encoding') or '')): + # if source is utf-16 plain text, convert source to utf-8 if isinstance(src, compat.string_types): src = open(src, 'rb') self.handles.append(src) @@ -2041,8 
+2052,6 @@ def __init__(self, f, **kwds): self.names_passed = kwds['names'] or None - self.na_filter = kwds['na_filter'] - self.has_index_names = False if 'has_index_names' in kwds: self.has_index_names = kwds['has_index_names'] diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 40955c50f6b5f..2a1aaf2f66469 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2137,10 +2137,17 @@ def convert(self, values, nan_rep, encoding): # if we have stored a NaN in the categories # then strip it; in theory we could have BOTH # -1s in the codes and nulls :< - mask = isna(categories) - if mask.any(): - categories = categories[~mask] - codes[codes != -1] -= mask.astype(int).cumsum().values + if categories is None: + # Handle case of NaN-only categorical columns in which case + # the categories are an empty array; when this is stored, + # pytables cannot write a zero-len array, so on readback + # the categories would be None and `read_hdf()` would fail. + categories = Index([], dtype=np.float64) + else: + mask = isna(categories) + if mask.any(): + categories = categories[~mask] + codes[codes != -1] -= mask.astype(int).cumsum().values self.data = Categorical.from_codes(codes, categories=categories, diff --git a/pandas/io/sql.py b/pandas/io/sql.py index c42c19e1357bc..a9b4f504dd624 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -103,12 +103,12 @@ def _handle_date_column(col, utc=None, format=None): if isinstance(format, dict): return to_datetime(col, errors='ignore', **format) else: - if format in ['D', 's', 'ms', 'us', 'ns']: - return to_datetime(col, errors='coerce', unit=format, utc=utc) - elif (issubclass(col.dtype.type, np.floating) or - issubclass(col.dtype.type, np.integer)): - # parse dates as timestamp - format = 's' if format is None else format + # Allow passing of formatting string for integers + # GH17855 + if format is None and (issubclass(col.dtype.type, np.floating) or + issubclass(col.dtype.type, np.integer)): + format = 's' + if 
format in ['D', 'd', 'h', 'm', 's', 'ms', 'us', 'ns']: return to_datetime(col, errors='coerce', unit=format, utc=utc) elif is_datetime64tz_dtype(col): # coerce to UTC timezone diff --git a/pandas/io/stata.py b/pandas/io/stata.py index afc1631a947c8..aafe5f2ce76bd 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -306,11 +306,11 @@ def convert_delta_safe(base, deltas, unit): data_col[bad_locs] = 1.0 # Replace with NaT dates = dates.astype(np.int64) - if fmt in ["%tc", "tc"]: # Delta ms relative to base + if fmt.startswith(("%tc", "tc")): # Delta ms relative to base base = stata_epoch ms = dates conv_dates = convert_delta_safe(base, ms, 'ms') - elif fmt in ["%tC", "tC"]: + elif fmt.startswith(("%tC", "tC")): from warnings import warn warn("Encountered %tC format. Leaving in Stata Internal Format.") @@ -318,27 +318,30 @@ def convert_delta_safe(base, deltas, unit): if has_bad_values: conv_dates[bad_locs] = pd.NaT return conv_dates - elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base + # Delta days relative to base + elif fmt.startswith(("%td", "td", "%d", "d")): base = stata_epoch days = dates conv_dates = convert_delta_safe(base, days, 'd') - elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week + # does not count leap days - 7 days is a week. 
+ # 52nd week may have more than 7 days + elif fmt.startswith(("%tw", "tw")): year = stata_epoch.year + dates // 52 days = (dates % 52) * 7 conv_dates = convert_year_days_safe(year, days) - elif fmt in ["%tm", "tm"]: # Delta months relative to base + elif fmt.startswith(("%tm", "tm")): # Delta months relative to base year = stata_epoch.year + dates // 12 month = (dates % 12) + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%tq", "tq"]: # Delta quarters relative to base + elif fmt.startswith(("%tq", "tq")): # Delta quarters relative to base year = stata_epoch.year + dates // 4 month = (dates % 4) * 3 + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%th", "th"]: # Delta half-years relative to base + elif fmt.startswith(("%th", "th")): # Delta half-years relative to base year = stata_epoch.year + dates // 2 month = (dates % 2) * 6 + 1 conv_dates = convert_year_month_safe(year, month) - elif fmt in ["%ty", "ty"]: # Years -- not delta + elif fmt.startswith(("%ty", "ty")): # Years -- not delta year = dates month = np.ones_like(dates) conv_dates = convert_year_month_safe(year, month) @@ -1029,10 +1032,6 @@ def _read_header(self): # calculate size of a data record self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist) - # remove format details from %td - self.fmtlist = ["%td" if x.startswith("%td") else x - for x in self.fmtlist] - def _read_new_header(self, first_char): # The first part of the header is common to 117 and 118. 
self.path_or_buf.read(27) # stata_dta><header><release> @@ -1578,7 +1577,8 @@ def read(self, nrows=None, convert_dates=None, self._do_convert_missing(data, convert_missing) if convert_dates: - cols = np.where(lmap(lambda x: x in _date_formats, + cols = np.where(lmap(lambda x: any(x.startswith(fmt) + for fmt in _date_formats), self.fmtlist))[0] for i in cols: col = data.columns[i] diff --git a/pandas/plotting/__init__.py b/pandas/plotting/__init__.py index 8f98e297e3e66..385d4d7f047c7 100644 --- a/pandas/plotting/__init__.py +++ b/pandas/plotting/__init__.py @@ -11,3 +11,10 @@ from pandas.plotting._core import boxplot from pandas.plotting._style import plot_params from pandas.plotting._tools import table +try: + from pandas.plotting._converter import \ + register as register_matplotlib_converters + from pandas.plotting._converter import \ + deregister as deregister_matplotlib_converters +except ImportError: + pass diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 47d15195315ba..357e84d1f17ea 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -1,3 +1,4 @@ +import warnings from datetime import datetime, timedelta import datetime as pydt import numpy as np @@ -44,14 +45,96 @@ MUSEC_PER_DAY = 1e6 * SEC_PER_DAY +_WARN = True # Global for whether pandas has registered the units explicitly +_mpl_units = {} # Cache for units overwritten by us -def register(): - units.registry[lib.Timestamp] = DatetimeConverter() - units.registry[Period] = PeriodConverter() - units.registry[pydt.datetime] = DatetimeConverter() - units.registry[pydt.date] = DatetimeConverter() - units.registry[pydt.time] = TimeConverter() - units.registry[np.datetime64] = DatetimeConverter() + +def get_pairs(): + pairs = [ + (lib.Timestamp, DatetimeConverter), + (Period, PeriodConverter), + (pydt.datetime, DatetimeConverter), + (pydt.date, DatetimeConverter), + (pydt.time, TimeConverter), + (np.datetime64, DatetimeConverter), + ] + return pairs + + 
+def register(explicit=True): + """Register Pandas Formatters and Converters with matplotlib + + This function modifies the global ``matplotlib.units.registry`` + dictionary. Pandas adds custom converters for + + * pd.Timestamp + * pd.Period + * np.datetime64 + * datetime.datetime + * datetime.date + * datetime.time + + See Also + -------- + deregister_matplotlib_converters + """ + # Renamed in pandas.plotting.__init__ + global _WARN + + if explicit: + _WARN = False + + pairs = get_pairs() + for type_, cls in pairs: + converter = cls() + if type_ in units.registry: + previous = units.registry[type_] + _mpl_units[type_] = previous + units.registry[type_] = converter + + +def deregister(): + """Remove pandas' formatters and converters + + Removes the custom converters added by :func:`register`. This + attempts to set the state of the registry back to the state before + pandas registered its own units. Converters for pandas' own types like + Timestamp and Period are removed completely. Converters for types + pandas overwrites, like ``datetime.datetime``, are restored to their + original value. + + See Also + -------- + register_matplotlib_converters + """ + # Renamed in pandas.plotting.__init__ + for type_, cls in get_pairs(): + # We use type to catch our classes directly, no inheritance + if type(units.registry.get(type_)) is cls: + units.registry.pop(type_) + + # restore the old keys + for unit, formatter in _mpl_units.items(): + if type(formatter) not in {DatetimeConverter, PeriodConverter, + TimeConverter}: + # make it idempotent by excluding ours. + units.registry[unit] = formatter + + +def _check_implicitly_registered(): + global _WARN + + if _WARN: + msg = ("Using an implicitly registered datetime converter for a " + "matplotlib plotting method. The converter was registered " + "by pandas on import. 
Future versions of pandas will require " + "you to explicitly register matplotlib converters.\n\n" + "To register the converters:\n\t" + ">>> from pandas.plotting import register_matplotlib_converters" + "\n\t" + ">>> register_matplotlib_converters()") + warnings.warn(msg, FutureWarning) + _WARN = False def _to_ordinalf(tm): @@ -189,6 +272,7 @@ class DatetimeConverter(dates.DateConverter): @staticmethod def convert(values, unit, axis): # values might be a 1-d array, or a list-like of arrays. + _check_implicitly_registered() if is_nested_list_like(values): values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values] @@ -273,6 +357,7 @@ class PandasAutoDateLocator(dates.AutoDateLocator): def get_locator(self, dmin, dmax): 'Pick the best locator based on a distance.' + _check_implicitly_registered() delta = relativedelta(dmax, dmin) num_days = (delta.years * 12.0 + delta.months) * 31.0 + delta.days @@ -314,6 +399,7 @@ def get_unit_generic(freq): def __call__(self): # if no data have been set, this will tank with a ValueError + _check_implicitly_registered() try: dmin, dmax = self.viewlim_to_dt() except ValueError: @@ -914,6 +1000,8 @@ def _get_default_locs(self, vmin, vmax): def __call__(self): 'Return the locations of the ticks.' # axis calls Locator.set_axis inside set_m<xxxx>_formatter + _check_implicitly_registered() + vi = tuple(self.axis.get_view_interval()) if vi != self.plot_obj.view_interval: self.plot_obj.date_axis_info = None @@ -998,6 +1086,8 @@ def set_locs(self, locs): 'Sets the locations of the ticks' # don't actually use the locs. This is just needed to work with # matplotlib. 
Force to use vmin, vmax + _check_implicitly_registered() + self.locs = locs (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) @@ -1009,6 +1099,8 @@ def set_locs(self, locs): self._set_default_format(vmin, vmax) def __call__(self, x, pos=0): + _check_implicitly_registered() + if self.formatdict is None: return '' else: @@ -1039,6 +1131,7 @@ def format_timedelta_ticks(x, pos, n_decimals): return s def __call__(self, x, pos=0): + _check_implicitly_registered() (vmin, vmax) = tuple(self.axis.get_view_interval()) n_decimals = int(np.ceil(np.log10(100 * 1e9 / (vmax - vmin)))) if n_decimals > 9: diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 43f33cf30dea1..e1380953e4519 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -11,6 +11,7 @@ from pandas.util._decorators import cache_readonly from pandas.core.base import PandasObject +from pandas.core.config import get_option from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike from pandas.core.dtypes.common import ( is_list_like, @@ -40,16 +41,13 @@ _get_xlim, _set_ticks_props, format_date_labels) -_registered = False - - -def _setup(): - # delay the import of matplotlib until nescessary - global _registered - if not _registered: - from pandas.plotting import _converter - _converter.register() - _registered = True +try: + from pandas.plotting import _converter +except ImportError: + pass +else: + if get_option('plotting.matplotlib.register_converters'): + _converter.register(explicit=True) def _get_standard_kind(kind): @@ -99,7 +97,7 @@ def __init__(self, data, kind=None, by=None, subplots=False, sharex=None, secondary_y=False, colormap=None, table=False, layout=None, **kwds): - _setup() + _converter._WARN = False self.data = data self.by = by @@ -383,12 +381,16 @@ def _add_table(self): def _post_plot_logic_common(self, ax, data): """Common post process for each axes""" - labels = [pprint_thing(key) for key in data.index] - labels = 
dict(zip(range(len(data.index)), labels)) + + def get_label(i): + try: + return pprint_thing(data.index[i]) + except Exception: + return '' if self.orientation == 'vertical' or self.orientation is None: if self._need_to_set_index: - xticklabels = [labels.get(x, '') for x in ax.get_xticks()] + xticklabels = [get_label(x) for x in ax.get_xticks()] ax.set_xticklabels(xticklabels) self._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize) @@ -400,7 +402,7 @@ def _post_plot_logic_common(self, ax, data): elif self.orientation == 'horizontal': if self._need_to_set_index: - yticklabels = [labels.get(y, '') for y in ax.get_yticks()] + yticklabels = [get_label(y) for y in ax.get_yticks()] ax.set_yticklabels(yticklabels) self._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize) @@ -2059,7 +2061,7 @@ def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0, grid=True, figsize=None, layout=None, return_type=None, **kwds): import matplotlib.pyplot as plt - _setup() + _converter._WARN = False ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, grid=grid, rot=rot, figsize=figsize, layout=layout, return_type=return_type, **kwds) @@ -2155,7 +2157,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, kwds : other plotting keyword arguments To be passed to hist function """ - _setup() + _converter._WARN = False if by is not None: axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, @@ -2289,6 +2291,8 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None, ------- axes: collection of Matplotlib Axes """ + _converter._WARN = False + def plot_group(group, ax): ax.hist(group.dropna().values, bins=bins, **kwargs) @@ -2352,7 +2356,7 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None, >>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1) >>> boxplot_frame_groupby(grouped, 
subplots=False) """ - _setup() + _converter._WARN = False if subplots is True: naxes = len(grouped) fig, axes = _subplots(naxes=naxes, squeeze=False, diff --git a/pandas/plotting/_timeseries.py b/pandas/plotting/_timeseries.py index 3d04973ed0009..56b5311326e98 100644 --- a/pandas/plotting/_timeseries.py +++ b/pandas/plotting/_timeseries.py @@ -1,5 +1,7 @@ # TODO: Use the fact that axis can have units to simplify the process +import functools + import numpy as np from matplotlib import pylab @@ -293,6 +295,10 @@ def format_timedelta_ticks(x, pos, n_decimals): return s +def _format_coord(freq, t, y): + return "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y) + + def format_dateaxis(subplot, freq, index): """ Pretty-formats the date axis (x-axis). @@ -327,8 +333,7 @@ def format_dateaxis(subplot, freq, index): subplot.xaxis.set_minor_formatter(minformatter) # x and y coord info - subplot.format_coord = lambda t, y: ( - "t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y)) + subplot.format_coord = functools.partial(_format_coord, freq) elif isinstance(index, TimedeltaIndex): subplot.xaxis.set_major_formatter( diff --git a/pandas/tests/dtypes/test_cast.py b/pandas/tests/dtypes/test_cast.py index d9fb458c83529..82a35fa711e8c 100644 --- a/pandas/tests/dtypes/test_cast.py +++ b/pandas/tests/dtypes/test_cast.py @@ -38,17 +38,17 @@ def test_downcast_conv(self): arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]) result = maybe_downcast_to_dtype(arr, 'infer') - assert (np.array_equal(result, arr)) + tm.assert_numpy_array_equal(result, arr) arr = np.array([8., 8., 8., 8., 8.9999999999995]) result = maybe_downcast_to_dtype(arr, 'infer') - expected = np.array([8, 8, 8, 8, 9]) - assert (np.array_equal(result, expected)) + expected = np.array([8, 8, 8, 8, 9], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) arr = np.array([8., 8., 8., 8., 9.0000000000005]) result = maybe_downcast_to_dtype(arr, 'infer') - expected = np.array([8, 8, 8, 
8, 9]) - assert (np.array_equal(result, expected)) + expected = np.array([8, 8, 8, 8, 9], dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) # GH16875 coercing of bools ser = Series([True, True, False]) diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 70273f9e999cf..7195cb43a70dc 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -416,6 +416,12 @@ def test_length_zero(self): result = lib.infer_dtype([]) assert result == 'empty' + # GH 18004 + arr = np.array([np.array([], dtype=object), + np.array([], dtype=object)]) + result = lib.infer_dtype(arr) + assert result == 'empty' + def test_integers(self): arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O') result = lib.infer_dtype(arr) diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 1bac4037e99c9..97ab0deb50d50 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -440,7 +440,8 @@ def test_nunique(self): Series({0: 1, 1: 3, 2: 2})) def test_sum(self): - self._check_stat_op('sum', np.sum, has_numeric_only=True) + self._check_stat_op('sum', np.sum, has_numeric_only=True, + skipna_alternative=np.nansum) # mixed types (with upcasting happening) self._check_stat_op('sum', np.sum, @@ -716,7 +717,8 @@ def alt(x): def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, has_numeric_only=False, check_dtype=True, - check_dates=False, check_less_precise=False): + check_dates=False, check_less_precise=False, + skipna_alternative=None): if frame is None: frame = self.frame # set some NAs @@ -737,15 +739,11 @@ def _check_stat_op(self, name, alternative, frame=None, has_skipna=True, assert len(result) if has_skipna: - def skipna_wrapper(x): - nona = x.dropna() - if len(nona) == 0: - return np.nan - return alternative(nona) - def wrapper(x): return alternative(x.values) + skipna_wrapper = 
tm._make_skipna_wrapper(alternative, + skipna_alternative) result0 = f(axis=0, skipna=False) result1 = f(axis=1, skipna=False) tm.assert_series_equal(result0, frame.apply(wrapper), @@ -797,8 +795,11 @@ def wrapper(x): r0 = getattr(all_na, name)(axis=0) r1 = getattr(all_na, name)(axis=1) if name in ['sum', 'prod']: - assert np.isnan(r0).all() - assert np.isnan(r1).all() + unit = int(name == 'prod') + expected = pd.Series(unit, index=r0.index, dtype=r0.dtype) + tm.assert_series_equal(r0, expected) + expected = pd.Series(unit, index=r1.index, dtype=r1.dtype) + tm.assert_series_equal(r1, expected) def test_mode(self): df = pd.DataFrame({"A": [12, 12, 11, 12, 19, 11], @@ -936,6 +937,66 @@ def test_sum_corner(self): assert len(axis0) == 0 assert len(axis1) == 0 + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_sum_prod_nanops(self, method, unit): + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [unit, unit], + "b": [unit, np.nan], + "c": [np.nan, np.nan]}) + # The default + result = getattr(df, method) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + + # min_count=1 + result = getattr(df, method)(min_count=1) + expected = pd.Series([unit, unit, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(df, method)(min_count=0) + expected = pd.Series([unit, unit, unit], index=idx, dtype='float64') + tm.assert_series_equal(result, expected) + + result = getattr(df.iloc[1:], method)(min_count=1) + expected = pd.Series([unit, np.nan, np.nan], index=idx) + tm.assert_series_equal(result, expected) + + # min_count > 1 + df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5}) + result = getattr(df, method)(min_count=5) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + result = getattr(df, method)(min_count=6) + expected = pd.Series(result, index=['A', 'B']) + tm.assert_series_equal(result, expected) + + def 
test_sum_nanops_timedelta(self): + # prod isn't defined on timedeltas + idx = ['a', 'b', 'c'] + df = pd.DataFrame({"a": [0, 0], + "b": [0, np.nan], + "c": [np.nan, np.nan]}) + + df2 = df.apply(pd.to_timedelta) + + # 0 by default + result = df2.sum() + expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df2.sum(min_count=0) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df2.sum(min_count=1) + expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx) + tm.assert_series_equal(result, expected) + def test_sum_object(self): values = self.frame.values.astype(int) frame = DataFrame(values, index=self.frame.index, diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 1e2f630401c89..343e235fb741c 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -884,6 +884,27 @@ def test_filter_regex_search(self): exp = df[[x for x in df.columns if 'BB' in x]] assert_frame_equal(result, exp) + @pytest.mark.parametrize('name,expected', [ + ('a', DataFrame({u'a': [1, 2]})), + (u'a', DataFrame({u'a': [1, 2]})), + (u'あ', DataFrame({u'あ': [3, 4]})) + ]) + def test_filter_unicode(self, name, expected): + # GH13101 + df = DataFrame({u'a': [1, 2], u'あ': [3, 4]}) + + assert_frame_equal(df.filter(like=name), expected) + assert_frame_equal(df.filter(regex=name), expected) + + @pytest.mark.parametrize('name', ['a', u'a']) + def test_filter_bytestring(self, name): + # GH13101 + df = DataFrame({b'a': [1, 2], b'b': [3, 4]}) + expected = DataFrame({b'a': [1, 2]}) + + assert_frame_equal(df.filter(like=name), expected) + assert_frame_equal(df.filter(regex=name), expected) + def test_filter_corner(self): empty = DataFrame() diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index c55c79ef18602..8291e9d452348 100644 --- 
a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1913,10 +1913,11 @@ def test_from_records_len0_with_columns(self): # #2633 result = DataFrame.from_records([], index='foo', columns=['foo', 'bar']) + expected = Index(['bar']) - assert np.array_equal(result.columns, ['bar']) assert len(result) == 0 assert result.index.name == 'foo' + tm.assert_index_equal(result.columns, expected) def test_to_frame_with_falsey_names(self): # GH 16114 diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py index 5bdb76494f4c8..7d2d18db8d41c 100644 --- a/pandas/tests/frame/test_convert_to.py +++ b/pandas/tests/frame/test_convert_to.py @@ -1,6 +1,9 @@ # -*- coding: utf-8 -*- +from datetime import datetime + import pytest +import pytz import collections import numpy as np @@ -249,3 +252,18 @@ def test_to_dict_box_scalars(self): result = DataFrame(d).to_dict(orient='records') assert isinstance(result[0]['a'], (int, long)) + + def test_frame_to_dict_tz(self): + # GH18372 When converting to dict with orient='records' columns of + # datetime that are tz-aware were not converted to required arrays + data = [(datetime(2017, 11, 18, 21, 53, 0, 219225, tzinfo=pytz.utc),), + (datetime(2017, 11, 18, 22, 6, 30, 61810, tzinfo=pytz.utc,),)] + df = DataFrame(list(data), columns=["d", ]) + + result = df.to_dict(orient='records') + expected = [ + {'d': Timestamp('2017-11-18 21:53:00.219225+0000', tz=pytz.utc)}, + {'d': Timestamp('2017-11-18 22:06:30.061810+0000', tz=pytz.utc)}, + ] + tm.assert_dict_equal(result[0], expected[0]) + tm.assert_dict_equal(result[1], expected[1]) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index abb528f0d2179..5adcd3b6855ce 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -10,6 +10,8 @@ from pandas import (DataFrame, Series, date_range, Timedelta, Timestamp, compat, concat, option_context) from pandas.compat import u 
+from pandas import _np_version_under1p14 + from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.tests.frame.common import TestData from pandas.util.testing import (assert_series_equal, @@ -531,7 +533,12 @@ def test_astype_str(self): assert_frame_equal(result, expected) result = DataFrame([1.12345678901234567890]).astype(tt) - expected = DataFrame(['1.12345678901']) + if _np_version_under1p14: + # < 1.14 truncates + expected = DataFrame(['1.12345678901']) + else: + # >= 1.14 preserves the full repr + expected = DataFrame(['1.1234567890123457']) assert_frame_equal(result, expected) @pytest.mark.parametrize("dtype_class", [dict, Series]) diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index 4f77ba0ae1f5a..5b903c5a1eaf6 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -448,7 +448,7 @@ def test_as_matrix_duplicates(self): expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']], dtype=object) - assert np.array_equal(result, expected) + tm.assert_numpy_array_equal(result, expected) def test_set_value_by_index(self): # See gh-12344 diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 4162a586f8063..ca8a0d8bda3ab 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -1203,3 +1203,16 @@ def test_period_index_date_overflow(self): expected = ',0\n1990-01-01,4\n,5\n3005-01-01,6\n' assert result == expected + + def test_multi_index_header(self): + # see gh-5539 + columns = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), + ("b", 1), ("b", 2)]) + df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + df.columns = columns + + header = ["a", "b", "c", "d"] + result = df.to_csv(header=header) + + expected = ",a,b,c,d\n0,1,2,3,4\n1,5,6,7,8\n" + assert result == expected diff --git a/pandas/tests/groupby/test_aggregate.py b/pandas/tests/groupby/test_aggregate.py index 
913d3bcc09869..ad1a322fdaae9 100644 --- a/pandas/tests/groupby/test_aggregate.py +++ b/pandas/tests/groupby/test_aggregate.py @@ -809,26 +809,60 @@ def test__cython_agg_general(self): exc.args += ('operation: %s' % op, ) raise - def test_cython_agg_empty_buckets(self): - ops = [('mean', np.mean), - ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), - ('var', lambda x: np.var(x, ddof=1)), - ('add', lambda x: np.sum(x) if len(x) > 0 else np.nan), - ('prod', np.prod), - ('min', np.min), - ('max', np.max), ] - + @pytest.mark.parametrize('op, targop', [ + ('mean', np.mean), + ('median', lambda x: np.median(x) if len(x) > 0 else np.nan), + ('var', lambda x: np.var(x, ddof=1)), + ('min', np.min), + ('max', np.max), ] + ) + def test_cython_agg_empty_buckets(self, op, targop): df = pd.DataFrame([11, 12, 13]) grps = range(0, 55, 5) - for op, targop in ops: - result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) - expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) - try: - tm.assert_frame_equal(result, expected) - except BaseException as exc: - exc.args += ('operation: %s' % op,) - raise + # calling _cython_agg_general directly, instead of via the user API + # which sets different values for min_count, so do that here. 
+ result = df.groupby(pd.cut(df[0], grps))._cython_agg_general(op) + expected = df.groupby(pd.cut(df[0], grps)).agg(lambda x: targop(x)) + try: + tm.assert_frame_equal(result, expected) + except BaseException as exc: + exc.args += ('operation: %s' % op,) + raise + + def test_cython_agg_empty_buckets_nanops(self): + # GH-18869 can't call nanops on empty groups, so hardcode expected + # for these + df = pd.DataFrame([11, 12, 13], columns=['a']) + grps = range(0, 25, 5) + # add / sum + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('add') + intervals = pd.interval_range(0, 20, freq=5) + expected = pd.DataFrame( + {"a": [0, 0, 36, 0]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + # prod + result = df.groupby(pd.cut(df['a'], grps))._cython_agg_general('prod') + expected = pd.DataFrame( + {"a": [1, 1, 1716, 1]}, + index=pd.CategoricalIndex(intervals, name='a', ordered=True)) + tm.assert_frame_equal(result, expected) + + @pytest.mark.xfail(reason="GH-18869: agg func not called on empty groups.") + def test_agg_category_nansum(self): + categories = ['a', 'b', 'c'] + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=categories), + 'B': [1, 2, 3]}) + result = df.groupby("A").B.agg(np.nansum) + expected = pd.Series([3, 3, 0], + index=pd.CategoricalIndex(['a', 'b', 'c'], + categories=categories, + name='A'), + name='B') + tm.assert_series_equal(result, expected) def test_agg_over_numpy_arrays(self): # GH 3788 diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index fdc03acd3e931..d4f35aa8755d1 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -17,6 +17,142 @@ class TestGroupByCategorical(MixIn): + def test_groupby(self): + + cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], + categories=["a", "b", "c", "d"], ordered=True) + data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 
3, 4, 5], "b": cats}) + + exp_index = CategoricalIndex(list('abcd'), name='b', ordered=True) + expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index) + result = data.groupby("b").mean() + tm.assert_frame_equal(result, expected) + + raw_cat1 = Categorical(["a", "a", "b", "b"], + categories=["a", "b", "z"], ordered=True) + raw_cat2 = Categorical(["c", "d", "c", "d"], + categories=["c", "d", "y"], ordered=True) + df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) + + # single grouper + gb = df.groupby("A") + exp_idx = CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) + expected = DataFrame({'values': Series([3, 7, 0], index=exp_idx)}) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # multiple groupers + gb = df.groupby(['A', 'B']) + exp_index = pd.MultiIndex.from_product( + [Categorical(["a", "b", "z"], ordered=True), + Categorical(["c", "d", "y"], ordered=True)], + names=['A', 'B']) + expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan, + np.nan, np.nan, np.nan]}, + index=exp_index) + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # multiple groupers with a non-cat + df = df.copy() + df['C'] = ['foo', 'bar'] * 2 + gb = df.groupby(['A', 'B', 'C']) + exp_index = pd.MultiIndex.from_product( + [Categorical(["a", "b", "z"], ordered=True), + Categorical(["c", "d", "y"], ordered=True), + ['foo', 'bar']], + names=['A', 'B', 'C']) + expected = DataFrame({'values': Series( + np.nan, index=exp_index)}).sort_index() + expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4] + result = gb.sum() + tm.assert_frame_equal(result, expected) + + # GH 8623 + x = DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'], + [1, 'John P. 
Doe']], + columns=['person_id', 'person_name']) + x['person_name'] = Categorical(x.person_name) + + g = x.groupby(['person_id']) + result = g.transform(lambda x: x) + tm.assert_frame_equal(result, x[['person_name']]) + + result = x.drop_duplicates('person_name') + expected = x.iloc[[0, 1]] + tm.assert_frame_equal(result, expected) + + def f(x): + return x.drop_duplicates('person_name').iloc[0] + + result = g.apply(f) + expected = x.iloc[[0, 1]].copy() + expected.index = Index([1, 2], name='person_id') + expected['person_name'] = expected['person_name'].astype('object') + tm.assert_frame_equal(result, expected) + + # GH 9921 + # Monotonic + df = DataFrame({"a": [5, 15, 25]}) + c = pd.cut(df.a, bins=[0, 10, 20, 30, 40]) + + result = df.a.groupby(c).transform(sum) + tm.assert_series_equal(result, df['a']) + + tm.assert_series_equal( + df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) + tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + tm.assert_frame_equal( + df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) + + # Filter + tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) + tm.assert_frame_equal(df.groupby(c).filter(np.all), df) + + # Non-monotonic + df = DataFrame({"a": [5, 15, 25, -5]}) + c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40]) + + result = df.a.groupby(c).transform(sum) + tm.assert_series_equal(result, df['a']) + + tm.assert_series_equal( + df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) + tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) + tm.assert_frame_equal( + df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) + + # GH 9603 + df = DataFrame({'a': [1, 0, 0, 0]}) + c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list('abcd'))) + result = df.groupby(c).apply(len) + + exp_index = CategoricalIndex( + c.values.categories, ordered=c.values.ordered) + expected = Series([1, 0, 0, 0], index=exp_index) + expected.index.name = 'a' + tm.assert_series_equal(result, expected) + + def 
test_groupby_sort(self): + + # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby + # This should result in a properly sorted Series so that the plot + # has a sorted x axis + # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') + + df = DataFrame({'value': np.random.randint(0, 10000, 100)}) + labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) + + df = df.sort_values(by=['value'], ascending=True) + df['value_group'] = pd.cut(df.value, range(0, 10500, 500), + right=False, labels=cat_labels) + + res = df.groupby(['value_group'])['value_group'].count() + exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] + exp.index = CategoricalIndex(exp.index, name=exp.index.name) + tm.assert_series_equal(res, exp) + def test_level_groupby_get_group(self): # GH15155 df = DataFrame(data=np.arange(2, 22, 2), @@ -526,3 +662,53 @@ def test_groupby_categorical_two_columns(self): "C3": [nan, nan, nan, nan, 10, 100, nan, nan, nan, nan, 200, 34]}, index=idx) tm.assert_frame_equal(res, exp) + + def test_empty_sum(self): + # https://github.com/pandas-dev/pandas/issues/18678 + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') + + # 0 by default + result = df.groupby("A").B.sum() + expected = pd.Series([3, 1, 0], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A").B.sum(min_count=0) + expected = pd.Series([3, 1, 0], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A").B.sum(min_count=1) + expected = pd.Series([3, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count>1 + result = df.groupby("A").B.sum(min_count=2) + expected = pd.Series([3, np.nan, np.nan], expected_idx, name='B') + 
tm.assert_series_equal(result, expected) + + def test_empty_prod(self): + # https://github.com/pandas-dev/pandas/issues/18678 + df = pd.DataFrame({"A": pd.Categorical(['a', 'a', 'b'], + categories=['a', 'b', 'c']), + 'B': [1, 2, 1]}) + + expected_idx = pd.CategoricalIndex(['a', 'b', 'c'], name='A') + + # 1 by default + result = df.groupby("A").B.prod() + expected = pd.Series([2, 1, 1], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=0 + result = df.groupby("A").B.prod(min_count=0) + expected = pd.Series([2, 1, 1], expected_idx, name='B') + tm.assert_series_equal(result, expected) + + # min_count=1 + result = df.groupby("A").B.prod(min_count=1) + expected = pd.Series([2, 1, np.nan], expected_idx, name='B') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/groupby/test_counting.py b/pandas/tests/groupby/test_counting.py index 485241d593d4f..787d99086873e 100644 --- a/pandas/tests/groupby/test_counting.py +++ b/pandas/tests/groupby/test_counting.py @@ -2,9 +2,11 @@ from __future__ import print_function import numpy as np +import pytest -from pandas import (DataFrame, Series, MultiIndex) -from pandas.util.testing import assert_series_equal +from pandas import (DataFrame, Series, MultiIndex, Timestamp, Timedelta, + Period) +from pandas.util.testing import (assert_series_equal, assert_frame_equal) from pandas.compat import (range, product as cart_product) @@ -195,3 +197,18 @@ def test_ngroup_respects_groupby_order(self): g.ngroup()) assert_series_equal(Series(df['group_index'].values), g.cumcount()) + + @pytest.mark.parametrize('datetimelike', [ + [Timestamp('2016-05-%02d 20:09:25+00:00' % i) for i in range(1, 4)], + [Timestamp('2016-05-%02d 20:09:25' % i) for i in range(1, 4)], + [Timedelta(x, unit="h") for x in range(1, 4)], + [Period(freq="2W", year=2017, month=x) for x in range(1, 4)]]) + def test_count_with_datetimelike(self, datetimelike): + # test for #13393, where DataframeGroupBy.count() fails + # when 
counting a datetimelike column. + + df = DataFrame({'x': ['a', 'a', 'b'], 'y': datetimelike}) + res = df.groupby('x').count() + expected = DataFrame({'y': [2, 1]}, index=['a', 'b']) + expected.index.name = "x" + assert_frame_equal(expected, res) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 9d25117fbd954..7a5581c897231 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -10,7 +10,7 @@ from pandas import (date_range, bdate_range, Timestamp, Index, MultiIndex, DataFrame, Series, - concat, Panel, DatetimeIndex) + concat, Panel, DatetimeIndex, CategoricalIndex) from pandas.errors import UnsupportedFunctionCall, PerformanceWarning from pandas.util.testing import (assert_panel_equal, assert_frame_equal, assert_series_equal, assert_almost_equal, @@ -28,6 +28,15 @@ from .common import MixIn +class TestGrouper(object): + + def test_repr(self): + # GH18203 + result = repr(pd.Grouper(key='A', level='B')) + expected = "Grouper(key='A', level='B', axis=0, sort=False)" + assert result == expected + + class TestGroupBy(MixIn): def test_basic(self): @@ -253,6 +262,29 @@ def test_grouper_column_and_index(self): expected = df_single.reset_index().groupby(['inner', 'B']).mean() assert_frame_equal(result, expected) + def test_groupby_categorical_index_and_columns(self): + # GH18432 + columns = ['A', 'B', 'A', 'B'] + categories = ['B', 'A'] + data = np.ones((5, 4), int) + cat_columns = CategoricalIndex(columns, + categories=categories, + ordered=True) + df = DataFrame(data=data, columns=cat_columns) + result = df.groupby(axis=1, level=0).sum() + expected_data = 2 * np.ones((5, 2), int) + expected_columns = CategoricalIndex(categories, + categories=categories, + ordered=True) + expected = DataFrame(data=expected_data, columns=expected_columns) + assert_frame_equal(result, expected) + + # test transposed version + df = DataFrame(data.T, index=cat_columns) + result = df.groupby(axis=0, 
level=0).sum() + expected = DataFrame(data=expected_data.T, index=expected_columns) + assert_frame_equal(result, expected) + def test_grouper_getting_correct_binner(self): # GH 10063 @@ -3818,7 +3850,7 @@ def h(df, arg3): # Assert the results here index = pd.Index(['A', 'B', 'C'], name='group') - expected = pd.Series([-79.5160891089, -78.4839108911, None], + expected = pd.Series([-79.5160891089, -78.4839108911, -80], index=index) assert_series_equal(expected, result) diff --git a/pandas/tests/groupby/test_timegrouper.py b/pandas/tests/groupby/test_timegrouper.py index c8503b16a0e16..d359bfa5351a9 100644 --- a/pandas/tests/groupby/test_timegrouper.py +++ b/pandas/tests/groupby/test_timegrouper.py @@ -41,12 +41,11 @@ def test_groupby_with_timegrouper(self): df = df.set_index(['Date']) expected = DataFrame( - {'Quantity': np.nan}, + {'Quantity': 0}, index=date_range('20130901 13:00:00', '20131205 13:00:00', freq='5D', name='Date', closed='left')) - expected.iloc[[0, 6, 18], 0] = np.array( - [24., 6., 9.], dtype='float64') + expected.iloc[[0, 6, 18], 0] = np.array([24, 6, 9], dtype='int64') result1 = df.resample('5D') .sum() assert_frame_equal(result1, expected) @@ -245,6 +244,8 @@ def test_timegrouper_with_reg_groups(self): result = df.groupby([pd.Grouper(freq='1M', key='Date')]).sum() assert_frame_equal(result, expected) + @pytest.mark.parametrize('freq', ['D', 'M', 'A', 'Q-APR']) + def test_timegrouper_with_reg_groups_freq(self, freq): # GH 6764 multiple grouping with/without sort df = DataFrame({ 'date': pd.to_datetime([ @@ -258,20 +259,24 @@ def test_timegrouper_with_reg_groups(self): 'cost1': [12, 15, 10, 24, 39, 1, 0, 90, 45, 34, 1, 12] }).set_index('date') - for freq in ['D', 'M', 'A', 'Q-APR']: - expected = df.groupby('user_id')[ - 'whole_cost'].resample( - freq).sum().dropna().reorder_levels( - ['date', 'user_id']).sort_index().astype('int64') - expected.name = 'whole_cost' - - result1 = df.sort_index().groupby([pd.Grouper(freq=freq), - 
'user_id'])['whole_cost'].sum() - assert_series_equal(result1, expected) - - result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ - 'whole_cost'].sum() - assert_series_equal(result2, expected) + expected = ( + df.groupby('user_id')['whole_cost'] + .resample(freq) + .sum(min_count=1) # XXX + .dropna() + .reorder_levels(['date', 'user_id']) + .sort_index() + .astype('int64') + ) + expected.name = 'whole_cost' + + result1 = df.sort_index().groupby([pd.Grouper(freq=freq), + 'user_id'])['whole_cost'].sum() + assert_series_equal(result1, expected) + + result2 = df.groupby([pd.Grouper(freq=freq), 'user_id'])[ + 'whole_cost'].sum() + assert_series_equal(result2, expected) def test_timegrouper_get_group(self): # GH 6914 diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 456e5a9bd6439..3a57337efea6f 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -996,3 +996,16 @@ def test_searchsorted_monotonic(self, indices): # non-monotonic should raise. 
with pytest.raises(ValueError): indices._searchsorted_monotonic(value, side='left') + + def test_putmask_with_wrong_mask(self): + # GH18368 + index = self.create_index() + + with pytest.raises(ValueError): + index.putmask(np.ones(len(index) + 1, np.bool), 1) + + with pytest.raises(ValueError): + index.putmask(np.ones(len(index) - 1, np.bool), 1) + + with pytest.raises(ValueError): + index.putmask('foo', 1) diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 3b40ef092f364..1349f2f761a2f 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -20,11 +20,6 @@ START, END = datetime(2009, 1, 1), datetime(2010, 1, 1) -def eq_gen_range(kwargs, expected): - rng = generate_range(**kwargs) - assert (np.array_equal(list(rng), expected)) - - class TestDateRanges(TestData): def test_date_range_gen_error(self): @@ -201,20 +196,23 @@ def test_generate_cday(self): assert rng1 == rng2 def test_1(self): - eq_gen_range(dict(start=datetime(2009, 3, 25), periods=2), - [datetime(2009, 3, 25), datetime(2009, 3, 26)]) + rng = list(generate_range(start=datetime(2009, 3, 25), periods=2)) + expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)] + assert rng == expected def test_2(self): - eq_gen_range(dict(start=datetime(2008, 1, 1), - end=datetime(2008, 1, 3)), - [datetime(2008, 1, 1), - datetime(2008, 1, 2), - datetime(2008, 1, 3)]) + rng = list(generate_range(start=datetime(2008, 1, 1), + end=datetime(2008, 1, 3))) + expected = [datetime(2008, 1, 1), + datetime(2008, 1, 2), + datetime(2008, 1, 3)] + assert rng == expected def test_3(self): - eq_gen_range(dict(start=datetime(2008, 1, 5), - end=datetime(2008, 1, 6)), - []) + rng = list(generate_range(start=datetime(2008, 1, 5), + end=datetime(2008, 1, 6))) + expected = [] + assert rng == expected def test_precision_finer_than_offset(self): # GH 9907 @@ -236,6 +234,22 @@ def 
test_precision_finer_than_offset(self): tm.assert_index_equal(result1, expected1) tm.assert_index_equal(result2, expected2) + dt1, dt2 = '2017-01-01', '2017-01-01' + tz1, tz2 = 'US/Eastern', 'Europe/London' + + @pytest.mark.parametrize("start,end", [ + (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2)), + (pd.Timestamp(dt1), pd.Timestamp(dt2, tz=tz2)), + (pd.Timestamp(dt1, tz=tz1), pd.Timestamp(dt2, tz=tz2)), + (pd.Timestamp(dt1, tz=tz2), pd.Timestamp(dt2, tz=tz1)) + ]) + def test_mismatching_tz_raises_err(self, start, end): + # issue 18488 + with pytest.raises(TypeError): + pd.date_range(start, end) + with pytest.raises(TypeError): + pd.DatetimeIndex(start, end, freq=BDay()) + class TestBusinessDateRange(object): diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 8d9ac59cf9883..20a9916ad6bc4 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -211,6 +211,40 @@ def test_ufunc_coercions(self): tm.assert_index_equal(result, exp) assert result.freq == 'D' + def test_datetimeindex_sub_timestamp_overflow(self): + dtimax = pd.to_datetime(['now', pd.Timestamp.max]) + dtimin = pd.to_datetime(['now', pd.Timestamp.min]) + + tsneg = Timestamp('1950-01-01') + ts_neg_variants = [tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype('datetime64[ns]'), + tsneg.to_datetime64().astype('datetime64[D]')] + + tspos = Timestamp('1980-01-01') + ts_pos_variants = [tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype('datetime64[ns]'), + tspos.to_datetime64().astype('datetime64[D]')] + + for variant in ts_neg_variants: + with pytest.raises(OverflowError): + dtimax - variant + + expected = pd.Timestamp.max.value - tspos.value + for variant in ts_pos_variants: + res = dtimax - variant + assert res[1].value == expected + + expected = pd.Timestamp.min.value - tsneg.value + for variant in ts_neg_variants: + res = dtimin - variant + assert 
res[1].value == expected + + for variant in ts_pos_variants: + with pytest.raises(OverflowError): + dtimin - variant + def test_week_of_month_frequency(self): # GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise d1 = date(2002, 9, 1) diff --git a/pandas/tests/indexes/datetimes/test_partial_slicing.py b/pandas/tests/indexes/datetimes/test_partial_slicing.py index e7d03aa193cbd..04c180350fb72 100644 --- a/pandas/tests/indexes/datetimes/test_partial_slicing.py +++ b/pandas/tests/indexes/datetimes/test_partial_slicing.py @@ -2,9 +2,10 @@ import pytest -from datetime import datetime +from datetime import datetime, date import numpy as np import pandas as pd +import operator as op from pandas import (DatetimeIndex, Series, DataFrame, date_range, Index, Timedelta, Timestamp) @@ -268,3 +269,21 @@ def test_loc_datetime_length_one(self): result = df.loc['2016-10-01T00:00:00':] tm.assert_frame_equal(result, df) + + @pytest.mark.parametrize('datetimelike', [ + Timestamp('20130101'), datetime(2013, 1, 1), + date(2013, 1, 1), np.datetime64('2013-01-01T00:00', 'ns')]) + @pytest.mark.parametrize('op,expected', [ + (op.lt, [True, False, False, False]), + (op.le, [True, True, False, False]), + (op.eq, [False, True, False, False]), + (op.gt, [False, False, False, True])]) + def test_selection_by_datetimelike(self, datetimelike, op, expected): + # GH issue #17965, test for ability to compare datetime64[ns] columns + # to datetimelike + df = DataFrame({'A': [pd.Timestamp('20120101'), + pd.Timestamp('20130101'), + np.nan, pd.Timestamp('20130103')]}) + result = op(df.A, datetimelike) + expected = Series(expected, name='A') + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 330ec9f357655..c7944c078d8c4 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -960,6 +960,7 @@ def 
test_guess_datetime_format_nopadding(self): for dt_string, dt_format in dt_string_to_format: assert tools._guess_datetime_format(dt_string) == dt_format + @pytest.mark.xfail(reason="GH18141 - dateutil > 2.6.1 broken") def test_guess_datetime_format_for_array(self): tm._skip_if_not_us_locale() expected_format = '%Y-%m-%d %H:%M:%S.%f' diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index d8ec23b9c7e0e..5e40e06d57413 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -4,6 +4,7 @@ import pandas.util.testing as tm from pandas.core.indexes.api import Index, CategoricalIndex +from pandas.core.dtypes.dtypes import CategoricalDtype from .common import Base from pandas.compat import range, PY3 @@ -95,6 +96,11 @@ def test_construction(self): 1, -1, 0], dtype='int8')) assert result.ordered + result = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True) + expected = pd.CategoricalIndex(ci, categories=list('ab'), ordered=True, + dtype='category') + tm.assert_index_equal(result, expected, exact=True) + # turn me to an Index result = Index(np.array(ci)) assert isinstance(result, Index) @@ -125,6 +131,25 @@ def test_construction_with_dtype(self): result = CategoricalIndex(idx, categories=idx, ordered=True) tm.assert_index_equal(result, expected, exact=True) + def test_construction_with_categorical_dtype(self): + # construction with CategoricalDtype + # GH18109 + data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True + dtype = CategoricalDtype(categories=cats, ordered=ordered) + + result = pd.CategoricalIndex(data, dtype=dtype) + expected = pd.CategoricalIndex(data, categories=cats, + ordered=ordered) + tm.assert_index_equal(result, expected, exact=True) + + # error to combine categories or ordered and dtype keywords args + with pytest.raises(ValueError, match="Cannot specify both `dtype` and " + "`categories` or `ordered`."): + pd.CategoricalIndex(data, categories=cats, 
dtype=dtype) + with pytest.raises(ValueError, match="Cannot specify both `dtype` and " + "`categories` or `ordered`."): + pd.CategoricalIndex(data, ordered=ordered, dtype=dtype) + def test_create_categorical(self): # https://github.com/pandas-dev/pandas/pull/17513 # The public CI constructor doesn't hit this code path with diff --git a/pandas/tests/indexes/test_interval.py b/pandas/tests/indexes/test_interval.py index b55bab3a210cc..399d88309072e 100644 --- a/pandas/tests/indexes/test_interval.py +++ b/pandas/tests/indexes/test_interval.py @@ -6,6 +6,7 @@ from pandas import (Interval, IntervalIndex, Index, isna, interval_range, Timestamp, Timedelta, compat, date_range, timedelta_range, DateOffset) +from pandas.compat import zip from pandas.tseries.offsets import Day from pandas._libs.interval import IntervalTree from pandas.tests.indexes.common import Base @@ -13,6 +14,11 @@ import pandas as pd +@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) +def closed(request): + return request.param + + class TestIntervalIndex(Base): _holder = IntervalIndex @@ -22,34 +28,63 @@ def setup_method(self, method): [(0, 1), np.nan, (1, 2)]) self.indices = dict(intervalIndex=tm.makeIntervalIndex(10)) - def create_index(self): - return IntervalIndex.from_breaks(np.arange(10)) + def create_index(self, closed='right'): + return IntervalIndex.from_breaks(np.arange(3), closed=closed) - def test_constructors(self): - expected = self.index - actual = IntervalIndex.from_breaks(np.arange(3), closed='right') - assert expected.equals(actual) + def create_index_with_nan(self, closed='right'): + return IntervalIndex.from_tuples( + [(0, 1), np.nan, (1, 2)], closed=closed) - alternate = IntervalIndex.from_breaks(np.arange(3), closed='left') - assert not expected.equals(alternate) + @pytest.mark.parametrize('name', [None, 'foo']) + def test_constructors(self, closed, name): + left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4]) + ivs = [Interval(l, r, closed=closed) for 
l, r in zip(left, right)] + expected = IntervalIndex._simple_new( + left=left, right=right, closed=closed, name=name) - actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)]) - assert expected.equals(actual) + result = IntervalIndex(ivs, name=name) + tm.assert_index_equal(result, expected) - actual = IntervalIndex([Interval(0, 1), Interval(1, 2)]) - assert expected.equals(actual) + result = IntervalIndex.from_intervals(ivs, name=name) + tm.assert_index_equal(result, expected) - actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1, - closed='right') - assert expected.equals(actual) + result = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name=name) + tm.assert_index_equal(result, expected) - actual = Index([Interval(0, 1), Interval(1, 2)]) - assert isinstance(actual, IntervalIndex) - assert expected.equals(actual) + result = IntervalIndex.from_arrays( + left.values, right.values, closed=closed, name=name) + tm.assert_index_equal(result, expected) - actual = Index(expected) - assert isinstance(actual, IntervalIndex) - assert expected.equals(actual) + result = IntervalIndex.from_tuples( + zip(left, right), closed=closed, name=name) + tm.assert_index_equal(result, expected) + + result = Index(ivs, name=name) + assert isinstance(result, IntervalIndex) + tm.assert_index_equal(result, expected) + + # idempotent + tm.assert_index_equal(Index(expected), expected) + tm.assert_index_equal(IntervalIndex(expected), expected) + + result = IntervalIndex.from_intervals( + expected.values, name=expected.name) + tm.assert_index_equal(result, expected) + + left, right = expected.left, expected.right + result = IntervalIndex.from_arrays( + left, right, closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) + + result = IntervalIndex.from_tuples( + expected.to_tuples(), closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) + + breaks = expected.left.tolist() + [expected.right[-1]] + 
result = IntervalIndex.from_breaks( + breaks, closed=expected.closed, name=expected.name) + tm.assert_index_equal(result, expected) def test_constructors_other(self): @@ -66,43 +101,57 @@ def test_constructors_other(self): def test_constructors_errors(self): # scalar - with pytest.raises(TypeError): + msg = ('IntervalIndex(...) must be called with a collection of ' + 'some kind, 5 was passed') + with pytest.raises(TypeError, message=msg): IntervalIndex(5) # not an interval - with pytest.raises(TypeError): + msg = "type <class 'numpy.int32'> with value 0 is not an interval" + with pytest.raises(TypeError, message=msg): IntervalIndex([0, 1]) - with pytest.raises(TypeError): + with pytest.raises(TypeError, message=msg): IntervalIndex.from_intervals([0, 1]) # invalid closed - with pytest.raises(ValueError): + msg = "invalid options for 'closed': invalid" + with pytest.raises(ValueError, message=msg): IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid') # mismatched closed - with pytest.raises(ValueError): + msg = 'intervals must all be closed on the same side' + with pytest.raises(ValueError, message=msg): IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2, closed='left')]) - with pytest.raises(ValueError): + with pytest.raises(ValueError, message=msg): IntervalIndex.from_arrays([0, 10], [3, 5]) - with pytest.raises(ValueError): + with pytest.raises(ValueError, message=msg): Index([Interval(0, 1), Interval(2, 3, closed='left')]) # no point in nesting periods in an IntervalIndex - with pytest.raises(ValueError): + msg = 'Period dtypes are not supported, use a PeriodIndex instead' + with pytest.raises(ValueError, message=msg): IntervalIndex.from_breaks( pd.period_range('2000-01-01', periods=3)) - def test_constructors_datetimelike(self): + # decreasing breaks/arrays + msg = 'left side of interval must be <= right side' + with pytest.raises(ValueError, message=msg): + IntervalIndex.from_breaks(range(10, -1, -1)) + + with pytest.raises(ValueError, 
message=msg): + IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1)) + + def test_constructors_datetimelike(self, closed): # DTI / TDI for idx in [pd.date_range('20130101', periods=5), pd.timedelta_range('1 day', periods=5)]: - result = IntervalIndex.from_breaks(idx) - expected = IntervalIndex.from_breaks(idx.values) + result = IntervalIndex.from_breaks(idx, closed=closed) + expected = IntervalIndex.from_breaks(idx.values, closed=closed) tm.assert_index_equal(result, expected) expected_scalar_type = type(idx[0]) @@ -117,8 +166,8 @@ def f(): IntervalIndex.from_intervals([0.997, 4.0]) pytest.raises(TypeError, f) - def test_properties(self): - index = self.index + def test_properties(self, closed): + index = self.create_index(closed=closed) assert len(index) == 2 assert index.size == 2 assert index.shape == (2, ) @@ -127,14 +176,15 @@ def test_properties(self): tm.assert_index_equal(index.right, Index([1, 2])) tm.assert_index_equal(index.mid, Index([0.5, 1.5])) - assert index.closed == 'right' + assert index.closed == closed - expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object) + expected = np.array([Interval(0, 1, closed=closed), + Interval(1, 2, closed=closed)], dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) tm.assert_numpy_array_equal(index.values, expected) # with nans - index = self.index_with_nan + index = self.create_index_with_nan(closed=closed) assert len(index) == 3 assert index.size == 3 assert index.shape == (3, ) @@ -143,41 +193,43 @@ def test_properties(self): tm.assert_index_equal(index.right, Index([1, np.nan, 2])) tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5])) - assert index.closed == 'right' + assert index.closed == closed - expected = np.array([Interval(0, 1), np.nan, - Interval(1, 2)], dtype=object) + expected = np.array([Interval(0, 1, closed=closed), np.nan, + Interval(1, 2, closed=closed)], dtype=object) tm.assert_numpy_array_equal(np.asarray(index), expected) 
tm.assert_numpy_array_equal(index.values, expected) - def test_with_nans(self): - index = self.index + def test_with_nans(self, closed): + index = self.create_index(closed=closed) assert not index.hasnans tm.assert_numpy_array_equal(index.isna(), np.array([False, False])) tm.assert_numpy_array_equal(index.notna(), np.array([True, True])) - index = self.index_with_nan + index = self.create_index_with_nan(closed=closed) assert index.hasnans tm.assert_numpy_array_equal(index.notna(), np.array([True, False, True])) tm.assert_numpy_array_equal(index.isna(), np.array([False, True, False])) - def test_copy(self): - actual = self.index.copy() - assert actual.equals(self.index) + def test_copy(self, closed): + expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) + + result = expected.copy() + assert result.equals(expected) - actual = self.index.copy(deep=True) - assert actual.equals(self.index) - assert actual.left is not self.index.left + result = expected.copy(deep=True) + assert result.equals(expected) + assert result.left is not expected.left - def test_ensure_copied_data(self): + def test_ensure_copied_data(self, closed): # exercise the copy flag in the constructor # not copying - index = self.index + index = self.create_index(closed=closed) result = IntervalIndex(index, copy=False) tm.assert_numpy_array_equal(index.left.values, result.left.values, check_same='same') @@ -191,23 +243,34 @@ def test_ensure_copied_data(self): tm.assert_numpy_array_equal(index.right.values, result.right.values, check_same='copy') - def test_equals(self): + def test_equals(self, closed): + expected = IntervalIndex.from_breaks(np.arange(5), closed=closed) + assert expected.equals(expected) + assert expected.equals(expected.copy()) - idx = self.index - assert idx.equals(idx) - assert idx.equals(idx.copy()) + assert not expected.equals(expected.astype(object)) + assert not expected.equals(np.array(expected)) + assert not expected.equals(list(expected)) - assert not 
idx.equals(idx.astype(object)) - assert not idx.equals(np.array(idx)) - assert not idx.equals(list(idx)) + assert not expected.equals([1, 2]) + assert not expected.equals(np.array([1, 2])) + assert not expected.equals(pd.date_range('20130101', periods=2)) - assert not idx.equals([1, 2]) - assert not idx.equals(np.array([1, 2])) - assert not idx.equals(pd.date_range('20130101', periods=2)) + expected_name1 = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name='foo') + expected_name2 = IntervalIndex.from_breaks( + np.arange(5), closed=closed, name='bar') + assert expected.equals(expected_name1) + assert expected_name1.equals(expected_name2) - def test_astype(self): + for other_closed in {'left', 'right', 'both', 'neither'} - {closed}: + expected_other_closed = IntervalIndex.from_breaks( + np.arange(5), closed=other_closed) + assert not expected.equals(expected_other_closed) - idx = self.index + def test_astype(self, closed): + + idx = self.create_index(closed=closed) for dtype in [np.int64, np.float64, 'datetime64[ns]', 'datetime64[ns, US/Eastern]', 'timedelta64', @@ -227,24 +290,24 @@ def test_astype(self): expected = pd.Categorical(idx, ordered=True) tm.assert_categorical_equal(result, expected) - def test_where(self): - expected = self.index - result = self.index.where(self.index.notna()) + def test_where(self, closed): + expected = self.create_index(closed=closed) + result = expected.where(expected.notna()) tm.assert_index_equal(result, expected) - idx = IntervalIndex.from_breaks([1, 2]) + idx = IntervalIndex.from_breaks([1, 2], closed=closed) result = idx.where([True, False]) expected = IntervalIndex.from_intervals( - [Interval(1.0, 2.0, closed='right'), np.nan]) + [Interval(1.0, 2.0, closed=closed), np.nan]) tm.assert_index_equal(result, expected) def test_where_array_like(self): pass - def test_delete(self): - expected = IntervalIndex.from_breaks([1, 2]) - actual = self.index.delete(0) - assert expected.equals(actual) + def test_delete(self, 
closed): + expected = IntervalIndex.from_breaks([1, 2], closed=closed) + result = self.create_index(closed=closed).delete(0) + tm.assert_index_equal(result, expected) def test_insert(self): expected = IntervalIndex.from_breaks(range(4)) @@ -255,113 +318,128 @@ def test_insert(self): pytest.raises(ValueError, self.index.insert, 0, Interval(2, 3, closed='left')) - def test_take(self): - actual = self.index.take([0, 1]) - assert self.index.equals(actual) + def test_take(self, closed): + index = self.create_index(closed=closed) - expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2]) - actual = self.index.take([0, 0, 1]) - assert expected.equals(actual) + actual = index.take([0, 1]) + tm.assert_index_equal(actual, index) + + expected = IntervalIndex.from_arrays( + [0, 0, 1], [1, 1, 2], closed=closed) + actual = index.take([0, 0, 1]) + tm.assert_index_equal(actual, expected) - def test_unique(self): + def test_unique(self, closed): # unique non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (2, 3), (4, 5)], closed=closed) assert idx.is_unique # unique overlapping - distinct endpoints - idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)]) + idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed) assert idx.is_unique # unique overlapping - shared endpoints - idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)]) + idx = pd.IntervalIndex.from_tuples( + [(1, 2), (1, 3), (2, 3)], closed=closed) assert idx.is_unique # unique nested - idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)]) + idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed) assert idx.is_unique # duplicate - idx = IntervalIndex.from_tuples([(0, 1), (0, 1), (2, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (0, 1), (2, 3)], closed=closed) assert not idx.is_unique # unique mixed - idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')]) + idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], 
closed=closed) assert idx.is_unique # duplicate mixed - idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b'), (0, 1)]) + idx = IntervalIndex.from_tuples( + [(0, 1), ('a', 'b'), (0, 1)], closed=closed) assert not idx.is_unique # empty - idx = IntervalIndex([]) + idx = IntervalIndex([], closed=closed) assert idx.is_unique - def test_monotonic(self): + def test_monotonic(self, closed): # increasing non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (2, 3), (4, 5)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing non-overlapping - idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)]) + idx = IntervalIndex.from_tuples( + [(4, 5), (2, 3), (1, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert idx._is_strictly_monotonic_decreasing # unordered non-overlapping - idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 1), (4, 5), (2, 3)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # increasing overlapping - idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)]) + idx = IntervalIndex.from_tuples( + [(0, 2), (0.5, 2.5), (1, 3)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing overlapping - idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)]) + idx = IntervalIndex.from_tuples( + [(1, 3), (0.5, 2.5), (0, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert 
idx._is_strictly_monotonic_decreasing # unordered overlapping - idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)]) + idx = IntervalIndex.from_tuples( + [(0.5, 2.5), (0, 2), (1, 3)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # increasing overlapping shared endpoints - idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)]) + idx = pd.IntervalIndex.from_tuples( + [(1, 2), (1, 3), (2, 3)], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert not idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # decreasing overlapping shared endpoints - idx = pd.IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)]) + idx = pd.IntervalIndex.from_tuples( + [(2, 3), (1, 3), (1, 2)], closed=closed) assert not idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert idx._is_strictly_monotonic_decreasing # stationary - idx = IntervalIndex.from_tuples([(0, 1), (0, 1)]) + idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed) assert idx.is_monotonic assert not idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing assert not idx._is_strictly_monotonic_decreasing # empty - idx = IntervalIndex([]) + idx = IntervalIndex([], closed=closed) assert idx.is_monotonic assert idx._is_strictly_monotonic_increasing assert idx.is_monotonic_decreasing @@ -395,24 +473,24 @@ def test_repr_max_seq_item_setting(self): def test_repr_roundtrip(self): super(TestIntervalIndex, self).test_repr_roundtrip() - def test_get_item(self): + def test_get_item(self, closed): i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan), - closed='right') - assert i[0] == Interval(0.0, 1.0) - assert i[1] == Interval(1.0, 2.0) + closed=closed) + assert i[0] == Interval(0.0, 1.0, closed=closed) + assert i[1] == Interval(1.0, 2.0, 
closed=closed) assert isna(i[2]) result = i[0:1] - expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right') + expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed) tm.assert_index_equal(result, expected) result = i[0:2] - expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right') + expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed) tm.assert_index_equal(result, expected) result = i[1:3] expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan), - closed='right') + closed=closed) tm.assert_index_equal(result, expected) def test_get_loc_value(self): @@ -581,20 +659,22 @@ def testcontains(self): assert not i.contains(20) assert not i.contains(-20) - def test_dropna(self): + def test_dropna(self, closed): - expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)]) + expected = IntervalIndex.from_tuples( + [(0.0, 1.0), (1.0, 2.0)], closed=closed) - ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan]) + ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed) result = ii.dropna() tm.assert_index_equal(result, expected) - ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan]) + ii = IntervalIndex.from_arrays( + [0, 1, np.nan], [1, 2, np.nan], closed=closed) result = ii.dropna() tm.assert_index_equal(result, expected) - def test_non_contiguous(self): - index = IntervalIndex.from_tuples([(0, 1), (2, 3)]) + def test_non_contiguous(self, closed): + index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed) target = [0.5, 1.5, 2.5] actual = index.get_indexer(target) expected = np.array([0, -1, 1], dtype='intp') @@ -602,31 +682,32 @@ def test_non_contiguous(self): assert 1.5 not in index - def test_union(self): - other = IntervalIndex.from_arrays([2], [3]) - expected = IntervalIndex.from_arrays(range(3), range(1, 4)) - actual = self.index.union(other) + def test_union(self, closed): + idx = self.create_index(closed=closed) + other = IntervalIndex.from_arrays([2], [3], 
closed=closed) + expected = IntervalIndex.from_arrays( + range(3), range(1, 4), closed=closed) + actual = idx.union(other) assert expected.equals(actual) - actual = other.union(self.index) + actual = other.union(idx) assert expected.equals(actual) - tm.assert_index_equal(self.index.union(self.index), self.index) - tm.assert_index_equal(self.index.union(self.index[:1]), - self.index) + tm.assert_index_equal(idx.union(idx), idx) + tm.assert_index_equal(idx.union(idx[:1]), idx) - def test_intersection(self): - other = IntervalIndex.from_breaks([1, 2, 3]) - expected = IntervalIndex.from_breaks([1, 2]) - actual = self.index.intersection(other) + def test_intersection(self, closed): + idx = self.create_index(closed=closed) + other = IntervalIndex.from_breaks([1, 2, 3], closed=closed) + expected = IntervalIndex.from_breaks([1, 2], closed=closed) + actual = idx.intersection(other) assert expected.equals(actual) - tm.assert_index_equal(self.index.intersection(self.index), - self.index) + tm.assert_index_equal(idx.intersection(idx), idx) - def test_difference(self): - tm.assert_index_equal(self.index.difference(self.index[:1]), - self.index[1:]) + def test_difference(self, closed): + idx = self.create_index(closed=closed) + tm.assert_index_equal(idx.difference(idx[:1]), idx[1:]) def test_symmetric_difference(self): result = self.index[:1].symmetric_difference(self.index[1:]) @@ -639,11 +720,12 @@ def test_set_operation_errors(self): other = IntervalIndex.from_breaks([0, 1, 2], closed='neither') pytest.raises(ValueError, self.index.union, other) - def test_isin(self): - actual = self.index.isin(self.index) + def test_isin(self, closed): + idx = self.create_index(closed=closed) + actual = idx.isin(idx) tm.assert_numpy_array_equal(np.array([True, True]), actual) - actual = self.index.isin(self.index[:1]) + actual = idx.isin(idx[:1]) tm.assert_numpy_array_equal(np.array([True, False]), actual) def test_comparison(self): @@ -702,25 +784,28 @@ def test_comparison(self): with 
pytest.raises(ValueError): self.index > np.arange(3) - def test_missing_values(self): - idx = pd.Index([np.nan, pd.Interval(0, 1), pd.Interval(1, 2)]) - idx2 = pd.IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2]) + def test_missing_values(self, closed): + idx = Index([np.nan, Interval(0, 1, closed=closed), + Interval(1, 2, closed=closed)]) + idx2 = IntervalIndex.from_arrays( + [np.nan, 0, 1], [np.nan, 1, 2], closed=closed) assert idx.equals(idx2) with pytest.raises(ValueError): - IntervalIndex.from_arrays([np.nan, 0, 1], np.array([0, 1, 2])) + IntervalIndex.from_arrays( + [np.nan, 0, 1], np.array([0, 1, 2]), closed=closed) tm.assert_numpy_array_equal(isna(idx), np.array([True, False, False])) - def test_sort_values(self): - expected = IntervalIndex.from_breaks([1, 2, 3, 4]) - actual = IntervalIndex.from_tuples([(3, 4), (1, 2), - (2, 3)]).sort_values() + def test_sort_values(self, closed): + expected = IntervalIndex.from_breaks([1, 2, 3, 4], closed=closed) + actual = IntervalIndex.from_tuples( + [(3, 4), (1, 2), (2, 3)], closed=closed).sort_values() tm.assert_index_equal(expected, actual) # nan - idx = self.index_with_nan + idx = self.create_index_with_nan(closed=closed) mask = idx.isna() tm.assert_numpy_array_equal(mask, np.array([False, True, False])) @@ -733,84 +818,83 @@ def test_sort_values(self): tm.assert_numpy_array_equal(mask, np.array([True, False, False])) def test_datetime(self): - dates = pd.date_range('2000', periods=3) + dates = date_range('2000', periods=3) idx = IntervalIndex.from_breaks(dates) tm.assert_index_equal(idx.left, dates[:2]) tm.assert_index_equal(idx.right, dates[-2:]) - expected = pd.date_range('2000-01-01T12:00', periods=2) + expected = date_range('2000-01-01T12:00', periods=2) tm.assert_index_equal(idx.mid, expected) - assert pd.Timestamp('2000-01-01T12') not in idx - assert pd.Timestamp('2000-01-01T12') not in idx + assert Timestamp('2000-01-01T12') not in idx + assert Timestamp('2000-01-01T12') not in idx - target = 
pd.date_range('1999-12-31T12:00', periods=7, freq='12H') + target = date_range('1999-12-31T12:00', periods=7, freq='12H') actual = idx.get_indexer(target) expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp') tm.assert_numpy_array_equal(actual, expected) - def test_append(self): + def test_append(self, closed): - index1 = IntervalIndex.from_arrays([0, 1], [1, 2]) - index2 = IntervalIndex.from_arrays([1, 2], [2, 3]) + index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed) + index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed) result = index1.append(index2) - expected = IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3]) + expected = IntervalIndex.from_arrays( + [0, 1, 1, 2], [1, 2, 2, 3], closed=closed) tm.assert_index_equal(result, expected) result = index1.append([index1, index2]) - expected = IntervalIndex.from_arrays([0, 1, 0, 1, 1, 2], - [1, 2, 1, 2, 2, 3]) + expected = IntervalIndex.from_arrays( + [0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed) tm.assert_index_equal(result, expected) - def f(): - index1.append(IntervalIndex.from_arrays([0, 1], [1, 2], - closed='both')) - - pytest.raises(ValueError, f) + msg = ('can only append two IntervalIndex objects that are closed ' + 'on the same side') + for other_closed in {'left', 'right', 'both', 'neither'} - {closed}: + index_other_closed = IntervalIndex.from_arrays( + [0, 1], [1, 2], closed=other_closed) + with tm.assert_raises_regex(ValueError, msg): + index1.append(index_other_closed) - def test_is_non_overlapping_monotonic(self): + def test_is_non_overlapping_monotonic(self, closed): # Should be True in all cases tpls = [(0, 1), (2, 3), (4, 5), (6, 7)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is True + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is True - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) 
- assert idx.is_non_overlapping_monotonic is True + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert idx.is_non_overlapping_monotonic is True # Should be False in all cases (overlapping) tpls = [(0, 2), (1, 3), (4, 5), (6, 7)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) - assert idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) + assert idx.is_non_overlapping_monotonic is False # Should be False in all cases (non-monotonic) tpls = [(0, 1), (2, 3), (6, 7), (4, 5)] - for closed in ('left', 'right', 'neither', 'both'): - idx = IntervalIndex.from_tuples(tpls, closed=closed) - assert idx.is_non_overlapping_monotonic is False - - idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) - assert idx.is_non_overlapping_monotonic is False + idx = IntervalIndex.from_tuples(tpls, closed=closed) + assert idx.is_non_overlapping_monotonic is False - # Should be False for closed='both', overwise True (GH16560) - idx = IntervalIndex.from_breaks(range(4), closed='both') + idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed) assert idx.is_non_overlapping_monotonic is False - for closed in ('left', 'right', 'neither'): + # Should be False for closed='both', overwise True (GH16560) + if closed == 'both': + idx = IntervalIndex.from_breaks(range(4), closed=closed) + assert idx.is_non_overlapping_monotonic is False + else: idx = IntervalIndex.from_breaks(range(4), closed=closed) assert idx.is_non_overlapping_monotonic is True class TestIntervalRange(object): - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_numeric(self, closed): # combinations of 
start/end/periods without freq expected = IntervalIndex.from_breaks( @@ -848,7 +932,6 @@ def test_construction_from_numeric(self, closed): closed=closed) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_timestamp(self, closed): # combinations of start/end/periods without freq start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06') @@ -915,7 +998,6 @@ def test_construction_from_timestamp(self, closed): closed=closed) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both']) def test_construction_from_timedelta(self, closed): # combinations of start/end/periods without freq start, end = Timedelta('1 day'), Timedelta('6 days') diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 18bfc3d0efbee..c9c4029786c64 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -2980,3 +2980,13 @@ def test_nan_stays_float(self): assert pd.isna(df0.index.get_level_values(1)).all() # the following failed in 0.14.1 assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() + + def test_million_record_attribute_error(self): + # GH 18165 + r = list(range(1000000)) + df = pd.DataFrame({'a': r, 'b': r}, + index=pd.MultiIndex.from_tuples([(x, x) for x in r])) + + with tm.assert_raises_regex(AttributeError, + "'Series' object has no attribute 'foo'"): + df['a'].foo() diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index f4f669ee1d087..3cf56dc5115c2 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -1282,3 +1282,23 @@ def test_add_overflow(self): result = (to_timedelta([pd.NaT, '5 days', '1 hours']) + to_timedelta(['7 seconds', pd.NaT, '4 hours'])) tm.assert_index_equal(result, exp) + + def test_timedeltaindex_add_timestamp_nat_masking(self): + # GH17991 
checking for overflow-masking with NaT + tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT']) + + tsneg = Timestamp('1950-01-01') + ts_neg_variants = [tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype('datetime64[ns]'), + tsneg.to_datetime64().astype('datetime64[D]')] + + tspos = Timestamp('1980-01-01') + ts_pos_variants = [tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype('datetime64[ns]'), + tspos.to_datetime64().astype('datetime64[D]')] + + for variant in ts_neg_variants + ts_pos_variants: + res = tdinat + variant + assert res[1] is pd.NaT diff --git a/pandas/tests/indexing/test_timedelta.py b/pandas/tests/indexing/test_timedelta.py index 32609362e49af..3ad3b771b2ab2 100644 --- a/pandas/tests/indexing/test_timedelta.py +++ b/pandas/tests/indexing/test_timedelta.py @@ -2,6 +2,7 @@ import pandas as pd from pandas.util import testing as tm +import numpy as np class TestTimedeltaIndexing(object): @@ -47,3 +48,23 @@ def test_string_indexing(self): expected = df.iloc[0] sliced = df.loc['0 days'] tm.assert_series_equal(sliced, expected) + + @pytest.mark.parametrize( + "value", + [None, pd.NaT, np.nan]) + def test_masked_setitem(self, value): + # issue (#18586) + series = pd.Series([0, 1, 2], dtype='timedelta64[ns]') + series[series == series[0]] = value + expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]') + tm.assert_series_equal(series, expected) + + @pytest.mark.parametrize( + "value", + [None, pd.NaT, np.nan]) + def test_listlike_setitem(self, value): + # issue (#18586) + series = pd.Series([0, 1, 2], dtype='timedelta64[ns]') + series.iloc[0] = value + expected = pd.Series([pd.NaT, 1, 2], dtype='timedelta64[ns]') + tm.assert_series_equal(series, expected) diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index c182db35c0c89..4e59779cb9b47 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1245,7 +1245,9 @@ class 
TestCanHoldElement(object): @pytest.mark.parametrize('value, dtype', [ (1, 'i8'), (1.0, 'f8'), + (2**63, 'f8'), (1j, 'complex128'), + (2**63, 'complex128'), (True, 'bool'), (np.timedelta64(20, 'ns'), '<m8[ns]'), (np.datetime64(20, 'ns'), '<M8[ns]'), diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py new file mode 100644 index 0000000000000..828d5d0ccd3c6 --- /dev/null +++ b/pandas/tests/io/conftest.py @@ -0,0 +1,74 @@ +import os + +import moto +import pytest +from pandas.io.parsers import read_table + +HERE = os.path.dirname(__file__) + + +@pytest.fixture(scope='module') +def tips_file(): + """Path to the tips dataset""" + return os.path.join(HERE, 'parser', 'data', 'tips.csv') + + +@pytest.fixture(scope='module') +def jsonl_file(): + """Path a JSONL dataset""" + return os.path.join(HERE, 'parser', 'data', 'items.jsonl') + + +@pytest.fixture(scope='module') +def salaries_table(): + """DataFrame with the salaries dataset""" + path = os.path.join(HERE, 'parser', 'data', 'salaries.csv') + return read_table(path) + + +@pytest.fixture(scope='module') +def s3_resource(tips_file, jsonl_file): + """Fixture for mocking S3 interaction. + + The primary bucket name is "pandas-test". The following datasets + are loaded. + + - tips.csv + - tips.csv.gz + - tips.csv.bz2 + - items.jsonl + + A private bucket "cant_get_it" is also created. The boto3 s3 resource + is yielded by the fixture. 
+ """ + pytest.importorskip('s3fs') + moto.mock_s3().start() + + test_s3_files = [ + ('tips.csv', tips_file), + ('tips.csv.gz', tips_file + '.gz'), + ('tips.csv.bz2', tips_file + '.bz2'), + ('items.jsonl', jsonl_file), + ] + + def add_tips_files(bucket_name): + for s3_key, file_name in test_s3_files: + with open(file_name, 'rb') as f: + conn.Bucket(bucket_name).put_object( + Key=s3_key, + Body=f) + + boto3 = pytest.importorskip('boto3') + # see gh-16135 + bucket = 'pandas-test' + + conn = boto3.resource("s3", region_name="us-east-1") + conn.create_bucket(Bucket=bucket) + add_tips_files(bucket) + + conn.create_bucket(Bucket='cant_get_it', ACL='private') + add_tips_files('cant_get_it') + + yield conn + + moto.mock_s3().stop() diff --git a/pandas/tests/io/data/stata13_dates.dta b/pandas/tests/io/data/stata13_dates.dta new file mode 100644 index 0000000000000..87b857559e501 Binary files /dev/null and b/pandas/tests/io/data/stata13_dates.dta differ diff --git a/pandas/tests/io/formats/test_to_latex.py b/pandas/tests/io/formats/test_to_latex.py index aa86d1d9231fb..5504ac942f688 100644 --- a/pandas/tests/io/formats/test_to_latex.py +++ b/pandas/tests/io/formats/test_to_latex.py @@ -91,6 +91,29 @@ def test_to_latex_format(self, frame): assert withindex_result == withindex_expected + def test_to_latex_empty(self): + df = DataFrame() + result = df.to_latex() + expected = r"""\begin{tabular}{l} +\toprule +Empty DataFrame +Columns: Index([], dtype='object') +Index: Index([], dtype='object') \\ +\bottomrule +\end{tabular} +""" + assert result == expected + + result = df.to_latex(longtable=True) + expected = r"""\begin{longtable}{l} +\toprule +Empty DataFrame +Columns: Index([], dtype='object') +Index: Index([], dtype='object') \\ +\end{longtable} +""" + assert result == expected + def test_to_latex_with_formatters(self): df = DataFrame({'int': [1, 2, 3], 'float': [1.0, 2.0, 3.0], @@ -221,6 +244,28 @@ def test_to_latex_multiindex(self): assert result == expected + def 
test_to_latex_multiindex_dupe_level(self): + # see gh-14484 + # + # If an index is repeated in subsequent rows, it should be + # replaced with a blank in the created table. This should + # ONLY happen if all higher order indices (to the left) are + # equal too. In this test, 'c' has to be printed both times + # because the higher order index 'A' != 'B'. + df = pd.DataFrame(index=pd.MultiIndex.from_tuples( + [('A', 'c'), ('B', 'c')]), columns=['col']) + result = df.to_latex() + expected = r"""\begin{tabular}{lll} +\toprule + & & col \\ +\midrule +A & c & NaN \\ +B & c & NaN \\ +\bottomrule +\end{tabular} +""" + assert result == expected + def test_to_latex_multicolumnrow(self): df = pd.DataFrame({ ('c1', 0): dict((x, x) for x in range(5)), @@ -355,7 +400,7 @@ def test_to_latex_longtable(self, frame): 1 & 2 & b2 \\ \end{longtable} """ - + open("expected.txt", "w").write(withindex_result) assert withindex_result == withindex_expected withoutindex_result = df.to_latex(index=False, longtable=True) @@ -365,7 +410,7 @@ def test_to_latex_longtable(self, frame): \midrule \endhead \midrule -\multicolumn{3}{r}{{Continued on next page}} \\ +\multicolumn{2}{r}{{Continued on next page}} \\ \midrule \endfoot @@ -378,6 +423,14 @@ def test_to_latex_longtable(self, frame): assert withoutindex_result == withoutindex_expected + df = DataFrame({'a': [1, 2]}) + with1column_result = df.to_latex(index=False, longtable=True) + assert "\multicolumn{1}" in with1column_result + + df = DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]}) + with3columns_result = df.to_latex(index=False, longtable=True) + assert "\multicolumn{3}" in with3columns_result + def test_to_latex_escape_special_chars(self): special_characters = ['&', '%', '$', '#', '_', '{', '}', '~', '^', '\\'] diff --git a/pandas/tests/io/json/test_normalize.py b/pandas/tests/io/json/test_normalize.py index 49b765b18d623..1cceae32cd748 100644 --- a/pandas/tests/io/json/test_normalize.py +++ b/pandas/tests/io/json/test_normalize.py @@ 
-173,6 +173,21 @@ def test_meta_name_conflict(self): for val in ['metafoo', 'metabar', 'foo', 'bar']: assert val in result + def test_meta_parameter_not_modified(self): + # GH 18610 + data = [{'foo': 'hello', + 'bar': 'there', + 'data': [{'foo': 'something', 'bar': 'else'}, + {'foo': 'something2', 'bar': 'else2'}]}] + + COLUMNS = ['foo', 'bar'] + result = json_normalize(data, 'data', meta=COLUMNS, + meta_prefix='meta') + + assert COLUMNS == ['foo', 'bar'] + for val in ['metafoo', 'metabar', 'foo', 'bar']: + assert val in result + def test_record_prefix(self, state_data): result = json_normalize(state_data[0], 'counties') expected = DataFrame(state_data[0]['counties']) diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index 6625446bea469..78e33f8966d1f 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -4,7 +4,6 @@ from pandas.compat import (range, lrange, StringIO, OrderedDict, is_platform_32bit) import os - import numpy as np from pandas import (Series, DataFrame, DatetimeIndex, Timestamp, read_json, compat) @@ -1030,6 +1029,70 @@ def test_tz_range_is_utc(self): df = DataFrame({'DT': dti}) assert dumps(df, iso_dates=True) == dfexp + def test_read_inline_jsonl(self): + # GH9180 + result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_s3_jsonl(self, s3_resource): + pytest.importorskip('s3fs') + # GH17200 + + result = read_json('s3n://pandas-test/items.jsonl', lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_read_local_jsonl(self): + # GH17200 + with ensure_clean('tmp_items.json') as path: + with open(path, 'w') as infile: + infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n') + result = read_json(path, lines=True) + expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + 
assert_frame_equal(result, expected) + + def test_read_jsonl_unicode_chars(self): + # GH15132: non-ascii unicode characters + # \u201d == RIGHT DOUBLE QUOTATION MARK + + # simulate file handle + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + json = StringIO(json) + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + # simulate string + json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n' + result = read_json(json, lines=True) + expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]], + columns=['a', 'b']) + assert_frame_equal(result, expected) + + def test_to_jsonl(self): + # GH9180 + df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":1,"b":2}\n{"a":1,"b":2}' + assert result == expected + + df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b']) + result = df.to_json(orient="records", lines=True) + expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}' + assert result == expected + assert_frame_equal(pd.read_json(result, lines=True), df) + + # GH15096: escaped characters in columns and data + df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], + columns=["a\\", 'b']) + result = df.to_json(orient="records", lines=True) + expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n' + '{"a\\\\":"foo\\"","b":"bar"}') + assert result == expected + assert_frame_equal(pd.read_json(result, lines=True), df) + def test_latin_encoding(self): if compat.PY2: tm.assert_raises_regex( diff --git a/pandas/tests/io/parser/c_parser_only.py b/pandas/tests/io/parser/c_parser_only.py index c68b2bf064d97..6d476e326213e 100644 --- a/pandas/tests/io/parser/c_parser_only.py +++ b/pandas/tests/io/parser/c_parser_only.py @@ -290,11 +290,11 @@ def test_empty_header_read(count): test_empty_header_read(count) def test_parse_trim_buffers(self): - # This test is part of a 
bugfix for issue #13703. It attmepts to + # This test is part of a bugfix for issue #13703. It attempts to # to stress the system memory allocator, to cause it to move the # stream buffer and either let the OS reclaim the region, or let # other memory requests of parser otherwise modify the contents - # of memory space, where it was formely located. + # of memory space, where it was formerly located. # This test is designed to cause a `segfault` with unpatched # `tokenizer.c`. Sometimes the test fails on `segfault`, other # times it fails due to memory corruption, which causes the @@ -346,7 +346,7 @@ def test_parse_trim_buffers(self): # Generate the expected output: manually create the dataframe # by splitting by comma and repeating the `n_lines` times. - row = tuple(val_ if val_ else float("nan") + row = tuple(val_ if val_ else np.nan for val_ in record_.split(",")) expected = pd.DataFrame([row for _ in range(n_lines)], dtype=object, columns=None, index=None) @@ -359,6 +359,15 @@ def test_parse_trim_buffers(self): # Check for data corruption if there was no segfault tm.assert_frame_equal(result, expected) + # This extra test was added to replicate the fault in gh-5291. + # Force 'utf-8' encoding, so that `_string_convert` would take + # a different execution branch. 
+ chunks_ = self.read_csv(StringIO(csv_data), header=None, + dtype=object, chunksize=chunksize, + encoding='utf_8') + result = pd.concat(chunks_, axis=0, ignore_index=True) + tm.assert_frame_equal(result, expected) + def test_internal_null_byte(self): # see gh-14012 # diff --git a/pandas/tests/io/parser/common.py b/pandas/tests/io/parser/common.py index e85d3ad294655..6a996213b28bb 100644 --- a/pandas/tests/io/parser/common.py +++ b/pandas/tests/io/parser/common.py @@ -823,7 +823,7 @@ def test_parse_integers_above_fp_precision(self): 17007000002000192, 17007000002000194]}) - assert np.array_equal(result['Numbers'], expected['Numbers']) + tm.assert_series_equal(result['Numbers'], expected['Numbers']) def test_chunks_have_consistent_numerical_type(self): integers = [str(i) for i in range(499999)] diff --git a/pandas/tests/io/parser/compression.py b/pandas/tests/io/parser/compression.py index 797c12139656d..84db9d14eee07 100644 --- a/pandas/tests/io/parser/compression.py +++ b/pandas/tests/io/parser/compression.py @@ -7,6 +7,7 @@ import pytest +import pandas as pd import pandas.util.testing as tm @@ -157,6 +158,19 @@ def test_read_csv_infer_compression(self): inputs[3].close() + def test_read_csv_compressed_utf16_example(self): + # GH18071 + path = tm.get_data_path('utf16_ex_small.zip') + + result = self.read_csv(path, encoding='utf-16', + compression='zip', sep='\t') + expected = pd.DataFrame({ + u'Country': [u'Venezuela', u'Venezuela'], + u'Twitter': [u'Hugo Chávez Frías', u'Henrique Capriles R.'] + }) + + tm.assert_frame_equal(result, expected) + def test_invalid_compression(self): msg = 'Unrecognized compression type: sfark' with tm.assert_raises_regex(ValueError, msg): diff --git a/pandas/tests/io/parser/data/items.jsonl b/pandas/tests/io/parser/data/items.jsonl new file mode 100644 index 0000000000000..f784d37befa82 --- /dev/null +++ b/pandas/tests/io/parser/data/items.jsonl @@ -0,0 +1,2 @@ +{"a": 1, "b": 2} +{"b":2, "a" :1} diff --git 
a/pandas/tests/io/parser/data/utf16_ex_small.zip b/pandas/tests/io/parser/data/utf16_ex_small.zip new file mode 100644 index 0000000000000..b0560c1b1f6c4 Binary files /dev/null and b/pandas/tests/io/parser/data/utf16_ex_small.zip differ diff --git a/pandas/tests/io/parser/dtypes.py b/pandas/tests/io/parser/dtypes.py index 7d3df6201a390..b91ce04673e29 100644 --- a/pandas/tests/io/parser/dtypes.py +++ b/pandas/tests/io/parser/dtypes.py @@ -114,6 +114,17 @@ def test_categorical_dtype(self): actual = self.read_csv(StringIO(data), dtype='category') tm.assert_frame_equal(actual, expected) + @pytest.mark.slow + def test_categorical_dtype_high_cardinality_numeric(self): + # GH 18186 + data = np.sort([str(i) for i in range(524289)]) + expected = DataFrame({'a': Categorical(data, ordered=True)}) + actual = self.read_csv(StringIO('a\n' + '\n'.join(data)), + dtype='category') + actual["a"] = actual["a"].cat.reorder_categories( + np.sort(actual.a.cat.categories), ordered=True) + tm.assert_frame_equal(actual, expected) + def test_categorical_dtype_encoding(self): # GH 10153 pth = tm.get_data_path('unicode_series.csv') diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py index 7fbf174e19eee..8dc599b42ddc7 100644 --- a/pandas/tests/io/parser/na_values.py +++ b/pandas/tests/io/parser/na_values.py @@ -312,3 +312,21 @@ def test_empty_na_values_no_default_with_index(self): out = self.read_csv(StringIO(data), keep_default_na=False, index_col=0) tm.assert_frame_equal(out, expected) + + def test_no_na_filter_on_index(self): + # see gh-5239 + data = "a,b,c\n1,,3\n4,5,6" + + # Don't parse NA-values in index when na_filter=False. + out = self.read_csv(StringIO(data), index_col=[1], na_filter=False) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, + index=Index(["", "5"], name="b")) + tm.assert_frame_equal(out, expected) + + # Parse NA-values in index when na_filter=True. 
+ out = self.read_csv(StringIO(data), index_col=[1], na_filter=True) + + expected = DataFrame({"a": [1, 4], "c": [3, 6]}, + index=Index([np.nan, 5.0], name="b")) + tm.assert_frame_equal(out, expected) diff --git a/pandas/tests/io/parser/parse_dates.py b/pandas/tests/io/parser/parse_dates.py index 90103e7bf26b0..4c0f67fa6876a 100644 --- a/pandas/tests/io/parser/parse_dates.py +++ b/pandas/tests/io/parser/parse_dates.py @@ -656,3 +656,21 @@ def test_parse_date_column_with_empty_string(self): [621, ' ']] expected = DataFrame(expected_data, columns=['case', 'opdate']) tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("data,expected", [ + ("a\n135217135789158401\n1352171357E+5", + DataFrame({"a": [135217135789158401, + 135217135700000]}, dtype="float64")), + ("a\n99999999999\n123456789012345\n1234E+0", + DataFrame({"a": [99999999999, + 123456789012345, + 1234]}, dtype="float64")) + ]) + @pytest.mark.parametrize("parse_dates", [True, False]) + def test_parse_date_float(self, data, expected, parse_dates): + # see gh-2697 + # + # Date parsing should fail, so we leave the data untouched + # (i.e. float precision should remain unchanged). + result = self.read_csv(StringIO(data), parse_dates=parse_dates) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 27cc708889fa2..d00d3f31ce189 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -4,10 +4,7 @@ Tests parsers ability to read and parse non-local files and hence require a network connection to be read. 
""" -import os - import pytest -import moto import pandas.util.testing as tm from pandas import DataFrame @@ -15,51 +12,6 @@ from pandas.compat import BytesIO -@pytest.fixture(scope='module') -def tips_file(): - return os.path.join(tm.get_data_path(), 'tips.csv') - - -@pytest.fixture(scope='module') -def salaries_table(): - path = os.path.join(tm.get_data_path(), 'salaries.csv') - return read_table(path) - - -@pytest.fixture(scope='module') -def s3_resource(tips_file): - pytest.importorskip('s3fs') - moto.mock_s3().start() - - test_s3_files = [ - ('tips.csv', tips_file), - ('tips.csv.gz', tips_file + '.gz'), - ('tips.csv.bz2', tips_file + '.bz2'), - ] - - def add_tips_files(bucket_name): - for s3_key, file_name in test_s3_files: - with open(file_name, 'rb') as f: - conn.Bucket(bucket_name).put_object( - Key=s3_key, - Body=f) - - boto3 = pytest.importorskip('boto3') - # see gh-16135 - bucket = 'pandas-test' - - conn = boto3.resource("s3", region_name="us-east-1") - conn.create_bucket(Bucket=bucket) - add_tips_files(bucket) - - conn.create_bucket(Bucket='cant_get_it', ACL='private') - add_tips_files('cant_get_it') - - yield conn - - moto.mock_s3().stop() - - @pytest.mark.network @pytest.mark.parametrize( "compression,extension", diff --git a/pandas/tests/io/parser/test_textreader.py b/pandas/tests/io/parser/test_textreader.py index c9088d2ecc5e7..f66f9ccf065f7 100644 --- a/pandas/tests/io/parser/test_textreader.py +++ b/pandas/tests/io/parser/test_textreader.py @@ -161,9 +161,9 @@ def test_skip_bad_lines(self): error_bad_lines=False, warn_bad_lines=False) result = reader.read() - expected = {0: ['a', 'd', 'g', 'l'], - 1: ['b', 'e', 'h', 'm'], - 2: ['c', 'f', 'i', 'n']} + expected = {0: np.array(['a', 'd', 'g', 'l'], dtype=object), + 1: np.array(['b', 'e', 'h', 'm'], dtype=object), + 2: np.array(['c', 'f', 'i', 'n'], dtype=object)} assert_array_dicts_equal(result, expected) reader = TextReader(StringIO(data), delimiter=':', @@ -189,8 +189,10 @@ def 
test_header_not_enough_lines(self): assert header == expected recs = reader.read() - expected = {0: [1, 4], 1: [2, 5], 2: [3, 6]} - assert_array_dicts_equal(expected, recs) + expected = {0: np.array([1, 4], dtype=np.int64), + 1: np.array([2, 5], dtype=np.int64), + 2: np.array([3, 6], dtype=np.int64)} + assert_array_dicts_equal(recs, expected) # not enough rows pytest.raises(parser.ParserError, TextReader, StringIO(data), @@ -203,14 +205,16 @@ def test_header_not_enough_lines_as_recarray(self): '1,2,3\n' '4,5,6') - reader = TextReader(StringIO(data), delimiter=',', header=2, - as_recarray=True) + reader = TextReader(StringIO(data), delimiter=',', + header=2, as_recarray=True) header = reader.header expected = [['a', 'b', 'c']] assert header == expected recs = reader.read() - expected = {'a': [1, 4], 'b': [2, 5], 'c': [3, 6]} + expected = {'a': np.array([1, 4], dtype=np.int64), + 'b': np.array([2, 5], dtype=np.int64), + 'c': np.array([3, 6], dtype=np.int64)} assert_array_dicts_equal(expected, recs) # not enough rows @@ -225,7 +229,7 @@ def test_escapechar(self): reader = TextReader(StringIO(data), delimiter=',', header=None, escapechar='\\') result = reader.read() - expected = {0: ['"hello world"'] * 3} + expected = {0: np.array(['"hello world"'] * 3, dtype=object)} assert_array_dicts_equal(result, expected) def test_eof_has_eol(self): @@ -360,7 +364,7 @@ def test_empty_field_eof(self): result = TextReader(StringIO(data), delimiter=',').read() - expected = {0: np.array([1, 4]), + expected = {0: np.array([1, 4], dtype=np.int64), 1: np.array(['2', ''], dtype=object), 2: np.array(['3', ''], dtype=object)} assert_array_dicts_equal(result, expected) @@ -397,4 +401,5 @@ def test_empty_csv_input(self): def assert_array_dicts_equal(left, right): for k, v in compat.iteritems(left): - assert(np.array_equal(v, right[k])) + assert tm.assert_numpy_array_equal(np.asarray(v), + np.asarray(right[k])) diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py 
index 940a331a9de84..b5d1435c29cb7 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -18,7 +18,7 @@ try: DataFrame({'A': [1, 2]}).to_clipboard() _DEPS_INSTALLED = 1 -except PyperclipException: +except (PyperclipException, RuntimeError): _DEPS_INSTALLED = 0 diff --git a/pandas/tests/io/test_packers.py b/pandas/tests/io/test_packers.py index a28adcf1ee771..bc58ea1c7c228 100644 --- a/pandas/tests/io/test_packers.py +++ b/pandas/tests/io/test_packers.py @@ -180,6 +180,15 @@ def test_scalar_float(self): x_rec = self.encode_decode(x) tm.assert_almost_equal(x, x_rec) + def test_scalar_bool(self): + x = np.bool_(1) + x_rec = self.encode_decode(x) + tm.assert_almost_equal(x, x_rec) + + x = np.bool_(0) + x_rec = self.encode_decode(x) + tm.assert_almost_equal(x, x_rec) + def test_scalar_complex(self): x = np.random.rand() + 1j * np.random.rand() x_rec = self.encode_decode(x) @@ -263,7 +272,7 @@ def test_numpy_array_complex(self): x.dtype == x_rec.dtype) def test_list_mixed(self): - x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')] + x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo'), np.bool_(1)] x_rec = self.encode_decode(x) # current msgpack cannot distinguish list/tuple tm.assert_almost_equal(tuple(x), x_rec) @@ -401,6 +410,7 @@ def setup_method(self, method): 'G': [Timestamp('20130102', tz='US/Eastern')] * 5, 'H': Categorical([1, 2, 3, 4, 5]), 'I': Categorical([1, 2, 3, 4, 5], ordered=True), + 'J': (np.bool_(1), 2, 3, 4, 5), } self.d['float'] = Series(data['A']) @@ -410,6 +420,7 @@ def setup_method(self, method): self.d['dt_tz'] = Series(data['G']) self.d['cat_ordered'] = Series(data['H']) self.d['cat_unordered'] = Series(data['I']) + self.d['numpy_bool_mixed'] = Series(data['J']) def test_basic(self): diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index ecd4e8f719014..8c88cf076319b 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -105,7 +105,7 @@ def 
test_options_py(df_compat, pa): with pd.option_context('io.parquet.engine', 'pyarrow'): df.to_parquet(path) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -118,7 +118,7 @@ def test_options_fp(df_compat, fp): with pd.option_context('io.parquet.engine', 'fastparquet'): df.to_parquet(path, compression=None) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -130,7 +130,7 @@ def test_options_auto(df_compat, fp, pa): with pd.option_context('io.parquet.engine', 'auto'): df.to_parquet(path) - result = read_parquet(path, compression=None) + result = read_parquet(path) tm.assert_frame_equal(result, df) @@ -162,7 +162,7 @@ def test_cross_engine_pa_fp(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=pa, compression=None) - result = read_parquet(path, engine=fp, compression=None) + result = read_parquet(path, engine=fp) tm.assert_frame_equal(result, df) @@ -174,37 +174,40 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp): with tm.ensure_clean() as path: df.to_parquet(path, engine=fp, compression=None) - result = read_parquet(path, engine=pa, compression=None) + result = read_parquet(path, engine=pa) tm.assert_frame_equal(result, df) class Base(object): def check_error_on_write(self, df, engine, exc): - # check that we are raising the exception - # on writing - + # check that we are raising the exception on writing with pytest.raises(exc): with tm.ensure_clean() as path: to_parquet(df, path, engine, compression=None) - def check_round_trip(self, df, engine, expected=None, **kwargs): - + def check_round_trip(self, df, engine, expected=None, + write_kwargs=None, read_kwargs=None, + check_names=True): + if write_kwargs is None: + write_kwargs = {} + if read_kwargs is None: + read_kwargs = {} with tm.ensure_clean() as path: - df.to_parquet(path, engine, **kwargs) - result = read_parquet(path, engine) + 
df.to_parquet(path, engine, **write_kwargs) + result = read_parquet(path, engine, **read_kwargs) if expected is None: expected = df - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected, check_names=check_names) # repeat - to_parquet(df, path, engine, **kwargs) - result = pd.read_parquet(path, engine) + to_parquet(df, path, engine, **write_kwargs) + result = pd.read_parquet(path, engine, **read_kwargs) if expected is None: expected = df - tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, expected, check_names=check_names) class TestBasic(Base): @@ -222,7 +225,7 @@ def test_columns_dtypes(self, engine): # unicode df.columns = [u'foo', u'bar'] - self.check_round_trip(df, engine, compression=None) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) def test_columns_dtypes_invalid(self, engine): @@ -243,44 +246,94 @@ def test_columns_dtypes_invalid(self, engine): datetime.datetime(2011, 1, 1, 1, 1)] self.check_error_on_write(df, engine, ValueError) - def test_write_with_index(self, engine): + @pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli']) + def test_compression(self, engine, compression): + + if compression == 'snappy': + pytest.importorskip('snappy') + + elif compression == 'brotli': + pytest.importorskip('brotli') df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, compression=None) + self.check_round_trip(df, engine, + write_kwargs={'compression': compression}) - # non-default index - for index in [[2, 3, 4], - pd.date_range('20130101', periods=3), - list('abc'), - [1, 3, 4], - pd.MultiIndex.from_tuples([('a', 1), ('a', 2), - ('b', 1)]), - ]: + def test_read_columns(self, engine): + # GH18154 + df = pd.DataFrame({'string': list('abc'), + 'int': list(range(1, 4))}) + + expected = pd.DataFrame({'string': list('abc')}) + self.check_round_trip(df, engine, expected=expected, + write_kwargs={'compression': None}, + read_kwargs={'columns': ['string']}) + 
+ def test_write_index(self, engine): + check_names = engine != 'fastparquet' + + if engine == 'pyarrow': + import pyarrow + if LooseVersion(pyarrow.__version__) < LooseVersion('0.7.0'): + pytest.skip("pyarrow is < 0.7.0") + df = pd.DataFrame({'A': [1, 2, 3]}) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) + + indexes = [ + [2, 3, 4], + pd.date_range('20130101', periods=3), + list('abc'), + [1, 3, 4], + ] + # non-default index + for index in indexes: df.index = index - self.check_error_on_write(df, engine, ValueError) + self.check_round_trip( + df, engine, + write_kwargs={'compression': None}, + check_names=check_names) # index with meta-data df.index = [0, 1, 2] df.index.name = 'foo' - self.check_error_on_write(df, engine, ValueError) + self.check_round_trip(df, engine, write_kwargs={'compression': None}) - # column multi-index - df.index = [0, 1, 2] - df.columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]), - self.check_error_on_write(df, engine, ValueError) - - @pytest.mark.parametrize('compression', [None, 'gzip', 'snappy', 'brotli']) - def test_compression(self, engine, compression): + def test_write_multiindex(self, pa_ge_070): + # Not supported in fastparquet as of 0.1.3 or older pyarrow version + engine = pa_ge_070 - if compression == 'snappy': - pytest.importorskip('snappy') + df = pd.DataFrame({'A': [1, 2, 3]}) + index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]) + df.index = index + self.check_round_trip(df, engine, write_kwargs={'compression': None}) - elif compression == 'brotli': - pytest.importorskip('brotli') + def test_write_column_multiindex(self, engine): + # column multi-index + mi_columns = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]) + df = pd.DataFrame(np.random.randn(4, 3), columns=mi_columns) + self.check_error_on_write(df, engine, ValueError) - df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, compression=compression) + def 
test_multiindex_with_columns(self, pa_ge_070): + + engine = pa_ge_070 + dates = pd.date_range('01-Jan-2018', '01-Dec-2018', freq='MS') + df = pd.DataFrame(np.random.randn(2 * len(dates), 3), + columns=list('ABC')) + index1 = pd.MultiIndex.from_product( + [['Level1', 'Level2'], dates], + names=['level', 'date']) + index2 = index1.copy(names=None) + for index in [index1, index2]: + df.index = index + with tm.ensure_clean() as path: + df.to_parquet(path, engine) + result = read_parquet(path, engine) + expected = df + tm.assert_frame_equal(result, expected) + result = read_parquet(path, engine, columns=['A', 'B']) + expected = df[['A', 'B']] + tm.assert_frame_equal(result, expected) class TestParquetPyArrow(Base): @@ -307,14 +360,12 @@ def test_basic(self, pa): self.check_round_trip(df, pa) def test_duplicate_columns(self, pa): - # not currently able to handle duplicate columns df = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('aaa')).copy() self.check_error_on_write(df, pa, ValueError) def test_unsupported(self, pa): - # period df = pd.DataFrame({'a': pd.period_range('2013', freq='M', periods=3)}) self.check_error_on_write(df, pa, ValueError) @@ -368,7 +419,7 @@ def test_basic(self, fp): 'timedelta': pd.timedelta_range('1 day', periods=3), }) - self.check_round_trip(df, fp, compression=None) + self.check_round_trip(df, fp, write_kwargs={'compression': None}) @pytest.mark.skip(reason="not supported") def test_duplicate_columns(self, fp): @@ -381,7 +432,8 @@ def test_duplicate_columns(self, fp): def test_bool_with_none(self, fp): df = pd.DataFrame({'a': [True, None, False]}) expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16') - self.check_round_trip(df, fp, expected=expected, compression=None) + self.check_round_trip(df, fp, expected=expected, + write_kwargs={'compression': None}) def test_unsupported(self, fp): @@ -397,7 +449,7 @@ def test_categorical(self, fp): if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"): 
pytest.skip("CategoricalDtype not supported for older fp") df = pd.DataFrame({'a': pd.Categorical(list('abc'))}) - self.check_round_trip(df, fp, compression=None) + self.check_round_trip(df, fp, write_kwargs={'compression': None}) def test_datetime_tz(self, fp): # doesn't preserve tz @@ -407,4 +459,13 @@ def test_datetime_tz(self, fp): # warns on the coercion with catch_warnings(record=True): self.check_round_trip(df, fp, df.astype('datetime64[ns]'), - compression=None) + write_kwargs={'compression': None}) + + def test_filter_row_groups(self, fp): + d = {'a': list(range(0, 3))} + df = pd.DataFrame(d) + with tm.ensure_clean() as path: + df.to_parquet(path, fp, compression=None, + row_group_offsets=1) + result = read_parquet(path, fp, filters=[('a', '==', 0)]) + assert len(result) == 1 diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index a97747b93369f..a7cc6b711802e 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -4928,6 +4928,25 @@ def test_categorical_conversion(self): result = read_hdf(path, 'df', where='obsids=B') tm.assert_frame_equal(result, expected) + def test_categorical_nan_only_columns(self): + # GH18413 + # Check that read_hdf with categorical columns with NaN-only values can + # be read back. 
+ df = pd.DataFrame({ + 'a': ['a', 'b', 'c', np.nan], + 'b': [np.nan, np.nan, np.nan, np.nan], + 'c': [1, 2, 3, 4], + 'd': pd.Series([None] * 4, dtype=object) + }) + df['a'] = df.a.astype('category') + df['b'] = df.b.astype('category') + df['d'] = df.b.astype('category') + expected = df + with ensure_clean_path(self.path) as path: + df.to_hdf(path, 'df', format='table', data_columns=True) + result = read_hdf(path, 'df') + tm.assert_frame_equal(result, expected) + def test_duplicate_column_name(self): df = DataFrame(columns=["a", "a"], data=[[0, 0]]) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 2df43158b5370..4528565eefa0c 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -88,6 +88,7 @@ "TextCol" TEXT, "DateCol" TEXT, "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, "FloatCol" REAL, "IntCol" INTEGER, "BoolCol" INTEGER, @@ -98,6 +99,7 @@ `TextCol` TEXT, `DateCol` DATETIME, `IntDateCol` INTEGER, + `IntDateOnlyCol` INTEGER, `FloatCol` DOUBLE, `IntCol` INTEGER, `BoolCol` BOOLEAN, @@ -109,6 +111,7 @@ "DateCol" TIMESTAMP, "DateColWithTz" TIMESTAMP WITH TIME ZONE, "IntDateCol" INTEGER, + "IntDateOnlyCol" INTEGER, "FloatCol" DOUBLE PRECISION, "IntCol" INTEGER, "BoolCol" BOOLEAN, @@ -120,31 +123,33 @@ 'sqlite': { 'query': """ INSERT INTO types_test_data - VALUES(?, ?, ?, ?, ?, ?, ?, ?) + VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?) 
""", 'fields': ( - 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', - 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + 'TextCol', 'DateCol', 'IntDateCol', 'IntDateOnlyCol', + 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', + 'BoolColWithNull' ) }, 'mysql': { 'query': """ INSERT INTO types_test_data - VALUES("%s", %s, %s, %s, %s, %s, %s, %s) + VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s) """, 'fields': ( - 'TextCol', 'DateCol', 'IntDateCol', 'FloatCol', - 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' + 'TextCol', 'DateCol', 'IntDateCol', 'IntDateOnlyCol', + 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', + 'BoolColWithNull' ) }, 'postgresql': { 'query': """ INSERT INTO types_test_data - VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s) + VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) """, 'fields': ( 'TextCol', 'DateCol', 'DateColWithTz', - 'IntDateCol', 'FloatCol', + 'IntDateCol', 'IntDateOnlyCol', 'FloatCol', 'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull' ) }, @@ -313,13 +318,13 @@ def _load_raw_sql(self): self.drop_table('types_test_data') self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor]) ins = SQL_STRINGS['insert_test_types'][self.flavor] - data = [ { 'TextCol': 'first', 'DateCol': '2000-01-03 00:00:00', 'DateColWithTz': '2000-01-01 00:00:00-08:00', 'IntDateCol': 535852800, + 'IntDateOnlyCol': 20101010, 'FloatCol': 10.10, 'IntCol': 1, 'BoolCol': False, @@ -331,6 +336,7 @@ def _load_raw_sql(self): 'DateCol': '2000-01-04 00:00:00', 'DateColWithTz': '2000-06-01 00:00:00-07:00', 'IntDateCol': 1356998400, + 'IntDateOnlyCol': 20101212, 'FloatCol': 10.10, 'IntCol': 1, 'BoolCol': False, @@ -610,20 +616,42 @@ def test_date_parsing(self): df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates=['DateCol']) assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + pd.Timestamp(2000, 1, 3, 0, 0, 0), + pd.Timestamp(2000, 1, 4, 0, 0, 0) + ] df = 
sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'}) assert issubclass(df.DateCol.dtype.type, np.datetime64) + assert df.DateCol.tolist() == [ + pd.Timestamp(2000, 1, 3, 0, 0, 0), + pd.Timestamp(2000, 1, 4, 0, 0, 0) + ] df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates=['IntDateCol']) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + pd.Timestamp(1986, 12, 25, 0, 0, 0), + pd.Timestamp(2013, 1, 1, 0, 0, 0) + ] df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, parse_dates={'IntDateCol': 's'}) - assert issubclass(df.IntDateCol.dtype.type, np.datetime64) + assert df.IntDateCol.tolist() == [ + pd.Timestamp(1986, 12, 25, 0, 0, 0), + pd.Timestamp(2013, 1, 1, 0, 0, 0) + ] + + df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn, + parse_dates={'IntDateOnlyCol': '%Y%m%d'}) + assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64) + assert df.IntDateOnlyCol.tolist() == [ + pd.Timestamp('2010-10-10'), + pd.Timestamp('2010-12-12') + ] def test_date_and_index(self): # Test case where same column appears in parse_date and index_col diff --git a/pandas/tests/io/test_stata.py b/pandas/tests/io/test_stata.py index 055a490bc6b5d..78b47960e1a04 100644 --- a/pandas/tests/io/test_stata.py +++ b/pandas/tests/io/test_stata.py @@ -96,6 +96,8 @@ def setup_method(self, method): self.dta24_111 = os.path.join(self.dirpath, 'stata7_111.dta') + self.stata_dates = os.path.join(self.dirpath, 'stata13_dates.dta') + def read_dta(self, file): # Legacy default reader configuration return read_stata(file, convert_dates=True) @@ -1327,3 +1329,22 @@ def test_set_index(self): df.to_stata(path) reread = pd.read_stata(path, index_col='index') tm.assert_frame_equal(df, reread) + + @pytest.mark.parametrize( + 'column', ['ms', 'day', 'week', 'month', 'qtr', 'half', 'yr']) + def test_date_parsing_ignores_format_details(self, column): + # 
GH 17797 + # + # Test that display formats are ignored when determining if a numeric + # column is a date value. + # + # All date types are stored as numbers and format associated with the + # column denotes both the type of the date and the display format. + # + # STATA supports 9 date types which each have distinct units. We test 7 + # of the 9 types, ignoring %tC and %tb. %tC is a variant of %tc that + # accounts for leap seconds and %tb relies on STATAs business calendar. + df = read_stata(self.stata_dates) + unformatted = df.loc[0, column] + formatted = df.loc[0, column + "_fmt"] + assert unformatted == formatted diff --git a/pandas/tests/plotting/test_converter.py b/pandas/tests/plotting/test_converter.py index e1f64bed5598d..3818c04649366 100644 --- a/pandas/tests/plotting/test_converter.py +++ b/pandas/tests/plotting/test_converter.py @@ -1,20 +1,144 @@ +import subprocess import pytest from datetime import datetime, date import numpy as np -from pandas import Timestamp, Period, Index +from pandas import Timestamp, Period, Index, date_range, Series from pandas.compat import u +import pandas.core.config as cf import pandas.util.testing as tm from pandas.tseries.offsets import Second, Milli, Micro, Day from pandas.compat.numpy import np_datetime64_compat converter = pytest.importorskip('pandas.plotting._converter') +from pandas.plotting import (register_matplotlib_converters, + deregister_matplotlib_converters) def test_timtetonum_accepts_unicode(): assert (converter.time2num("00:01") == converter.time2num(u("00:01"))) +class TestRegistration(object): + + def test_register_by_default(self): + # Run in subprocess to ensure a clean state + code = ("'import matplotlib.units; " + "import pandas as pd; " + "units = dict(matplotlib.units.registry); " + "assert pd.Timestamp in units)'") + call = ['python', '-c', code] + assert subprocess.check_call(call) == 0 + + def test_warns(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), 
index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + # Set to the "warning" state, in case this isn't the first test run + converter._WARN = True + with tm.assert_produces_warning(FutureWarning, + check_stacklevel=False) as w: + ax.plot(s.index, s.values) + plt.close() + + assert len(w) == 1 + assert "Using an implicitly registered datetime converter" in str(w[0]) + + def test_registering_no_warning(self): + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + # Set to the "warn" state, in case this isn't the first test run + converter._WARN = True + register_matplotlib_converters() + with tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + def test_pandas_plots_register(self): + pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + # Set to the "warn" state, in case this isn't the first test run + converter._WARN = True + with tm.assert_produces_warning(None) as w: + s.plot() + + assert len(w) == 0 + + def test_matplotlib_formatters(self): + units = pytest.importorskip("matplotlib.units") + assert Timestamp in units.registry + + ctx = cf.option_context("plotting.matplotlib.register_converters", + False) + with ctx: + assert Timestamp not in units.registry + + assert Timestamp in units.registry + + def test_option_no_warning(self): + pytest.importorskip("matplotlib.pyplot") + ctx = cf.option_context("plotting.matplotlib.register_converters", + False) + plt = pytest.importorskip("matplotlib.pyplot") + s = Series(range(12), index=date_range('2017', periods=12)) + _, ax = plt.subplots() + + converter._WARN = True + # Test without registering first, no warning + with ctx: + with tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + # Now test with registering + converter._WARN = True + register_matplotlib_converters() + with ctx: + with 
tm.assert_produces_warning(None) as w: + ax.plot(s.index, s.values) + + assert len(w) == 0 + + def test_registry_resets(self): + units = pytest.importorskip("matplotlib.units") + dates = pytest.importorskip("matplotlib.dates") + + # make a copy, to reset to + original = dict(units.registry) + + try: + # get to a known state + units.registry.clear() + date_converter = dates.DateConverter() + units.registry[datetime] = date_converter + units.registry[date] = date_converter + + register_matplotlib_converters() + assert units.registry[date] is not date_converter + deregister_matplotlib_converters() + assert units.registry[date] is date_converter + + finally: + # restore original state + units.registry.clear() + for k, v in original.items(): + units.registry[k] = v + + def test_old_import_warns(self): + with tm.assert_produces_warning(FutureWarning) as w: + from pandas.tseries import converter + converter.register() + + assert len(w) + assert ('pandas.plotting.register_matplotlib_converters' in + str(w[0].message)) + + class TestDateTimeConverter(object): def setup_method(self, method): diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index d66012e2a56a0..d6cedac747f25 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1,13 +1,14 @@ """ Test cases for time series specific (freq conversion, etc) """ from datetime import datetime, timedelta, date, time +import pickle import pytest from pandas.compat import lrange, zip import numpy as np from pandas import Index, Series, DataFrame, NaT -from pandas.compat import is_platform_mac +from pandas.compat import is_platform_mac, PY3 from pandas.core.indexes.datetimes import date_range, bdate_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.tseries.offsets import DateOffset @@ -1470,5 +1471,12 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs): with 
ensure_clean(return_filelike=True) as path: plt.savefig(path) + + # GH18439 + # this is supported only in Python 3 pickle since + # pickle in Python2 doesn't support instancemethod pickling + if PY3: + with ensure_clean(return_filelike=True) as path: + pickle.dump(fig, path) finally: plt.close(fig) diff --git a/pandas/tests/plotting/test_misc.py b/pandas/tests/plotting/test_misc.py index 6f476553091d9..54a512d14fef4 100644 --- a/pandas/tests/plotting/test_misc.py +++ b/pandas/tests/plotting/test_misc.py @@ -201,6 +201,7 @@ def test_parallel_coordinates(self): with tm.assert_produces_warning(FutureWarning): parallel_coordinates(df, 'Name', colors=colors) + @pytest.mark.xfail(reason="unreliable test") def test_parallel_coordinates_with_sorted_labels(self): """ For #15908 """ from pandas.plotting import parallel_coordinates diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 84a15cab34cd0..11368e44943d8 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1594,7 +1594,9 @@ def test_concat_series_axis1_same_names_ignore_index(self): s2 = Series(randn(len(dates)), index=dates, name='value') result = concat([s1, s2], axis=1, ignore_index=True) - assert np.array_equal(result.columns, [0, 1]) + expected = Index([0, 1]) + + tm.assert_index_equal(result.columns, expected) def test_concat_iterables(self): from collections import deque, Iterable @@ -1981,3 +1983,21 @@ def test_concat_will_upcast(dt, pdt): pdt(np.array([5], dtype=dt, ndmin=dims))] x = pd.concat(dfs) assert x.values.dtype == 'float64' + + +def test_concat_empty_and_non_empty_frame_regression(): + # GH 18178 regression test + df1 = pd.DataFrame({'foo': [1]}) + df2 = pd.DataFrame({'foo': []}) + expected = pd.DataFrame({'foo': [1.0]}) + result = pd.concat([df1, df2]) + assert_frame_equal(result, expected) + + +def test_concat_empty_and_non_empty_series_regression(): + # GH 18187 regression test + s1 = pd.Series([1]) + s2 = 
pd.Series([]) + expected = s1 + result = pd.concat([s1, s2]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/reshape/test_merge.py b/pandas/tests/reshape/test_merge.py index 172667c9a0fb8..33d91af21c723 100644 --- a/pandas/tests/reshape/test_merge.py +++ b/pandas/tests/reshape/test_merge.py @@ -861,6 +861,12 @@ def test_validation(self): result = merge(left, right, on=['a', 'b'], validate='1:1') assert_frame_equal(result, expected_multi) + def test_merge_two_empty_df_no_division_error(self): + # GH17776, PR #17846 + a = pd.DataFrame({'a': [], 'b': [], 'c': []}) + with np.errstate(divide='raise'): + merge(a, a, on=('a', 'b')) + def _check_merge(x, y): for how in ['inner', 'left', 'outer']: diff --git a/pandas/tests/reshape/test_merge_asof.py b/pandas/tests/reshape/test_merge_asof.py index 78bfa2ff8597c..4b2680b9be592 100644 --- a/pandas/tests/reshape/test_merge_asof.py +++ b/pandas/tests/reshape/test_merge_asof.py @@ -973,3 +973,15 @@ def test_on_float_by_int(self): columns=['symbol', 'exch', 'price', 'mpv']) assert_frame_equal(result, expected) + + def test_merge_datatype_error(self): + """ Tests merge datatype mismatch error """ + msg = 'merge keys \[0\] object and int64, must be the same type' + + left = pd.DataFrame({'left_val': [1, 5, 10], + 'a': ['a', 'b', 'c']}) + right = pd.DataFrame({'right_val': [1, 2, 3, 6, 7], + 'a': [1, 2, 3, 6, 7]}) + + with tm.assert_raises_regex(MergeError, msg): + merge_asof(left, right, on='a') diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 135e4c544de41..0e69371511294 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -125,12 +125,13 @@ def test_round_nat(klass): def test_NaT_methods(): # GH 9513 + # GH 17329 for `timestamp` raise_methods = ['astimezone', 'combine', 'ctime', 'dst', 'fromordinal', 'fromtimestamp', 'isocalendar', 'strftime', 'strptime', 'time', 'timestamp', 'timetuple', 'timetz', 'toordinal', 'tzname', 'utcfromtimestamp', 
'utcnow', 'utcoffset', - 'utctimetuple'] + 'utctimetuple', 'timestamp'] nat_methods = ['date', 'now', 'replace', 'to_datetime', 'today', 'tz_convert', 'tz_localize'] nan_methods = ['weekday', 'isoweekday'] diff --git a/pandas/tests/scalar/test_timestamp.py b/pandas/tests/scalar/test_timestamp.py index c1b9f858a08de..4053257fbd2c8 100644 --- a/pandas/tests/scalar/test_timestamp.py +++ b/pandas/tests/scalar/test_timestamp.py @@ -19,7 +19,7 @@ from pandas._libs import tslib, period from pandas._libs.tslibs.timezones import get_timezone -from pandas.compat import lrange, long +from pandas.compat import lrange, long, PY3 from pandas.util.testing import assert_series_equal from pandas.compat.numpy import np_datetime64_compat from pandas import (Timestamp, date_range, Period, Timedelta, compat, @@ -1079,6 +1079,28 @@ def test_is_leap_year(self): dt = Timestamp('2100-01-01 00:00:00', tz=tz) assert not dt.is_leap_year + def test_timestamp(self): + # GH#17329 + # tz-naive --> treat it as if it were UTC for purposes of timestamp() + ts = Timestamp.now() + uts = ts.replace(tzinfo=utc) + assert ts.timestamp() == uts.timestamp() + + tsc = Timestamp('2014-10-11 11:00:01.12345678', tz='US/Central') + utsc = tsc.tz_convert('UTC') + + # utsc is a different representation of the same time + assert tsc.timestamp() == utsc.timestamp() + + if PY3: + + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + + # should agree with datetime.timestamp method + dt = ts.to_pydatetime() + assert dt.timestamp() == ts.timestamp() + class TestTimestampNsOperations(object): diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 8cc40bb5146c5..d6db2ab83098b 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -28,40 +28,124 @@ class TestSeriesAnalytics(TestData): @pytest.mark.parametrize("use_bottleneck", [True, False]) - @pytest.mark.parametrize("method", ["sum", "prod"]) - def 
test_empty(self, method, use_bottleneck): - + @pytest.mark.parametrize("method, unit", [ + ("sum", 0.0), + ("prod", 1.0) + ]) + def test_empty(self, method, unit, use_bottleneck): with pd.option_context("use_bottleneck", use_bottleneck): - # GH 9422 - # treat all missing as NaN + # GH 9422 / 18921 + # Entirely empty s = Series([]) + # NA by default result = getattr(s, method)() + assert result == unit + + # Explict + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) assert isna(result) + # Skipna, default result = getattr(s, method)(skipna=True) + result == unit + + # Skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) assert isna(result) + # All-NA s = Series([np.nan]) + # NA by default result = getattr(s, method)() + assert result == unit + + # Explicit + result = getattr(s, method)(min_count=0) + assert result == unit + + result = getattr(s, method)(min_count=1) assert isna(result) + # Skipna, default result = getattr(s, method)(skipna=True) + result == unit + + # skipna, explicit + result = getattr(s, method)(skipna=True, min_count=0) + assert result == unit + + result = getattr(s, method)(skipna=True, min_count=1) assert isna(result) + # Mix of valid, empty s = Series([np.nan, 1]) + # Default result = getattr(s, method)() assert result == 1.0 - s = Series([np.nan, 1]) + # Explicit + result = getattr(s, method)(min_count=0) + assert result == 1.0 + + result = getattr(s, method)(min_count=1) + assert result == 1.0 + + # Skipna result = getattr(s, method)(skipna=True) assert result == 1.0 + result = getattr(s, method)(skipna=True, min_count=0) + assert result == 1.0 + + result = getattr(s, method)(skipna=True, min_count=1) + assert result == 1.0 + # GH #844 (changed in 9422) df = DataFrame(np.empty((10, 0))) - assert (df.sum(1).isnull()).all() + assert (getattr(df, method)(1) == unit).all() + + s = 
pd.Series([1]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = pd.Series([np.nan]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + s = pd.Series([np.nan, 1]) + result = getattr(s, method)(min_count=2) + assert isna(result) + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0.0), + ('prod', 1.0), + ]) + def test_empty_multi(self, method, unit): + s = pd.Series([1, np.nan, np.nan, np.nan], + index=pd.MultiIndex.from_product([('a', 'b'), (0, 1)])) + # 1 / 0 by default + result = getattr(s, method)(level=0) + expected = pd.Series([1, unit], index=['a', 'b']) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = getattr(s, method)(level=0, min_count=0) + expected = pd.Series([1, unit], index=['a', 'b']) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = getattr(s, method)(level=0, min_count=1) + expected = pd.Series([1, np.nan], index=['a', 'b']) + tm.assert_series_equal(result, expected) @pytest.mark.parametrize( - "method", ['sum', 'mean', 'median', 'std', 'var']) + "method", ['mean', 'median', 'std', 'var']) def test_ops_consistency_on_empty(self, method): # GH 7869 @@ -109,7 +193,7 @@ def test_sum_overflow(self, use_bottleneck): assert np.allclose(float(result), v[-1]) def test_sum(self): - self._check_stat_op('sum', np.sum, check_allna=True) + self._check_stat_op('sum', np.sum, check_allna=False) def test_sum_inf(self): s = Series(np.random.randn(10)) @@ -848,6 +932,12 @@ def test_value_counts_nunique(self): result = series.nunique() assert result == 11 + # GH 18051 + s = pd.Series(pd.Categorical([])) + assert s.nunique() == 0 + s = pd.Series(pd.Categorical([np.nan])) + assert s.nunique() == 0 + def test_unique(self): # 714 also, dtype=float @@ -920,6 +1010,14 @@ def test_drop_duplicates(self): sc.drop_duplicates(keep=False, inplace=True) assert_series_equal(sc, s[~expected]) + # GH 18051 + s = pd.Series(pd.Categorical([])) + tm.assert_categorical_equal(s.unique(), 
pd.Categorical([]), + check_dtype=False) + s = pd.Series(pd.Categorical([np.nan])) + tm.assert_categorical_equal(s.unique(), pd.Categorical([np.nan]), + check_dtype=False) + def test_clip(self): val = self.ts.median() diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index bd4e8b23f31b4..5ca4eba4da13b 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -636,17 +636,21 @@ def test_valid(self): def test_isna(self): ser = Series([0, 5.4, 3, nan, -0.001]) - np.array_equal(ser.isna(), - Series([False, False, False, True, False]).values) + expected = Series([False, False, False, True, False]) + tm.assert_series_equal(ser.isna(), expected) + ser = Series(["hi", "", nan]) - np.array_equal(ser.isna(), Series([False, False, True]).values) + expected = Series([False, False, True]) + tm.assert_series_equal(ser.isna(), expected) def test_notna(self): ser = Series([0, 5.4, 3, nan, -0.001]) - np.array_equal(ser.notna(), - Series([True, True, True, False, True]).values) + expected = Series([True, True, True, False, True]) + tm.assert_series_equal(ser.notna(), expected) + ser = Series(["hi", "", nan]) - np.array_equal(ser.notna(), Series([True, True, False]).values) + expected = Series([True, True, False]) + tm.assert_series_equal(ser.notna(), expected) def test_pad_nan(self): x = Series([np.nan, 1., np.nan, 3., np.nan], ['z', 'a', 'b', 'c', 'd'], diff --git a/pandas/tests/series/test_quantile.py b/pandas/tests/series/test_quantile.py index cf5e3fe4f29b0..255367523a3d8 100644 --- a/pandas/tests/series/test_quantile.py +++ b/pandas/tests/series/test_quantile.py @@ -38,7 +38,7 @@ def test_quantile(self): # GH7661 result = Series([np.timedelta64('NaT')]).sum() - assert result is pd.NaT + assert result == pd.Timedelta(0) msg = 'percentiles should all be in the interval \\[0, 1\\]' for invalid in [-1, 2, [0.5, -1], [0.5, 2]]: diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 
38625bfb29917..240a7ad4b22f9 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -1132,19 +1132,19 @@ def test_pad_backfill_object_segfault(): result = libalgos.pad_object(old, new) expected = np.array([-1], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.pad_object(new, old) expected = np.array([], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill_object(old, new) expected = np.array([-1], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) result = libalgos.backfill_object(new, old) expected = np.array([], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) def test_arrmap(): @@ -1219,7 +1219,7 @@ def test_is_lexsorted(): 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0]), + 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'), np.array([30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, @@ -1231,19 +1231,10 @@ def test_is_lexsorted(): 7, 6, 5, 4, 3, 2, 1, 0, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, - 4, 3, 2, 1, 0])] + 4, 3, 2, 1, 0], dtype='int64')] assert (not libalgos.is_lexsorted(failure)) -# def test_get_group_index(): -# a = np.array([0, 1, 2, 0, 2, 1, 0, 0], dtype=np.int64) -# b = np.array([1, 0, 3, 2, 0, 2, 3, 0], dtype=np.int64) -# expected = np.array([1, 4, 11, 2, 8, 6, 3, 0], dtype=np.int64) - -# result = lib.get_group_index([a, b], (3, 4)) - -# assert(np.array_equal(result, expected)) - def test_groupsort_indexer(): a = np.random.randint(0, 1000, 100).astype(np.int64) @@ -1252,14 +1243,22 @@ def test_groupsort_indexer(): result = libalgos.groupsort_indexer(a, 1000)[0] 
# need to use a stable sort + # np.argsort returns int, groupsort_indexer + # always returns int64 expected = np.argsort(a, kind='mergesort') - assert (np.array_equal(result, expected)) + expected = expected.astype(np.int64) + + tm.assert_numpy_array_equal(result, expected) # compare with lexsort + # np.lexsort returns int, groupsort_indexer + # always returns int64 key = a * 1000 + b result = libalgos.groupsort_indexer(key, 1000000)[0] expected = np.lexsort((b, a)) - assert (np.array_equal(result, expected)) + expected = expected.astype(np.int64) + + tm.assert_numpy_array_equal(result, expected) def test_infinity_sort(): diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py index 272ba25bf8f8a..48c1622aa0c4e 100644 --- a/pandas/tests/test_categorical.py +++ b/pandas/tests/test_categorical.py @@ -2124,6 +2124,13 @@ def test_creation_astype(self): res = s.astype(CategoricalDtype(list('abcdef'), ordered=True)) tm.assert_series_equal(res, exp) + @pytest.mark.parametrize('columns', [['x'], ['x', 'y'], ['x', 'y', 'z']]) + def test_empty_astype(self, columns): + # GH 18004 + msg = '> 1 ndim Categorical are not supported at this time' + with tm.assert_raises_regex(NotImplementedError, msg): + DataFrame(columns=columns).astype('category') + def test_construction_series(self): l = [1, 2, 3, 1] @@ -3156,18 +3163,6 @@ def test_info(self): buf = compat.StringIO() df2.info(buf=buf) - def test_groupby_sort(self): - - # http://stackoverflow.com/questions/23814368/sorting-pandas-categorical-labels-after-groupby - # This should result in a properly sorted Series so that the plot - # has a sorted x axis - # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar') - - res = self.cat.groupby(['value_group'])['value_group'].count() - exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))] - exp.index = pd.CategoricalIndex(exp.index, name=exp.index.name) - tm.assert_series_equal(res, exp) - def test_min_max(self): # unordered cats have 
no min/max cat = Series(Categorical(["a", "b", "c", "d"], ordered=False)) @@ -3287,123 +3282,6 @@ def test_value_counts_with_nan(self): res = s.value_counts(dropna=False, sort=False) tm.assert_series_equal(res, exp) - def test_groupby(self): - - cats = Categorical(["a", "a", "a", "b", "b", "b", "c", "c", "c"], - categories=["a", "b", "c", "d"], ordered=True) - data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats}) - - exp_index = pd.CategoricalIndex(['a', 'b', 'c', 'd'], name='b', - ordered=True) - expected = DataFrame({'a': [1, 2, 4, np.nan]}, index=exp_index) - result = data.groupby("b").mean() - tm.assert_frame_equal(result, expected) - - raw_cat1 = Categorical(["a", "a", "b", "b"], - categories=["a", "b", "z"], ordered=True) - raw_cat2 = Categorical(["c", "d", "c", "d"], - categories=["c", "d", "y"], ordered=True) - df = DataFrame({"A": raw_cat1, "B": raw_cat2, "values": [1, 2, 3, 4]}) - - # single grouper - gb = df.groupby("A") - exp_idx = pd.CategoricalIndex(['a', 'b', 'z'], name='A', ordered=True) - expected = DataFrame({'values': Series([3, 7, np.nan], index=exp_idx)}) - result = gb.sum() - tm.assert_frame_equal(result, expected) - - # multiple groupers - gb = df.groupby(['A', 'B']) - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True)], - names=['A', 'B']) - expected = DataFrame({'values': [1, 2, np.nan, 3, 4, np.nan, - np.nan, np.nan, np.nan]}, - index=exp_index) - result = gb.sum() - tm.assert_frame_equal(result, expected) - - # multiple groupers with a non-cat - df = df.copy() - df['C'] = ['foo', 'bar'] * 2 - gb = df.groupby(['A', 'B', 'C']) - exp_index = pd.MultiIndex.from_product( - [Categorical(["a", "b", "z"], ordered=True), - Categorical(["c", "d", "y"], ordered=True), - ['foo', 'bar']], - names=['A', 'B', 'C']) - expected = DataFrame({'values': Series( - np.nan, index=exp_index)}).sort_index() - expected.iloc[[1, 2, 7, 8], 0] = [1, 2, 3, 4] - result = gb.sum() 
- tm.assert_frame_equal(result, expected) - - # GH 8623 - x = pd.DataFrame([[1, 'John P. Doe'], [2, 'Jane Dove'], - [1, 'John P. Doe']], - columns=['person_id', 'person_name']) - x['person_name'] = pd.Categorical(x.person_name) - - g = x.groupby(['person_id']) - result = g.transform(lambda x: x) - tm.assert_frame_equal(result, x[['person_name']]) - - result = x.drop_duplicates('person_name') - expected = x.iloc[[0, 1]] - tm.assert_frame_equal(result, expected) - - def f(x): - return x.drop_duplicates('person_name').iloc[0] - - result = g.apply(f) - expected = x.iloc[[0, 1]].copy() - expected.index = Index([1, 2], name='person_id') - expected['person_name'] = expected['person_name'].astype('object') - tm.assert_frame_equal(result, expected) - - # GH 9921 - # Monotonic - df = DataFrame({"a": [5, 15, 25]}) - c = pd.cut(df.a, bins=[0, 10, 20, 30, 40]) - - result = df.a.groupby(c).transform(sum) - tm.assert_series_equal(result, df['a']) - - tm.assert_series_equal( - df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) - tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) - tm.assert_frame_equal( - df.groupby(c).transform(lambda xs: np.max(xs)), df[['a']]) - - # Filter - tm.assert_series_equal(df.a.groupby(c).filter(np.all), df['a']) - tm.assert_frame_equal(df.groupby(c).filter(np.all), df) - - # Non-monotonic - df = DataFrame({"a": [5, 15, 25, -5]}) - c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40]) - - result = df.a.groupby(c).transform(sum) - tm.assert_series_equal(result, df['a']) - - tm.assert_series_equal( - df.a.groupby(c).transform(lambda xs: np.sum(xs)), df['a']) - tm.assert_frame_equal(df.groupby(c).transform(sum), df[['a']]) - tm.assert_frame_equal( - df.groupby(c).transform(lambda xs: np.sum(xs)), df[['a']]) - - # GH 9603 - df = pd.DataFrame({'a': [1, 0, 0, 0]}) - c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=pd.Categorical(list('abcd'))) - result = df.groupby(c).apply(len) - - exp_index = pd.CategoricalIndex(c.values.categories, - 
ordered=c.values.ordered) - expected = pd.Series([1, 0, 0, 0], index=exp_index) - expected.index.name = 'a' - tm.assert_series_equal(result, expected) - def test_pivot_table(self): raw_cat1 = Categorical(["a", "a", "b", "b"], diff --git a/pandas/tests/test_downstream.py b/pandas/tests/test_downstream.py index 61f0c992225c6..b8e9191002640 100644 --- a/pandas/tests/test_downstream.py +++ b/pandas/tests/test_downstream.py @@ -52,7 +52,6 @@ def test_xarray(df): assert df.to_xarray() is not None -@tm.network def test_statsmodels(): statsmodels = import_module('statsmodels') # noqa diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index 6d2607962dfb0..aebc9cd3deaac 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -73,17 +73,11 @@ def teardown_method(self, method): def run_arithmetic(self, df, other, assert_func, check_dtype=False, test_flex=True): expr._MIN_ELEMENTS = 0 - operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow'] + operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv'] if not compat.PY3: operations.append('div') for arith in operations: - # numpy >= 1.11 doesn't handle integers - # raised to integer powers - # https://github.com/pandas-dev/pandas/issues/15363 - if arith == 'pow' and not _np_version_under1p11: - continue - operator_name = arith if arith == 'div': operator_name = 'truediv' diff --git a/pandas/tests/test_join.py b/pandas/tests/test_join.py index cde1cab37d09c..af946436b55c7 100644 --- a/pandas/tests/test_join.py +++ b/pandas/tests/test_join.py @@ -53,7 +53,7 @@ def test_left_join_indexer_unique(): result = _join.left_join_indexer_unique_int64(b, a) expected = np.array([1, 1, 2, 3, 3], dtype=np.int64) - assert (np.array_equal(result, expected)) + tm.assert_numpy_array_equal(result, expected) def test_left_outer_join_bug(): @@ -69,13 +69,14 @@ def test_left_outer_join_bug(): lidx, ridx = _join.left_outer_join(left, right, max_groups, sort=False) - 
exp_lidx = np.arange(len(left)) - exp_ridx = -np.ones(len(left)) + exp_lidx = np.arange(len(left), dtype=np.int64) + exp_ridx = -np.ones(len(left), dtype=np.int64) + exp_ridx[left == 1] = 1 exp_ridx[left == 3] = 0 - assert (np.array_equal(lidx, exp_lidx)) - assert (np.array_equal(ridx, exp_ridx)) + tm.assert_numpy_array_equal(lidx, exp_lidx) + tm.assert_numpy_array_equal(ridx, exp_ridx) def test_inner_join_indexer(): diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py index 2662720bb436d..75aa9aa4e8198 100644 --- a/pandas/tests/test_lib.py +++ b/pandas/tests/test_lib.py @@ -198,7 +198,7 @@ def test_get_reverse_indexer(self): indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.int64) result = lib.get_reverse_indexer(indexer, 5) expected = np.array([4, 2, 3, 6, 7], dtype=np.int64) - assert np.array_equal(result, expected) + tm.assert_numpy_array_equal(result, expected) class TestNAObj(object): diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py index 9305504f8d5e3..5d56088193d30 100644 --- a/pandas/tests/test_nanops.py +++ b/pandas/tests/test_nanops.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- from __future__ import division, print_function +from distutils.version import LooseVersion from functools import partial import pytest @@ -181,12 +182,17 @@ def _coerce_tds(targ, res): check_dtype=check_dtype) def check_fun_data(self, testfunc, targfunc, testarval, targarval, - targarnanval, check_dtype=True, **kwargs): + targarnanval, check_dtype=True, empty_targfunc=None, + **kwargs): for axis in list(range(targarval.ndim)) + [None]: for skipna in [False, True]: targartempval = targarval if skipna else targarnanval - try: + if skipna and empty_targfunc and isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: targ = targfunc(targartempval, axis=axis, **kwargs) + + try: res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) self.check_results(targ, res, axis, @@ -218,10 +224,11 @@ def 
check_fun_data(self, testfunc, targfunc, testarval, targarval, except ValueError: return self.check_fun_data(testfunc, targfunc, testarval2, targarval2, - targarnanval2, check_dtype=check_dtype, **kwargs) + targarnanval2, check_dtype=check_dtype, + empty_targfunc=empty_targfunc, **kwargs) def check_fun(self, testfunc, targfunc, testar, targar=None, - targarnan=None, **kwargs): + targarnan=None, empty_targfunc=None, **kwargs): if targar is None: targar = testar if targarnan is None: @@ -231,7 +238,8 @@ def check_fun(self, testfunc, targfunc, testar, targar=None, targarnanval = getattr(self, targarnan) try: self.check_fun_data(testfunc, targfunc, testarval, targarval, - targarnanval, **kwargs) + targarnanval, empty_targfunc=empty_targfunc, + **kwargs) except BaseException as exc: exc.args += ('testar: %s' % testar, 'targar: %s' % targar, 'targarnan: %s' % targarnan) @@ -328,7 +336,8 @@ def test_nanall(self): def test_nansum(self): self.check_funs(nanops.nansum, np.sum, allow_str=False, - allow_date=False, allow_tdelta=True, check_dtype=False) + allow_date=False, allow_tdelta=True, check_dtype=False, + empty_targfunc=np.nansum) def test_nanmean(self): self.check_funs(nanops.nanmean, np.mean, allow_complex=False, @@ -461,8 +470,12 @@ def test_nankurt(self): allow_tdelta=False) def test_nanprod(self): + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + self.check_funs(nanops.nanprod, np.prod, allow_str=False, - allow_date=False, allow_tdelta=False) + allow_date=False, allow_tdelta=False, + empty_targfunc=np.nanprod) def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 33fb6f1108bf2..7e442fcc2fc8b 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -3,6 +3,7 @@ from warnings import catch_warnings from datetime import datetime +from 
distutils.version import LooseVersion import operator import pytest @@ -10,7 +11,6 @@ import pandas as pd from pandas.core.dtypes.common import is_float_dtype -from pandas.core.dtypes.missing import remove_na_arraylike from pandas import (Series, DataFrame, Index, date_range, isna, notna, pivot, MultiIndex) from pandas.core.nanops import nanall, nanany @@ -83,13 +83,16 @@ def test_count(self): self._check_stat_op('count', f, obj=self.panel, has_skipna=False) def test_sum(self): - self._check_stat_op('sum', np.sum) + self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) def test_mean(self): self._check_stat_op('mean', np.mean) def test_prod(self): - self._check_stat_op('prod', np.prod) + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + + self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) def test_median(self): def wrapper(x): @@ -142,7 +145,8 @@ def alt(x): self._check_stat_op('sem', alt) - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): + def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, + skipna_alternative=None): if obj is None: obj = self.panel @@ -154,11 +158,8 @@ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): if has_skipna: - def skipna_wrapper(x): - nona = remove_na_arraylike(x) - if len(nona) == 0: - return np.nan - return alternative(nona) + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) def wrapper(x): return alternative(np.asarray(x)) diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py index c0e8770dff8b8..ef19f11499e00 100644 --- a/pandas/tests/test_panel4d.py +++ b/pandas/tests/test_panel4d.py @@ -4,11 +4,11 @@ import operator import pytest from warnings import catch_warnings +from distutils.version import LooseVersion import numpy as np from pandas import Series, Index, isna, notna from pandas.core.dtypes.common import is_float_dtype -from 
pandas.core.dtypes.missing import remove_na_arraylike from pandas.core.panel import Panel from pandas.core.panel4d import Panel4D from pandas.tseries.offsets import BDay @@ -37,13 +37,16 @@ def test_count(self): self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False) def test_sum(self): - self._check_stat_op('sum', np.sum) + self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) def test_mean(self): self._check_stat_op('mean', np.mean) def test_prod(self): - self._check_stat_op('prod', np.prod) + if LooseVersion(np.__version__) < LooseVersion("1.10.0"): + raise pytest.skip("np.nanprod added in 1.10.0") + + self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) def test_median(self): def wrapper(x): @@ -106,7 +109,8 @@ def alt(x): # self._check_stat_op('skew', alt) - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): + def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, + skipna_alternative=None): if obj is None: obj = self.panel4d @@ -117,11 +121,9 @@ def _check_stat_op(self, name, alternative, obj=None, has_skipna=True): f = getattr(obj, name) if has_skipna: - def skipna_wrapper(x): - nona = remove_na_arraylike(x) - if len(nona) == 0: - return np.nan - return alternative(nona) + + skipna_wrapper = tm._make_skipna_wrapper(alternative, + skipna_alternative) def wrapper(x): return alternative(np.asarray(x)) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index ac8297a53de37..04e702644913f 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -4,6 +4,7 @@ from datetime import datetime, timedelta from functools import partial from textwrap import dedent +from operator import methodcaller import pytz import pytest @@ -2729,6 +2730,34 @@ def test_resample_weekly_bug_1726(self): # it works! 
df.resample('W-MON', closed='left', label='left').first() + def test_resample_with_dst_time_change(self): + # GH 15549 + index = pd.DatetimeIndex([1457537600000000000, 1458059600000000000], + tz='UTC').tz_convert('America/Chicago') + df = pd.DataFrame([1, 2], index=index) + result = df.resample('12h', closed='right', + label='right').last().ffill() + + expected_index_values = ['2016-03-09 12:00:00-06:00', + '2016-03-10 00:00:00-06:00', + '2016-03-10 12:00:00-06:00', + '2016-03-11 00:00:00-06:00', + '2016-03-11 12:00:00-06:00', + '2016-03-12 00:00:00-06:00', + '2016-03-12 12:00:00-06:00', + '2016-03-13 00:00:00-06:00', + '2016-03-13 13:00:00-05:00', + '2016-03-14 01:00:00-05:00', + '2016-03-14 13:00:00-05:00', + '2016-03-15 01:00:00-05:00', + '2016-03-15 13:00:00-05:00'] + index = pd.DatetimeIndex(expected_index_values, + tz='UTC').tz_convert('America/Chicago') + expected = pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 2.0], index=index) + assert_frame_equal(result, expected) + def test_resample_bms_2752(self): # GH2753 foo = pd.Series(index=pd.bdate_range('20000101', '20000201')) @@ -3103,6 +3132,26 @@ def f(x): result = g.apply(f) assert_frame_equal(result, expected) + def test_apply_with_mutated_index(self): + # GH 15169 + index = pd.date_range('1-1-2015', '12-31-15', freq='D') + df = pd.DataFrame(data={'col1': np.random.rand(len(index))}, + index=index) + + def f(x): + s = pd.Series([1, 2], index=['a', 'b']) + return s + + expected = df.groupby(pd.Grouper(freq='M')).apply(f) + + result = df.resample('M').apply(f) + assert_frame_equal(result, expected) + + # A case for series + expected = df['col1'].groupby(pd.Grouper(freq='M')).apply(f) + result = df['col1'].resample('M').apply(f) + assert_series_equal(result, expected) + def test_resample_groupby_with_label(self): # GH 13235 index = date_range('2000-01-01', freq='2D', periods=5) @@ -3329,8 +3378,45 @@ def test_aggregate_normal(self): assert_frame_equal(expected, dt_result) """ - 
def test_aggregate_with_nat(self): + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_resample_entirly_nat_window(self, method, unit): + s = pd.Series([0] * 2 + [np.nan] * 2, + index=pd.date_range('2017', periods=4)) + # 0 / 1 by default + result = methodcaller(method)(s.resample("2d")) + expected = pd.Series([0.0, unit], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = methodcaller(method, min_count=0)(s.resample("2d")) + expected = pd.Series([0.0, unit], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + # min_count=1 + result = methodcaller(method, min_count=1)(s.resample("2d")) + expected = pd.Series([0.0, np.nan], + index=pd.to_datetime(['2017-01-01', + '2017-01-03'])) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('func, fill_value', [ + ('min', np.nan), + ('max', np.nan), + ('sum', 0), + ('prod', 1), + ('count', 0), + ]) + def test_aggregate_with_nat(self, func, fill_value): # check TimeGrouper's aggregation is identical as normal groupby + # if NaT is included, 'var', 'std', 'mean', 'first','last' + # and 'nth' doesn't work yet n = 20 data = np.random.randn(n, 4).astype('int64') @@ -3344,39 +3430,78 @@ def test_aggregate_with_nat(self): normal_grouped = normal_df.groupby('key') dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - for func in ['min', 'max', 'sum', 'prod']: - normal_result = getattr(normal_grouped, func)() - dt_result = getattr(dt_grouped, func)() - pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - assert_frame_equal(expected, dt_result) + normal_result = getattr(normal_grouped, func)() + dt_result = getattr(dt_grouped, func)() - for func 
in ['count']: - normal_result = getattr(normal_grouped, func)() - pad = DataFrame([[0, 0, 0, 0]], index=[3], - columns=['A', 'B', 'C', 'D']) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) + pad = DataFrame([[fill_value] * 4], index=[3], + columns=['A', 'B', 'C', 'D']) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_frame_equal(expected, dt_result) + assert dt_result.index.name == 'key' - for func in ['size']: - normal_result = getattr(normal_grouped, func)() - pad = Series([0], index=[3]) - expected = normal_result.append(pad) - expected = expected.sort_index() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_series_equal(expected, dt_result) - # GH 9925 - assert dt_result.index.name == 'key' + def test_aggregate_with_nat_size(self): + # GH 9925 + n = 20 + data = np.random.randn(n, 4).astype('int64') + normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + normal_df['key'] = [1, 2, np.nan, 4, 5] * 4 + + dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) + dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT, + datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4 + + normal_grouped = normal_df.groupby('key') + dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) + + normal_result = normal_grouped.size() + dt_result = dt_grouped.size() + + pad = Series([0], index=[3]) + expected = normal_result.append(pad) + expected = expected.sort_index() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + assert_series_equal(expected, dt_result) + assert dt_result.index.name == 'key' + + def test_repr(self): + # GH18203 + 
result = repr(TimeGrouper(key='A', freq='H')) + expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, " + "closed='left', label='left', how='mean', " + "convention='e', base=0)") + assert result == expected + + @pytest.mark.parametrize('method, unit', [ + ('sum', 0), + ('prod', 1), + ]) + def test_upsample_sum(self, method, unit): + s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H")) + resampled = s.resample("30T") + index = pd.to_datetime(['2017-01-01T00:00:00', + '2017-01-01T00:30:00', + '2017-01-01T01:00:00']) + + # 0 / 1 by default + result = methodcaller(method)(resampled) + expected = pd.Series([1, unit, 1], index=index) + tm.assert_series_equal(result, expected) + + # min_count=0 + result = methodcaller(method, min_count=0)(resampled) + expected = pd.Series([1, unit, 1], index=index) + tm.assert_series_equal(result, expected) - # if NaT is included, 'var', 'std', 'mean', 'first','last' - # and 'nth' doesn't work yet + # min_count=1 + result = methodcaller(method, min_count=1)(resampled) + expected = pd.Series([1, np.nan, 1], index=index) + tm.assert_series_equal(result, expected) + + # min_count>1 + result = methodcaller(method, min_count=2)(resampled) + expected = pd.Series([np.nan, np.nan, np.nan], index=index) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/test_sorting.py b/pandas/tests/test_sorting.py index a5b12bbf9608a..06c1fa1c0905a 100644 --- a/pandas/tests/test_sorting.py +++ b/pandas/tests/test_sorting.py @@ -332,16 +332,17 @@ def testit(label_list, shape): label_list2 = decons_group_index(group_index, shape) for a, b in zip(label_list, label_list2): - assert (np.array_equal(a, b)) + tm.assert_numpy_array_equal(a, b) shape = (4, 5, 6) - label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100), np.tile( - [0, 2, 4, 3, 0, 1, 2, 3], 100), np.tile( - [5, 1, 0, 2, 3, 0, 5, 4], 100)] + label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([0, 2, 4, 3, 0, 1, 2, 3], 
100).astype(np.int64), + np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64)] testit(label_list, shape) shape = (10000, 10000) - label_list = [np.tile(np.arange(10000), 5), np.tile(np.arange(10000), 5)] + label_list = [np.tile(np.arange(10000, dtype=np.int64), 5), + np.tile(np.arange(10000, dtype=np.int64), 5)] testit(label_list, shape) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index f1b97081b6d93..8aa69bcbfdf7f 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -2086,6 +2086,18 @@ def test_rsplit_to_multiindex_expand(self): tm.assert_index_equal(result, exp) assert result.nlevels == 2 + def test_split_nan_expand(self): + # gh-18450 + s = Series(["foo,bar,baz", NA]) + result = s.str.split(",", expand=True) + exp = DataFrame([["foo", "bar", "baz"], [NA, NA, NA]]) + tm.assert_frame_equal(result, exp) + + # check that these are actually np.nan and not None + # TODO see GH 18463 + # tm.assert_frame_equal does not differentiate + assert all(np.isnan(x) for x in result.iloc[1]) + def test_split_with_name(self): # GH 12617 diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index c567613acebd1..e65de10c51300 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -475,6 +475,28 @@ def tests_empty_df_rolling(self, roller): result = DataFrame(index=pd.DatetimeIndex([])).rolling(roller).sum() tm.assert_frame_equal(result, expected) + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.rolling(1, min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.rolling(1, min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + + def test_missing_minp_zero_variable(self): + # https://github.com/pandas-dev/pandas/pull/18921 + x = pd.Series([np.nan] * 4, + index=pd.DatetimeIndex(['2017-01-01', 
'2017-01-04', + '2017-01-06', '2017-01-07'])) + result = x.rolling(pd.Timedelta("2d"), min_periods=0).sum() + expected = pd.Series(0.0, index=x.index) + tm.assert_series_equal(result, expected) + def test_multi_index_names(self): # GH 16789, 16825 @@ -548,6 +570,19 @@ def test_empty_df_expanding(self, expander): index=pd.DatetimeIndex([])).expanding(expander).sum() tm.assert_frame_equal(result, expected) + def test_missing_minp_zero(self): + # https://github.com/pandas-dev/pandas/pull/18921 + # minp=0 + x = pd.Series([np.nan]) + result = x.expanding(min_periods=0).sum() + expected = pd.Series([0.0]) + tm.assert_series_equal(result, expected) + + # minp=1 + result = x.expanding(min_periods=1).sum() + expected = pd.Series([np.nan]) + tm.assert_series_equal(result, expected) + class TestEWM(Base): @@ -864,7 +899,8 @@ def test_centered_axis_validation(self): .rolling(window=3, center=True, axis=2).mean()) def test_rolling_sum(self): - self._check_moment_func(mom.rolling_sum, np.sum, name='sum') + self._check_moment_func(mom.rolling_sum, np.nansum, name='sum', + zero_min_periods_equal=False) def test_rolling_count(self): counter = lambda x: np.isfinite(x).astype(float).sum() @@ -1349,14 +1385,18 @@ def test_fperr_robustness(self): def _check_moment_func(self, f, static_comp, name=None, window=50, has_min_periods=True, has_center=True, has_time_rule=True, preserve_nan=True, - fill_value=None, test_stable=False, **kwargs): + fill_value=None, test_stable=False, + zero_min_periods_equal=True, + **kwargs): with warnings.catch_warnings(record=True): self._check_ndarray(f, static_comp, window=window, has_min_periods=has_min_periods, preserve_nan=preserve_nan, has_center=has_center, fill_value=fill_value, - test_stable=test_stable, **kwargs) + test_stable=test_stable, + zero_min_periods_equal=zero_min_periods_equal, + **kwargs) with warnings.catch_warnings(record=True): self._check_structures(f, static_comp, @@ -1375,7 +1415,8 @@ def _check_moment_func(self, f, static_comp, 
name=None, window=50, def _check_ndarray(self, f, static_comp, window=50, has_min_periods=True, preserve_nan=True, has_center=True, fill_value=None, - test_stable=False, test_window=True, **kwargs): + test_stable=False, test_window=True, + zero_min_periods_equal=True, **kwargs): def get_result(arr, window, min_periods=None, center=False): return f(arr, window, min_periods=min_periods, center=center, ** kwargs) @@ -1408,10 +1449,11 @@ def get_result(arr, window, min_periods=None, center=False): assert isna(result[3]) assert notna(result[4]) - # min_periods=0 - result0 = get_result(arr, 20, min_periods=0) - result1 = get_result(arr, 20, min_periods=1) - tm.assert_almost_equal(result0, result1) + if zero_min_periods_equal: + # min_periods=0 may be equivalent to min_periods=1 + result0 = get_result(arr, 20, min_periods=0) + result1 = get_result(arr, 20, min_periods=1) + tm.assert_almost_equal(result0, result1) else: result = get_result(arr, 50) tm.assert_almost_equal(result[-1], static_comp(arr[10:-10])) @@ -2491,6 +2533,14 @@ def test_rolling_corr_pairwise(self): self._check_pairwise_moment('rolling', 'corr', window=10, min_periods=5) + @pytest.mark.parametrize('window', range(7)) + def test_rolling_corr_with_zero_variance(self, window): + # GH 18430 + s = pd.Series(np.zeros(20)) + other = pd.Series(np.arange(20)) + + assert s.rolling(window=window).corr(other=other).isna().all() + def _check_pairwise_moment(self, dispatch, name, **kwargs): def get_result(obj, obj2=None): return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2) @@ -2979,6 +3029,16 @@ def test_rolling_kurt_edge_cases(self): x = d.rolling(window=4).kurt() tm.assert_series_equal(expected, x) + def test_rolling_skew_eq_value_fperr(self): + # #18804 all rolling skew for all equal values should return Nan + a = pd.Series([1.1] * 15).rolling(window=10).skew() + assert np.isnan(a).all() + + def test_rolling_kurt_eq_value_fperr(self): + # #18804 all rolling kurt for all equal values should return Nan + a = 
pd.Series([1.1] * 15).rolling(window=10).kurt() + assert np.isnan(a).all() + def _check_expanding_ndarray(self, func, static_comp, has_min_periods=True, has_time_rule=True, preserve_nan=True): result = func(self.arr) diff --git a/pandas/tests/tseries/test_timezones.py b/pandas/tests/tseries/test_timezones.py index aa8fe90ea6500..823e22c4f87d1 100644 --- a/pandas/tests/tseries/test_timezones.py +++ b/pandas/tests/tseries/test_timezones.py @@ -13,7 +13,7 @@ import pandas.util.testing as tm import pandas.tseries.offsets as offsets -from pandas.compat import lrange, zip +from pandas.compat import lrange, zip, PY3 from pandas.core.indexes.datetimes import bdate_range, date_range from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas._libs import tslib @@ -70,7 +70,7 @@ def test_utc_to_local_no_modify(self): rng_eastern = rng.tz_convert(self.tzstr('US/Eastern')) # Values are unmodified - assert np.array_equal(rng.asi8, rng_eastern.asi8) + tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8) assert self.cmptz(rng_eastern.tz, self.tz('US/Eastern')) @@ -108,7 +108,7 @@ def test_localize_utc_conversion_explicit(self): rng = date_range('3/10/2012', '3/11/2012', freq='30T') converted = rng.tz_localize(self.tz('US/Eastern')) expected_naive = rng + offsets.Hour(5) - assert np.array_equal(converted.asi8, expected_naive.asi8) + tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8) # DST ambiguity, this should fail rng = date_range('3/11/2012', '3/12/2012', freq='30T') @@ -424,7 +424,7 @@ def test_with_tz(self): # datetimes with tzinfo set dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), - '1/1/2009', tz=pytz.utc) + datetime(2009, 1, 1, tzinfo=pytz.utc)) pytest.raises(Exception, bdate_range, datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009', @@ -1278,16 +1278,22 @@ def test_replace_tzinfo(self): result_dt = dt.replace(tzinfo=tzinfo) result_pd = Timestamp(dt).replace(tzinfo=tzinfo) - if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 - assert 
result_dt.timestamp() == result_pd.timestamp() + if PY3: + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() + assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() result_dt = dt.replace(tzinfo=tzinfo).replace(tzinfo=None) result_pd = Timestamp(dt).replace(tzinfo=tzinfo).replace(tzinfo=None) - if hasattr(result_dt, 'timestamp'): # New method in Py 3.3 - assert result_dt.timestamp() == result_pd.timestamp() + if PY3: + # datetime.timestamp() converts in the local timezone + with tm.set_timezone('UTC'): + assert result_dt.timestamp() == result_pd.timestamp() + assert result_dt == result_pd assert result_dt == result_pd.to_pydatetime() diff --git a/pandas/tseries/converter.py b/pandas/tseries/converter.py index df603c4d880d8..26d3f3cb85edc 100644 --- a/pandas/tseries/converter.py +++ b/pandas/tseries/converter.py @@ -1,6 +1,7 @@ # flake8: noqa +import warnings -from pandas.plotting._converter import (register, time2num, +from pandas.plotting._converter import (time2num, TimeConverter, TimeFormatter, PeriodConverter, get_datevalue, DatetimeConverter, @@ -9,3 +10,11 @@ MilliSecondLocator, get_finder, TimeSeries_DateLocator, TimeSeries_DateFormatter) + + +def register(): + from pandas.plotting._converter import register as register_ + msg = ("'pandas.tseries.converter.register' has been moved and renamed to " + "'pandas.plotting.register_matplotlib_converters'. 
") + warnings.warn(msg, FutureWarning, stacklevel=2) + register_() diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 730d2782e85d2..b6fc9c78d6476 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1074,8 +1074,12 @@ def assert_categorical_equal(left, right, check_dtype=True, def raise_assert_detail(obj, message, left, right, diff=None): if isinstance(left, np.ndarray): left = pprint_thing(left) + elif is_categorical_dtype(left): + left = repr(left) if isinstance(right, np.ndarray): right = pprint_thing(right) + elif is_categorical_dtype(right): + right = repr(right) msg = """{obj} are different @@ -2857,3 +2861,31 @@ def setTZ(tz): yield finally: setTZ(orig_tz) + + +def _make_skipna_wrapper(alternative, skipna_alternative=None): + """Create a function for calling on an array. + + Parameters + ---------- + alternative : function + The function to be called on the array with no NaNs. + Only used when 'skipna_alternative' is None. + skipna_alternative : function + The function to be called on the original array + + Returns + ------- + skipna_wrapper : function + """ + if skipna_alternative: + def skipna_wrapper(x): + return skipna_alternative(x.values) + else: + def skipna_wrapper(x): + nona = x.dropna() + if len(nona) == 0: + return np.nan + return alternative(nona) + + return skipna_wrapper diff --git a/scripts/convert_deps.py b/scripts/convert_deps.py new file mode 100644 index 0000000000000..aabeb24a0c3c8 --- /dev/null +++ b/scripts/convert_deps.py @@ -0,0 +1,29 @@ +""" +Convert the conda environment.yaml to a pip requirements.txt +""" +import yaml + +exclude = {'python=3'} +rename = {'pytables': 'tables'} + +with open("ci/environment-dev.yaml") as f: + dev = yaml.load(f) + +with open("ci/requirements-optional-conda.txt") as f: + optional = [x.strip() for x in f.readlines()] + +required = dev['dependencies'] +required = [rename.get(dep, dep) for dep in required if dep not in exclude] +optional = [rename.get(dep, dep) for dep in 
optional if dep not in exclude] + + +with open("ci/requirements_dev.txt", 'wt') as f: + f.write("# This file was autogenerated by scripts/convert_deps.py\n") + f.write("# Do not modify directly\n") + f.write('\n'.join(required)) + + +with open("ci/requirements-optional-pip.txt", 'wt') as f: + f.write("# This file was autogenerated by scripts/convert_deps.py\n") + f.write("# Do not modify directly\n") + f.write("\n".join(optional)) diff --git a/setup.cfg b/setup.cfg index 0123078523b6f..7a88ee8557dc7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -12,7 +12,7 @@ tag_prefix = v parentdir_prefix = pandas- [flake8] -ignore = E731,E402 +ignore = E731,E402,W503 max-line-length = 79 [yapf] diff --git a/setup.py b/setup.py index 158ee9493b6ac..0fea6f5641475 100755 --- a/setup.py +++ b/setup.py @@ -716,6 +716,7 @@ def pxd(name): 'parser/data/*.bz2', 'parser/data/*.txt', 'parser/data/*.tar', + 'parser/data/*.zip', 'parser/data/*.tar.gz', 'sas/data/*.csv', 'sas/data/*.xpt',
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19323
2018-01-20T10:43:07Z
2018-01-21T15:07:22Z
null
2018-01-21T15:07:35Z
Separate test_numeric_compat into method-specific tests
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 1ce8ade50c071..3de1c4c982654 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -29,20 +29,21 @@ def full_like(array, value): class Numeric(Base): def test_numeric_compat(self): + pass # override Base method + def test_mul_int(self): idx = self.create_index() - didx = idx * idx - result = idx * 1 tm.assert_index_equal(result, idx) + def test_rmul_int(self): + idx = self.create_index() + result = 1 * idx tm.assert_index_equal(result, idx) - # in general not true for RangeIndex - if not isinstance(idx, RangeIndex): - result = idx * idx - tm.assert_index_equal(result, idx ** 2) + def test_div_int(self): + idx = self.create_index() # truediv under PY3 result = idx / 1 @@ -57,9 +58,16 @@ def test_numeric_compat(self): expected = Index(idx.values / 2) tm.assert_index_equal(result, expected) + def test_floordiv_int(self): + idx = self.create_index() + result = idx // 1 tm.assert_index_equal(result, idx) + def test_mul_int_array(self): + idx = self.create_index() + didx = idx * idx + result = idx * np.array(5, dtype='int64') tm.assert_index_equal(result, idx * 5) @@ -67,19 +75,45 @@ def test_numeric_compat(self): result = idx * np.arange(5, dtype=arr_dtype) tm.assert_index_equal(result, didx) + def test_mul_int_series(self): + idx = self.create_index() + didx = idx * idx + + arr_dtype = 'uint64' if isinstance(idx, UInt64Index) else 'int64' result = idx * Series(np.arange(5, dtype=arr_dtype)) tm.assert_series_equal(result, Series(didx)) + def test_mul_float_series(self): + idx = self.create_index() rng5 = np.arange(5, dtype='float64') + result = idx * Series(rng5 + 0.1) expected = Series(rng5 * (rng5 + 0.1)) tm.assert_series_equal(result, expected) - # invalid - pytest.raises(TypeError, - lambda: idx * date_range('20130101', periods=5)) - pytest.raises(ValueError, lambda: idx * idx[0:3]) - pytest.raises(ValueError, lambda: idx * 
np.array([1, 2])) + def test_mul_index(self): + idx = self.create_index() + + # in general not true for RangeIndex + if not isinstance(idx, RangeIndex): + result = idx * idx + tm.assert_index_equal(result, idx ** 2) + + def test_mul_datelike_raises(self): + idx = self.create_index() + with pytest.raises(TypeError): + idx * date_range('20130101', periods=5) + + def test_mul_size_mismatch_raises(self): + idx = self.create_index() + + with pytest.raises(ValueError): + idx * idx[0:3] + with pytest.raises(ValueError): + idx * np.array([1, 2]) + + def test_divmod(self): + idx = self.create_index() result = divmod(idx, 2) with np.errstate(all='ignore'): @@ -95,15 +129,22 @@ def test_numeric_compat(self): for r, e in zip(result, expected): tm.assert_index_equal(r, e) + def test_pow_float(self): # test power calculations both ways, GH 14973 - expected = pd.Float64Index(2.0**idx.values) - result = 2.0**idx - tm.assert_index_equal(result, expected) + idx = self.create_index() expected = pd.Float64Index(idx.values**2.0) result = idx**2.0 tm.assert_index_equal(result, expected) + def test_rpow_float(self): + # test power calculations both ways, GH 14973 + idx = self.create_index() + + expected = pd.Float64Index(2.0**idx.values) + result = 2.0**idx + tm.assert_index_equal(result, expected) + @pytest.mark.xfail(reason='GH#19252 Series has no __rdivmod__') def test_divmod_series(self): idx = self.create_index()
A more reasonably-scoped attempt at #19255. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19321
2018-01-20T01:07:20Z
2018-01-21T15:15:24Z
2018-01-21T15:15:24Z
2018-01-21T18:36:35Z
Centralize and de-duplicate comparison and arith tests
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py new file mode 100644 index 0000000000000..3f4e3877a276a --- /dev/null +++ b/pandas/tests/frame/test_arithmetic.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +import numpy as np + +import pandas as pd +import pandas.util.testing as tm + + +class TestPeriodFrameArithmetic(object): + + def test_ops_frame_period(self): + # GH 13043 + df = pd.DataFrame({'A': [pd.Period('2015-01', freq='M'), + pd.Period('2015-02', freq='M')], + 'B': [pd.Period('2014-01', freq='M'), + pd.Period('2014-02', freq='M')]}) + assert df['A'].dtype == object + assert df['B'].dtype == object + + p = pd.Period('2015-03', freq='M') + # dtype will be object because of original dtype + exp = pd.DataFrame({'A': np.array([2, 1], dtype=object), + 'B': np.array([14, 13], dtype=object)}) + tm.assert_frame_equal(p - df, exp) + tm.assert_frame_equal(df - p, -exp) + + df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'), + pd.Period('2015-06', freq='M')], + 'B': [pd.Period('2015-05', freq='M'), + pd.Period('2015-06', freq='M')]}) + assert df2['A'].dtype == object + assert df2['B'].dtype == object + + exp = pd.DataFrame({'A': np.array([4, 4], dtype=object), + 'B': np.array([16, 16], dtype=object)}) + tm.assert_frame_equal(df2 - df, exp) + tm.assert_frame_equal(df - df2, -exp) diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 011b33a4d6f35..480f025db17ca 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -1,12 +1,14 @@ # -*- coding: utf-8 -*- import warnings from datetime import datetime, timedelta +import operator import pytest import numpy as np import pandas as pd +from pandas.compat.numpy import np_datetime64_compat import pandas.util.testing as tm from pandas.errors import PerformanceWarning from pandas import (Timestamp, Timedelta, Series, @@ -41,6 +43,187 @@ def 
addend(request): return request.param +class TestDatetimeIndexComparisons(object): + # TODO: De-duplicate with test_comparisons_nat below + def test_dti_cmp_nat(self): + left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, + pd.Timestamp('2011-01-03')]) + right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]) + + for lhs, rhs in [(left, right), + (left.astype(object), right.astype(object))]: + result = rhs == lhs + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = lhs != rhs + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs == pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT == rhs, expected) + + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(lhs != pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT != lhs, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs < pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT > lhs, expected) + + @pytest.mark.parametrize('op', [operator.eq, operator.ne, + operator.gt, operator.ge, + operator.lt, operator.le]) + def test_comparison_tzawareness_compat(self, op): + # GH#18162 + dr = pd.date_range('2016-01-01', periods=6) + dz = dr.tz_localize('US/Pacific') + + with pytest.raises(TypeError): + op(dr, dz) + with pytest.raises(TypeError): + op(dr, list(dz)) + with pytest.raises(TypeError): + op(dz, dr) + with pytest.raises(TypeError): + op(dz, list(dr)) + + # Check that there isn't a problem aware-aware and naive-naive do not + # raise + assert (dr == dr).all() + assert (dr == list(dr)).all() + assert (dz == dz).all() + assert (dz == list(dz)).all() + + # Check comparisons against scalar Timestamps + ts = pd.Timestamp('2000-03-14 01:59') + ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam') + + assert (dr > ts).all() + with pytest.raises(TypeError): + 
op(dr, ts_tz) + + assert (dz > ts_tz).all() + with pytest.raises(TypeError): + op(dz, ts) + + @pytest.mark.parametrize('op', [operator.eq, operator.ne, + operator.gt, operator.ge, + operator.lt, operator.le]) + def test_nat_comparison_tzawareness(self, op): + # GH#19276 + # tzaware DatetimeIndex should not raise when compared to NaT + dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, + '2014-05-01', '2014-07-01']) + expected = np.array([op == operator.ne] * len(dti)) + result = op(dti, pd.NaT) + tm.assert_numpy_array_equal(result, expected) + + result = op(dti.tz_localize('US/Pacific'), pd.NaT) + tm.assert_numpy_array_equal(result, expected) + + def test_comparisons_coverage(self): + rng = date_range('1/1/2000', periods=10) + + # raise TypeError for now + pytest.raises(TypeError, rng.__lt__, rng[3].value) + + result = rng == list(rng) + exp = rng == rng + tm.assert_numpy_array_equal(result, exp) + + def test_comparisons_nat(self): + + fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) + fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0]) + + didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, + '2014-05-01', '2014-07-01']) + didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT, + '2014-06-01', '2014-07-01']) + darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'), + np_datetime64_compat('2014-03-01 00:00Z'), + np_datetime64_compat('nat'), np.datetime64('nat'), + np_datetime64_compat('2014-06-01 00:00Z'), + np_datetime64_compat('2014-07-01 00:00Z')]) + + cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)] + + # Check pd.NaT is handles as the same as np.nan + with tm.assert_produces_warning(None): + for idx1, idx2 in cases: + + result = idx1 < idx2 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx2 > idx1 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + 
result = idx1 <= idx2 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx2 >= idx1 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == idx2 + expected = np.array([False, False, False, False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 != idx2 + expected = np.array([True, True, True, True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(None): + for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]: + result = idx1 < val + expected = np.array([False, False, False, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + result = idx1 > val + tm.assert_numpy_array_equal(result, expected) + + result = idx1 <= val + tm.assert_numpy_array_equal(result, expected) + result = idx1 >= val + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == val + tm.assert_numpy_array_equal(result, expected) + + result = idx1 != val + expected = np.array([True, True, True, True, True, True]) + tm.assert_numpy_array_equal(result, expected) + + # Check pd.NaT is handles as the same as np.nan + with tm.assert_produces_warning(None): + for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]: + result = idx1 < val + expected = np.array([True, False, False, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + result = idx1 > val + expected = np.array([False, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 <= val + expected = np.array([True, False, True, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + result = idx1 >= val + expected = np.array([False, False, True, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == val + expected = np.array([False, False, True, False, False, False]) + 
tm.assert_numpy_array_equal(result, expected) + + result = idx1 != val + expected = np.array([True, True, False, True, True, True]) + tm.assert_numpy_array_equal(result, expected) + + class TestDatetimeIndexArithmetic(object): def test_dti_add_timestamp_raises(self): diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index e3ebb8769db02..49f94bfa65543 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -1,4 +1,3 @@ -import operator import pytest @@ -9,7 +8,6 @@ import pandas as pd import pandas.util.testing as tm from pandas.compat import lrange -from pandas.compat.numpy import np_datetime64_compat from pandas import (DatetimeIndex, Index, date_range, DataFrame, Timestamp, offsets) @@ -250,157 +248,6 @@ def test_append_join_nondatetimeindex(self): # it works rng.join(idx, how='outer') - @pytest.mark.parametrize('op', [operator.eq, operator.ne, - operator.gt, operator.ge, - operator.lt, operator.le]) - def test_comparison_tzawareness_compat(self, op): - # GH#18162 - dr = pd.date_range('2016-01-01', periods=6) - dz = dr.tz_localize('US/Pacific') - - with pytest.raises(TypeError): - op(dr, dz) - with pytest.raises(TypeError): - op(dr, list(dz)) - with pytest.raises(TypeError): - op(dz, dr) - with pytest.raises(TypeError): - op(dz, list(dr)) - - # Check that there isn't a problem aware-aware and naive-naive do not - # raise - assert (dr == dr).all() - assert (dr == list(dr)).all() - assert (dz == dz).all() - assert (dz == list(dz)).all() - - # Check comparisons against scalar Timestamps - ts = pd.Timestamp('2000-03-14 01:59') - ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam') - - assert (dr > ts).all() - with pytest.raises(TypeError): - op(dr, ts_tz) - - assert (dz > ts_tz).all() - with pytest.raises(TypeError): - op(dz, ts) - - @pytest.mark.parametrize('op', [operator.eq, operator.ne, - operator.gt, operator.ge, - operator.lt, 
operator.le]) - def test_nat_comparison_tzawareness(self, op): - # GH#19276 - # tzaware DatetimeIndex should not raise when compared to NaT - dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, - '2014-05-01', '2014-07-01']) - expected = np.array([op == operator.ne] * len(dti)) - result = op(dti, pd.NaT) - tm.assert_numpy_array_equal(result, expected) - - result = op(dti.tz_localize('US/Pacific'), pd.NaT) - tm.assert_numpy_array_equal(result, expected) - - def test_comparisons_coverage(self): - rng = date_range('1/1/2000', periods=10) - - # raise TypeError for now - pytest.raises(TypeError, rng.__lt__, rng[3].value) - - result = rng == list(rng) - exp = rng == rng - tm.assert_numpy_array_equal(result, exp) - - def test_comparisons_nat(self): - - fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) - fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0]) - - didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, - '2014-05-01', '2014-07-01']) - didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT, - '2014-06-01', '2014-07-01']) - darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'), - np_datetime64_compat('2014-03-01 00:00Z'), - np_datetime64_compat('nat'), np.datetime64('nat'), - np_datetime64_compat('2014-06-01 00:00Z'), - np_datetime64_compat('2014-07-01 00:00Z')]) - - cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)] - - # Check pd.NaT is handles as the same as np.nan - with tm.assert_produces_warning(None): - for idx1, idx2 in cases: - - result = idx1 < idx2 - expected = np.array([True, False, False, False, True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = idx2 > idx1 - expected = np.array([True, False, False, False, True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 <= idx2 - expected = np.array([True, False, False, False, True, True]) - tm.assert_numpy_array_equal(result, expected) - - result = idx2 >= idx1 - expected = np.array([True, False, 
False, False, True, True]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 == idx2 - expected = np.array([False, False, False, False, False, True]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 != idx2 - expected = np.array([True, True, True, True, True, False]) - tm.assert_numpy_array_equal(result, expected) - - with tm.assert_produces_warning(None): - for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]: - result = idx1 < val - expected = np.array([False, False, False, False, False, False]) - tm.assert_numpy_array_equal(result, expected) - result = idx1 > val - tm.assert_numpy_array_equal(result, expected) - - result = idx1 <= val - tm.assert_numpy_array_equal(result, expected) - result = idx1 >= val - tm.assert_numpy_array_equal(result, expected) - - result = idx1 == val - tm.assert_numpy_array_equal(result, expected) - - result = idx1 != val - expected = np.array([True, True, True, True, True, True]) - tm.assert_numpy_array_equal(result, expected) - - # Check pd.NaT is handles as the same as np.nan - with tm.assert_produces_warning(None): - for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]: - result = idx1 < val - expected = np.array([True, False, False, False, False, False]) - tm.assert_numpy_array_equal(result, expected) - result = idx1 > val - expected = np.array([False, False, False, False, True, True]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 <= val - expected = np.array([True, False, True, False, False, False]) - tm.assert_numpy_array_equal(result, expected) - result = idx1 >= val - expected = np.array([False, False, True, False, True, True]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 == val - expected = np.array([False, False, True, False, False, False]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 != val - expected = np.array([True, True, False, True, True, True]) - tm.assert_numpy_array_equal(result, expected) - def test_map(self): rng = 
date_range('1/1/2000', periods=10) diff --git a/pandas/tests/indexes/datetimes/test_datetimelike.py b/pandas/tests/indexes/datetimes/test_datetimelike.py index 538e10e6011ec..9d6d27ecb4b6f 100644 --- a/pandas/tests/indexes/datetimes/test_datetimelike.py +++ b/pandas/tests/indexes/datetimes/test_datetimelike.py @@ -21,28 +21,7 @@ def create_index(self): return date_range('20130101', periods=5) def test_shift(self): - - # test shift for datetimeIndex and non datetimeIndex - # GH8083 - - drange = self.create_index() - result = drange.shift(1) - expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04', - '2013-01-05', - '2013-01-06'], freq='D') - tm.assert_index_equal(result, expected) - - result = drange.shift(-1) - expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02', - '2013-01-03', '2013-01-04'], - freq='D') - tm.assert_index_equal(result, expected) - - result = drange.shift(3, freq='2D') - expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09', - '2013-01-10', - '2013-01-11'], freq='D') - tm.assert_index_equal(result, expected) + pass # handled in test_ops def test_pickle_compat_construction(self): pass diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index a2a84adbf46c1..a91dbd905e12c 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -385,33 +385,6 @@ def test_resolution(self): tz=tz) assert idx.resolution == expected - def test_comp_nat(self): - left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, - pd.Timestamp('2011-01-03')]) - right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]) - - for lhs, rhs in [(left, right), - (left.astype(object), right.astype(object))]: - result = rhs == lhs - expected = np.array([False, False, True]) - tm.assert_numpy_array_equal(result, expected) - - result = lhs != rhs - expected = np.array([True, True, False]) - tm.assert_numpy_array_equal(result, expected) - - 
expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs == pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT == rhs, expected) - - expected = np.array([True, True, True]) - tm.assert_numpy_array_equal(lhs != pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT != lhs, expected) - - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs < pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT > lhs, expected) - def test_value_counts_unique(self): # GH 7735 for tz in self.tz: @@ -617,6 +590,29 @@ def test_shift(self): '2011-01-01 09:00'], name='xxx', tz=tz) tm.assert_index_equal(idx.shift(-3, freq='H'), exp) + # TODO: moved from test_datetimelike; de-duplicate with test_shift above + def test_shift2(self): + # test shift for datetimeIndex and non datetimeIndex + # GH8083 + drange = pd.date_range('20130101', periods=5) + result = drange.shift(1) + expected = pd.DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04', + '2013-01-05', + '2013-01-06'], freq='D') + tm.assert_index_equal(result, expected) + + result = drange.shift(-1) + expected = pd.DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02', + '2013-01-03', '2013-01-04'], + freq='D') + tm.assert_index_equal(result, expected) + + result = drange.shift(3, freq='2D') + expected = pd.DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09', + '2013-01-10', + '2013-01-11'], freq='D') + tm.assert_index_equal(result, expected) + def test_nat(self): assert pd.DatetimeIndex._na_value is pd.NaT assert pd.DatetimeIndex([])._na_value is pd.NaT diff --git a/pandas/tests/indexes/period/test_ops.py b/pandas/tests/indexes/period/test_ops.py index a78bc6fc577b8..21a9ffdde3444 100644 --- a/pandas/tests/indexes/period/test_ops.py +++ b/pandas/tests/indexes/period/test_ops.py @@ -6,7 +6,7 @@ import pandas._libs.tslib as tslib import pandas.util.testing as tm import pandas.core.indexes.period as period -from pandas import (DatetimeIndex, PeriodIndex, period_range, Series, Period, 
+from pandas import (DatetimeIndex, PeriodIndex, Series, Period, _np_version_under1p10, Index) from pandas.tests.test_base import Ops @@ -285,33 +285,6 @@ def test_resolution(self): idx = pd.period_range(start='2013-04-01', periods=30, freq=freq) assert idx.resolution == expected - def test_comp_nat(self): - left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT, - pd.Period('2011-01-03')]) - right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')]) - - for lhs, rhs in [(left, right), - (left.astype(object), right.astype(object))]: - result = lhs == rhs - expected = np.array([False, False, True]) - tm.assert_numpy_array_equal(result, expected) - - result = lhs != rhs - expected = np.array([True, True, False]) - tm.assert_numpy_array_equal(result, expected) - - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs == pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT == rhs, expected) - - expected = np.array([True, True, True]) - tm.assert_numpy_array_equal(lhs != pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT != lhs, expected) - - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs < pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT > lhs, expected) - def test_value_counts_unique(self): # GH 7735 idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10) @@ -732,77 +705,6 @@ def test_pi_comp_period_nat(self): self._check(idx, f, exp) -class TestSeriesPeriod(object): - - def setup_method(self, method): - self.series = Series(period_range('2000-01-01', periods=10, freq='D')) - - def test_ops_series_timedelta(self): - # GH 13043 - s = pd.Series([pd.Period('2015-01-01', freq='D'), - pd.Period('2015-01-02', freq='D')], name='xxx') - assert s.dtype == object - - exp = pd.Series([pd.Period('2015-01-02', freq='D'), - pd.Period('2015-01-03', freq='D')], name='xxx') - tm.assert_series_equal(s + pd.Timedelta('1 days'), exp) - tm.assert_series_equal(pd.Timedelta('1 days') + s, exp) - - 
tm.assert_series_equal(s + pd.tseries.offsets.Day(), exp) - tm.assert_series_equal(pd.tseries.offsets.Day() + s, exp) - - def test_ops_series_period(self): - # GH 13043 - s = pd.Series([pd.Period('2015-01-01', freq='D'), - pd.Period('2015-01-02', freq='D')], name='xxx') - assert s.dtype == object - - p = pd.Period('2015-01-10', freq='D') - # dtype will be object because of original dtype - exp = pd.Series([9, 8], name='xxx', dtype=object) - tm.assert_series_equal(p - s, exp) - tm.assert_series_equal(s - p, -exp) - - s2 = pd.Series([pd.Period('2015-01-05', freq='D'), - pd.Period('2015-01-04', freq='D')], name='xxx') - assert s2.dtype == object - - exp = pd.Series([4, 2], name='xxx', dtype=object) - tm.assert_series_equal(s2 - s, exp) - tm.assert_series_equal(s - s2, -exp) - - -class TestFramePeriod(object): - - def test_ops_frame_period(self): - # GH 13043 - df = pd.DataFrame({'A': [pd.Period('2015-01', freq='M'), - pd.Period('2015-02', freq='M')], - 'B': [pd.Period('2014-01', freq='M'), - pd.Period('2014-02', freq='M')]}) - assert df['A'].dtype == object - assert df['B'].dtype == object - - p = pd.Period('2015-03', freq='M') - # dtype will be object because of original dtype - exp = pd.DataFrame({'A': np.array([2, 1], dtype=object), - 'B': np.array([14, 13], dtype=object)}) - tm.assert_frame_equal(p - df, exp) - tm.assert_frame_equal(df - p, -exp) - - df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'), - pd.Period('2015-06', freq='M')], - 'B': [pd.Period('2015-05', freq='M'), - pd.Period('2015-06', freq='M')]}) - assert df2['A'].dtype == object - assert df2['B'].dtype == object - - exp = pd.DataFrame({'A': np.array([4, 4], dtype=object), - 'B': np.array([16, 16], dtype=object)}) - tm.assert_frame_equal(df2 - df, exp) - tm.assert_frame_equal(df - df2, -exp) - - class TestPeriodIndexComparisons(object): def test_pi_pi_comp(self): @@ -942,3 +844,31 @@ def test_pi_nat_comp(self): with tm.assert_raises_regex( period.IncompatibleFrequency, msg): idx1 == diff + + # 
TODO: De-duplicate with test_pi_nat_comp + def test_comp_nat(self): + left = pd.PeriodIndex([pd.Period('2011-01-01'), pd.NaT, + pd.Period('2011-01-03')]) + right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period('2011-01-03')]) + + for lhs, rhs in [(left, right), + (left.astype(object), right.astype(object))]: + result = lhs == rhs + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = lhs != rhs + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs == pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT == rhs, expected) + + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(lhs != pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT != lhs, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs < pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT > lhs, expected) diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 44f48f3ea9833..1a6aabc2f258f 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -26,9 +26,120 @@ def freq(request): return request.param +class TestTimedeltaIndexComparisons(object): + def test_tdi_cmp_str_invalid(self): + # GH 13624 + tdi = TimedeltaIndex(['1 day', '2 days']) + + for left, right in [(tdi, 'a'), ('a', tdi)]: + with pytest.raises(TypeError): + left > right + + with pytest.raises(TypeError): + left == right + + with pytest.raises(TypeError): + left != right + + def test_comparisons_coverage(self): + rng = timedelta_range('1 days', periods=10) + + result = rng < rng[3] + exp = np.array([True, True, True] + [False] * 7) + tm.assert_numpy_array_equal(result, exp) + + # raise TypeError for now + pytest.raises(TypeError, rng.__lt__, rng[3].value) + + result = rng == list(rng) + exp 
= rng == rng + tm.assert_numpy_array_equal(result, exp) + + def test_comp_nat(self): + left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT, + pd.Timedelta('3 days')]) + right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')]) + + for lhs, rhs in [(left, right), + (left.astype(object), right.astype(object))]: + result = rhs == lhs + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = rhs != lhs + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs == pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT == rhs, expected) + + expected = np.array([True, True, True]) + tm.assert_numpy_array_equal(lhs != pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT != lhs, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(lhs < pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT > lhs, expected) + + def test_comparisons_nat(self): + tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT, + '1 day 00:00:01', '5 day 00:00:03']) + tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT, + '1 day 00:00:02', '5 days 00:00:03']) + tdarr = np.array([np.timedelta64(2, 'D'), + np.timedelta64(2, 'D'), np.timedelta64('nat'), + np.timedelta64('nat'), + np.timedelta64(1, 'D') + np.timedelta64(2, 's'), + np.timedelta64(5, 'D') + np.timedelta64(3, 's')]) + + cases = [(tdidx1, tdidx2), (tdidx1, tdarr)] + + # Check pd.NaT is handles as the same as np.nan + for idx1, idx2 in cases: + + result = idx1 < idx2 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx2 > idx1 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 <= idx2 + expected = np.array([True, False, False, False, True, True]) + 
tm.assert_numpy_array_equal(result, expected) + + result = idx2 >= idx1 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == idx2 + expected = np.array([False, False, False, False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 != idx2 + expected = np.array([True, True, True, True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + class TestTimedeltaIndexArithmetic(object): _holder = TimedeltaIndex + # ------------------------------------------------------------- + # Invalid Operations + + def test_tdi_add_str_invalid(self): + # GH 13624 + tdi = TimedeltaIndex(['1 day', '2 days']) + + with pytest.raises(TypeError): + tdi + 'a' + with pytest.raises(TypeError): + 'a' + tdi + + # ------------------------------------------------------------- + @pytest.mark.parametrize('box', [np.array, pd.Index]) def test_tdi_add_offset_array(self, box): # GH#18849 @@ -128,41 +239,68 @@ def test_tdi_with_offset_series(self, names): with tm.assert_produces_warning(PerformanceWarning): anchored - tdi - # TODO: Split by ops, better name - def test_numeric_compat(self): + def test_mul_int(self): idx = self._holder(np.arange(5, dtype='int64')) - didx = self._holder(np.arange(5, dtype='int64') ** 2) result = idx * 1 tm.assert_index_equal(result, idx) + def test_rmul_int(self): + idx = self._holder(np.arange(5, dtype='int64')) result = 1 * idx tm.assert_index_equal(result, idx) + def test_div_int(self): + idx = self._holder(np.arange(5, dtype='int64')) result = idx / 1 tm.assert_index_equal(result, idx) + def test_floordiv_int(self): + idx = self._holder(np.arange(5, dtype='int64')) result = idx // 1 tm.assert_index_equal(result, idx) + def test_mul_int_array_zerodim(self): + rng5 = np.arange(5, dtype='int64') + idx = self._holder(rng5) + expected = self._holder(rng5 * 5) result = idx * np.array(5, dtype='int64') - tm.assert_index_equal(result, - 
self._holder(np.arange(5, dtype='int64') * 5)) + tm.assert_index_equal(result, expected) + + def test_mul_int_array(self): + rng5 = np.arange(5, dtype='int64') + idx = self._holder(rng5) + didx = self._holder(rng5 ** 2) - result = idx * np.arange(5, dtype='int64') + result = idx * rng5 tm.assert_index_equal(result, didx) + def test_mul_int_series(self): + idx = self._holder(np.arange(5, dtype='int64')) + didx = self._holder(np.arange(5, dtype='int64') ** 2) + result = idx * Series(np.arange(5, dtype='int64')) + tm.assert_series_equal(result, Series(didx)) - rng5 = np.arange(5, dtype='float64') - result = idx * Series(rng5 + 0.1) - tm.assert_series_equal(result, - Series(self._holder(rng5 * (rng5 + 0.1)))) + def test_mul_float_series(self): + idx = self._holder(np.arange(5, dtype='int64')) + + rng5f = np.arange(5, dtype='float64') + result = idx * Series(rng5f + 0.1) + expected = Series(self._holder(rng5f * (rng5f + 0.1))) + tm.assert_series_equal(result, expected) + + def test_dti_mul_dti_raises(self): + idx = self._holder(np.arange(5, dtype='int64')) + with pytest.raises(TypeError): + idx * idx - # invalid - pytest.raises(TypeError, lambda: idx * idx) - pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3))) - pytest.raises(ValueError, lambda: idx * np.array([1, 2])) + def test_dti_mul_too_short_raises(self): + idx = self._holder(np.arange(5, dtype='int64')) + with pytest.raises(ValueError): + idx * self._holder(np.arange(3)) + with pytest.raises(ValueError): + idx * np.array([1, 2]) def test_ufunc_coercions(self): # normal ops are also tested in tseries/test_timedeltas.py diff --git a/pandas/tests/indexes/timedeltas/test_ops.py b/pandas/tests/indexes/timedeltas/test_ops.py index 081e299caa876..112c62b7e2f8d 100644 --- a/pandas/tests/indexes/timedeltas/test_ops.py +++ b/pandas/tests/indexes/timedeltas/test_ops.py @@ -212,33 +212,6 @@ def test_summary(self): result = idx.summary() assert result == expected - def test_comp_nat(self): - left = 
pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT, - pd.Timedelta('3 days')]) - right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')]) - - for lhs, rhs in [(left, right), - (left.astype(object), right.astype(object))]: - result = rhs == lhs - expected = np.array([False, False, True]) - tm.assert_numpy_array_equal(result, expected) - - result = rhs != lhs - expected = np.array([True, True, False]) - tm.assert_numpy_array_equal(result, expected) - - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs == pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT == rhs, expected) - - expected = np.array([True, True, True]) - tm.assert_numpy_array_equal(lhs != pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT != lhs, expected) - - expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs < pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT > lhs, expected) - def test_value_counts_unique(self): # GH 7735 @@ -493,23 +466,6 @@ def test_equals(self): class TestTimedeltas(object): _multiprocess_can_split_ = True - def test_ops_error_str(self): - # GH 13624 - tdi = TimedeltaIndex(['1 day', '2 days']) - - for l, r in [(tdi, 'a'), ('a', tdi)]: - with pytest.raises(TypeError): - l + r - - with pytest.raises(TypeError): - l > r - - with pytest.raises(TypeError): - l == r - - with pytest.raises(TypeError): - l != r - def test_timedelta_ops(self): # GH4984 # make sure ops return Timedelta @@ -564,18 +520,3 @@ def test_timedelta_ops(self): s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'), Timestamp('2015-02-15')]) assert s.diff().median() == timedelta(days=6) - - def test_compare_timedelta_series(self): - # regresssion test for GH5963 - s = pd.Series([timedelta(days=1), timedelta(days=2)]) - actual = s > timedelta(days=1) - expected = pd.Series([False, True]) - tm.assert_series_equal(actual, expected) - - def test_compare_timedelta_ndarray(self): - # GH11835 - periods = [Timedelta('0 days 01:00:00'), 
Timedelta('0 days 01:00:00')] - arr = np.array(periods) - result = arr[0] > arr - expected = np.array([False, False]) - tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 5a4d6dabbde3e..1af971e8a4326 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -203,61 +203,6 @@ def test_map(self): exp = Int64Index([f(x) for x in rng]) tm.assert_index_equal(result, exp) - def test_comparisons_nat(self): - - tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT, - '1 day 00:00:01', '5 day 00:00:03']) - tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT, - '1 day 00:00:02', '5 days 00:00:03']) - tdarr = np.array([np.timedelta64(2, 'D'), - np.timedelta64(2, 'D'), np.timedelta64('nat'), - np.timedelta64('nat'), - np.timedelta64(1, 'D') + np.timedelta64(2, 's'), - np.timedelta64(5, 'D') + np.timedelta64(3, 's')]) - - cases = [(tdidx1, tdidx2), (tdidx1, tdarr)] - - # Check pd.NaT is handles as the same as np.nan - for idx1, idx2 in cases: - - result = idx1 < idx2 - expected = np.array([True, False, False, False, True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = idx2 > idx1 - expected = np.array([True, False, False, False, True, False]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 <= idx2 - expected = np.array([True, False, False, False, True, True]) - tm.assert_numpy_array_equal(result, expected) - - result = idx2 >= idx1 - expected = np.array([True, False, False, False, True, True]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 == idx2 - expected = np.array([False, False, False, False, False, True]) - tm.assert_numpy_array_equal(result, expected) - - result = idx1 != idx2 - expected = np.array([True, True, True, True, True, False]) - tm.assert_numpy_array_equal(result, expected) - - def 
test_comparisons_coverage(self): - rng = timedelta_range('1 days', periods=10) - - result = rng < rng[3] - exp = np.array([True, True, True] + [False] * 7) - tm.assert_numpy_array_equal(result, exp) - - # raise TypeError for now - pytest.raises(TypeError, rng.__lt__, rng[3].value) - - result = rng == list(rng) - exp = rng == rng - tm.assert_numpy_array_equal(result, exp) - def test_total_seconds(self): # GH 10939 # test index diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index 8c574d8f8873b..64d4940082978 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -276,6 +276,14 @@ def test_comparison_object_array(self): assert res.shape == expected.shape assert (res == expected).all() + def test_compare_timedelta_ndarray(self): + # GH11835 + periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')] + arr = np.array(periods) + result = arr[0] > arr + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + class TestTimedeltas(object): _multiprocess_can_split_ = True diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py new file mode 100644 index 0000000000000..9db05ff590fed --- /dev/null +++ b/pandas/tests/series/test_arithmetic.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +from datetime import timedelta + +import pandas as pd +import pandas.util.testing as tm + + +class TestTimedeltaSeriesComparisons(object): + def test_compare_timedelta_series(self): + # regresssion test for GH5963 + s = pd.Series([timedelta(days=1), timedelta(days=2)]) + actual = s > timedelta(days=1) + expected = pd.Series([False, True]) + tm.assert_series_equal(actual, expected) + + +class TestPeriodSeriesArithmetic(object): + def test_ops_series_timedelta(self): + # GH 13043 + ser = pd.Series([pd.Period('2015-01-01', freq='D'), + pd.Period('2015-01-02', freq='D')], name='xxx') + assert ser.dtype == object + + expected = 
pd.Series([pd.Period('2015-01-02', freq='D'), + pd.Period('2015-01-03', freq='D')], name='xxx') + + result = ser + pd.Timedelta('1 days') + tm.assert_series_equal(result, expected) + + result = pd.Timedelta('1 days') + ser + tm.assert_series_equal(result, expected) + + result = ser + pd.tseries.offsets.Day() + tm.assert_series_equal(result, expected) + + result = pd.tseries.offsets.Day() + ser + tm.assert_series_equal(result, expected) + + def test_ops_series_period(self): + # GH 13043 + ser = pd.Series([pd.Period('2015-01-01', freq='D'), + pd.Period('2015-01-02', freq='D')], name='xxx') + assert ser.dtype == object + + per = pd.Period('2015-01-10', freq='D') + # dtype will be object because of original dtype + expected = pd.Series([9, 8], name='xxx', dtype=object) + tm.assert_series_equal(per - ser, expected) + tm.assert_series_equal(ser - per, -expected) + + s2 = pd.Series([pd.Period('2015-01-05', freq='D'), + pd.Period('2015-01-04', freq='D')], name='xxx') + assert s2.dtype == object + + expected = pd.Series([4, 2], name='xxx', dtype=object) + tm.assert_series_equal(s2 - ser, expected) + tm.assert_series_equal(ser - s2, -expected)
These are almost all cut/paste moving tests to the appropriate places. Two exceptions: - Some "TODO: de-duplicate this with that" notes (mostly to myself) - `TestTimedeltaIndexArithmetic.test_numeric_compat` badly needed to be split into more specific tests, so that is done here.
https://api.github.com/repos/pandas-dev/pandas/pulls/19317
2018-01-19T18:39:21Z
2018-01-21T15:25:56Z
2018-01-21T15:25:56Z
2018-01-21T18:36:27Z
Refactor core.internals into core/internals/ dir
diff --git a/pandas/core/internals/__init__.py b/pandas/core/internals/__init__.py new file mode 100644 index 0000000000000..59098b5da3bd9 --- /dev/null +++ b/pandas/core/internals/__init__.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +from .blocks import * # noqa +from .managers import * # noqa +from .joins import * # noqa + +from .blocks import (make_block, + Block, + IntBlock, FloatBlock, + DatetimeBlock, DatetimeTZBlock, TimeDeltaBlock, + CategoricalBlock, ObjectBlock, SparseBlock, + NonConsolidatableMixIn, + _block2d_to_blocknd, _factor_indexer, + _block_shape, _safe_reshape, + BlockPlacement) + +from .managers import (BlockManager, SingleBlockManager, + create_block_manager_from_arrays, + create_block_manager_from_blocks, + items_overlap_with_suffix) + +from .joins import concatenate_block_managers + + +__all__ = ["BlockManager", "SingleBlockManager", + "create_block_manager_from_arrays", + "create_block_manager_from_blocks", + "items_overlap_with_suffix", + "make_block", + "Block", + "IntBlock", "FloatBlock", + "DatetimeBlock", "DatetimeTZBlock", "TimeDeltaBlock", + "CategoricalBlock", "ObjectBlock", "SparseBlock", + "_block2d_to_blocknd", + "_factor_indexer", + "_block_shape", + "_safe_reshape", + "concatenate_block_managers", + "BlockPlacement"] diff --git a/pandas/core/internals.py b/pandas/core/internals/blocks.py similarity index 56% rename from pandas/core/internals.py rename to pandas/core/internals/blocks.py index d95062c54b4c6..eaa2f6205f8d8 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals/blocks.py @@ -1,26 +1,29 @@ +# -*- coding: utf-8 -*- import warnings -import copy from warnings import catch_warnings import inspect -import itertools import re -import operator from datetime import datetime, timedelta, date -from collections import defaultdict -from functools import partial import numpy as np -from pandas._libs import internals as libinternals +from pandas._libs import lib, tslib, internals as libinternals +from 
pandas._libs.internals import BlockPlacement +from pandas._libs.tslib import Timedelta +from pandas._libs.tslibs import conversion -from pandas.core.base import PandasObject +from pandas.util._validators import validate_bool_kwarg +from pandas import compat +from pandas.compat import range, zip + +from pandas.io.formats.printing import pprint_thing from pandas.core.dtypes.dtypes import ( ExtensionDtype, DatetimeTZDtype, CategoricalDtype) from pandas.core.dtypes.common import ( _TD_DTYPE, _NS_DTYPE, - _ensure_int64, _ensure_platform_int, + _ensure_platform_int, is_integer, is_dtype_equal, is_timedelta64_dtype, @@ -30,14 +33,12 @@ is_datetime64tz_dtype, is_bool_dtype, is_object_dtype, - is_datetimelike_v_numeric, - is_float_dtype, is_numeric_dtype, + is_float_dtype, is_numeric_v_string_like, is_extension_type, is_list_like, is_re, is_re_compilable, - is_scalar, - _get_dtype) + is_scalar) from pandas.core.dtypes.cast import ( maybe_downcast_to_dtype, maybe_upcast, @@ -54,28 +55,18 @@ _isna_compat, is_null_datelike_scalar) import pandas.core.dtypes.concat as _concat - from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex -from pandas.core.common import is_null_slice, _any_not_none + import pandas.core.algorithms as algos +import pandas.core.missing as missing +from pandas.core.base import PandasObject +from pandas.core.common import is_null_slice, _any_not_none -from pandas.core.index import Index, MultiIndex, _ensure_index -from pandas.core.indexing import maybe_convert_indices, length_of_indexer +from pandas.core.indexing import length_of_indexer from pandas.core.arrays.categorical import Categorical, _maybe_to_categorical from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.io.formats.printing import pprint_thing - -import pandas.core.missing as missing -from pandas.core.sparse.array import _maybe_to_sparse, SparseArray -from pandas._libs import lib, tslib -from pandas._libs.tslib import Timedelta -from pandas._libs.internals import 
BlockPlacement -from pandas._libs.tslibs import conversion -from pandas.util._decorators import cache_readonly -from pandas.util._validators import validate_bool_kwarg -from pandas import compat -from pandas.compat import range, map, zip, u +from pandas.core.sparse.array import SparseArray class Block(PandasObject): @@ -2973,2701 +2964,213 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, return klass(values, ndim=ndim, fastpath=fastpath, placement=placement) -# TODO: flexible with index=None and/or items=None - -class BlockManager(PandasObject): - """ - Core internal data structure to implement DataFrame, Series, Panel, etc. - - Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a - lightweight blocked set of labeled data to be manipulated by the DataFrame - public API class +# TODO: flexible with index=None and/or items=None - Attributes - ---------- - shape - ndim - axes - values - items - Methods - ------- - set_axis(axis, new_labels) - copy(deep=True) +def _merge_blocks(blocks, dtype=None, _can_consolidate=True): - get_dtype_counts - get_ftype_counts - get_dtypes - get_ftypes + if len(blocks) == 1: + return blocks[0] - apply(func, axes, block_filter_fn) + if _can_consolidate: - get_bool_data - get_numeric_data + if dtype is None: + if len({b.dtype for b in blocks}) != 1: + raise AssertionError("_merge_blocks are invalid!") + dtype = blocks[0].dtype - get_slice(slice_like, axis) - get(label) - iget(loc) - get_scalar(label_tup) + # FIXME: optimization potential in case all mgrs contain slices and + # combination of those slices is a slice, too. 
+ new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) + new_values = _vstack([b.values for b in blocks], dtype) - take(indexer, axis) - reindex_axis(new_labels, axis) - reindex_indexer(new_labels, indexer, axis) + argsort = np.argsort(new_mgr_locs) + new_values = new_values[argsort] + new_mgr_locs = new_mgr_locs[argsort] - delete(label) - insert(loc, label, value) - set(label, value) + return make_block(new_values, fastpath=True, placement=new_mgr_locs) - Parameters - ---------- + # no merge + return blocks - Notes - ----- - This is *not* a public API class - """ - __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', - '_is_consolidated', '_blknos', '_blklocs'] - - def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True): - self.axes = [_ensure_index(ax) for ax in axes] - self.blocks = tuple(blocks) - - for block in blocks: - if block.is_sparse: - if len(block.mgr_locs) != 1: - raise AssertionError("Sparse block refers to multiple " - "items") +def _extend_blocks(result, blocks=None): + """ return a new extended blocks, givin the result """ + from .managers import BlockManager + if blocks is None: + blocks = [] + if isinstance(result, list): + for r in result: + if isinstance(r, list): + blocks.extend(r) else: - if self.ndim != block.ndim: - raise AssertionError( - 'Number of Block dimensions ({block}) must equal ' - 'number of axes ({self})'.format(block=block.ndim, - self=self.ndim)) - - if do_integrity_check: - self._verify_integrity() - - self._consolidate_check() - - self._rebuild_blknos_and_blklocs() - - def make_empty(self, axes=None): - """ return an empty BlockManager with the items axis of len 0 """ - if axes is None: - axes = [_ensure_index([])] + [_ensure_index(a) - for a in self.axes[1:]] - - # preserve dtype if possible - if self.ndim == 1: - blocks = np.array([], dtype=self.array_dtype) - else: - blocks = [] - return self.__class__(blocks, axes) - - def __nonzero__(self): - return True - - # 
Python3 compat - __bool__ = __nonzero__ - - @property - def shape(self): - return tuple(len(ax) for ax in self.axes) - - @property - def ndim(self): - return len(self.axes) - - def set_axis(self, axis, new_labels): - new_labels = _ensure_index(new_labels) - old_len = len(self.axes[axis]) - new_len = len(new_labels) - - if new_len != old_len: - raise ValueError( - 'Length mismatch: Expected axis has {old} elements, new ' - 'values have {new} elements'.format(old=old_len, new=new_len)) - - self.axes[axis] = new_labels - - def rename_axis(self, mapper, axis, copy=True, level=None): - """ - Rename one of axes. - - Parameters - ---------- - mapper : unary callable - axis : int - copy : boolean, default True - level : int, default None - - """ - obj = self.copy(deep=copy) - obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) - return obj - - def add_prefix(self, prefix): - f = partial('{prefix}{}'.format, prefix=prefix) - return self.rename_axis(f, axis=0) - - def add_suffix(self, suffix): - f = partial('{}{suffix}'.format, suffix=suffix) - return self.rename_axis(f, axis=0) - - @property - def _is_single_block(self): - if self.ndim == 1: - return True - - if len(self.blocks) != 1: - return False - - blk = self.blocks[0] - return (blk.mgr_locs.is_slice_like and - blk.mgr_locs.as_slice == slice(0, len(self), 1)) - - def _rebuild_blknos_and_blklocs(self): - """ - Update mgr._blknos / mgr._blklocs. 
- """ - new_blknos = np.empty(self.shape[0], dtype=np.int64) - new_blklocs = np.empty(self.shape[0], dtype=np.int64) - new_blknos.fill(-1) - new_blklocs.fill(-1) - - for blkno, blk in enumerate(self.blocks): - rl = blk.mgr_locs - new_blknos[rl.indexer] = blkno - new_blklocs[rl.indexer] = np.arange(len(rl)) - - if (new_blknos == -1).any(): - raise AssertionError("Gaps in blk ref_locs") + blocks.append(r) + elif isinstance(result, BlockManager): + blocks.extend(result.blocks) + else: + blocks.append(result) + return blocks - self._blknos = new_blknos - self._blklocs = new_blklocs - # make items read only for now - def _get_items(self): - return self.axes[0] +def _block_shape(values, ndim=1, shape=None): + """ guarantee the shape of the values to be at least 1 d """ + if values.ndim < ndim: + if shape is None: + shape = values.shape + values = values.reshape(tuple((1, ) + shape)) + return values - items = property(fget=_get_items) - def _get_counts(self, f): - """ return a dict of the counts of the function in BlockManager """ - self._consolidate_inplace() - counts = dict() - for b in self.blocks: - v = f(b) - counts[v] = counts.get(v, 0) + b.shape[0] - return counts +def _vstack(to_stack, dtype): - def get_dtype_counts(self): - return self._get_counts(lambda b: b.dtype.name) + # work around NumPy 1.6 bug + if dtype == _NS_DTYPE or dtype == _TD_DTYPE: + new_values = np.vstack([x.view('i8') for x in to_stack]) + return new_values.view(dtype) - def get_ftype_counts(self): - return self._get_counts(lambda b: b.ftype) + else: + return np.vstack(to_stack) - def get_dtypes(self): - dtypes = np.array([blk.dtype for blk in self.blocks]) - return algos.take_1d(dtypes, self._blknos, allow_fill=False) - def get_ftypes(self): - ftypes = np.array([blk.ftype for blk in self.blocks]) - return algos.take_1d(ftypes, self._blknos, allow_fill=False) +def _block2d_to_blocknd(values, placement, shape, labels, ref_items): + """ pivot to the labels shape """ + panel_shape = 
(len(placement),) + shape - def __getstate__(self): - block_values = [b.values for b in self.blocks] - block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] - axes_array = [ax for ax in self.axes] - - extra_state = { - '0.14.1': { - 'axes': axes_array, - 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) - for b in self.blocks] - } - } - - # First three elements of the state are to maintain forward - # compatibility with 0.13.1. - return axes_array, block_values, block_items, extra_state + # TODO: lexsort depth needs to be 2!! - def __setstate__(self, state): - def unpickle_block(values, mgr_locs): - # numpy < 1.7 pickle compat - if values.dtype == 'M8[us]': - values = values.astype('M8[ns]') - return make_block(values, placement=mgr_locs) - - if (isinstance(state, tuple) and len(state) >= 4 and - '0.14.1' in state[3]): - state = state[3]['0.14.1'] - self.axes = [_ensure_index(ax) for ax in state['axes']] - self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) - for b in state['blocks']) - else: - # discard anything after 3rd, support beta pickling format for a - # little while longer - ax_arrays, bvalues, bitems = state[:3] - - self.axes = [_ensure_index(ax) for ax in ax_arrays] - - if len(bitems) == 1 and self.axes[0].equals(bitems[0]): - # This is a workaround for pre-0.14.1 pickles that didn't - # support unpickling multi-block frames/panels with non-unique - # columns/items, because given a manager with items ["a", "b", - # "a"] there's no way of knowing which block's "a" is where. - # - # Single-block case can be supported under the assumption that - # block items corresponded to manager items 1-to-1. - all_mgr_locs = [slice(0, len(bitems[0]))] - else: - all_mgr_locs = [self.axes[0].get_indexer(blk_items) - for blk_items in bitems] + # Create observation selection vector using major and minor + # labels, for converting to panel format. 
+ selector = _factor_indexer(shape[1:], labels) + mask = np.zeros(np.prod(shape), dtype=bool) + mask.put(selector, True) - self.blocks = tuple( - unpickle_block(values, mgr_locs) - for values, mgr_locs in zip(bvalues, all_mgr_locs)) + if mask.all(): + pvalues = np.empty(panel_shape, dtype=values.dtype) + else: + dtype, fill_value = maybe_promote(values.dtype) + pvalues = np.empty(panel_shape, dtype=dtype) + pvalues.fill(fill_value) - self._post_setstate() + for i in range(len(placement)): + pvalues[i].flat[mask] = values[:, i] - def _post_setstate(self): - self._is_consolidated = False - self._known_consolidated = False - self._rebuild_blknos_and_blklocs() + return make_block(pvalues, placement=placement) - def __len__(self): - return len(self.items) - def __unicode__(self): - output = pprint_thing(self.__class__.__name__) - for i, ax in enumerate(self.axes): - if i == 0: - output += u('\nItems: {ax}'.format(ax=ax)) - else: - output += u('\nAxis {i}: {ax}'.format(i=i, ax=ax)) - - for block in self.blocks: - output += u('\n{block}'.format(block=pprint_thing(block))) - return output - - def _verify_integrity(self): - mgr_shape = self.shape - tot_items = sum(len(x.mgr_locs) for x in self.blocks) - for block in self.blocks: - if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: - construction_error(tot_items, block.shape[1:], self.axes) - if len(self.items) != tot_items: - raise AssertionError('Number of manager items must equal union of ' - 'block items\n# manager items: {0}, # ' - 'tot_items: {1}'.format( - len(self.items), tot_items)) - - def apply(self, f, axes=None, filter=None, do_integrity_check=False, - consolidate=True, **kwargs): - """ - iterate over the blocks, collect and create a new block manager +def _factor_indexer(shape, labels): + """ + given a tuple of shape and a list of Categorical labels, return the + expanded label indexer + """ + mult = np.array(shape)[::-1].cumprod()[::-1] + return _ensure_platform_int( + np.sum(np.array(labels).T * 
np.append(mult, [1]), axis=1).T) - Parameters - ---------- - f : the callable or function name to operate on at the block level - axes : optional (if not supplied, use self.axes) - filter : list, if supplied, only call the block if the filter is in - the block - do_integrity_check : boolean, default False. Do the block manager - integrity check - consolidate: boolean, default True. Join together blocks having same - dtype - Returns - ------- - Block Manager (new object) +def _safe_reshape(arr, new_shape): + """ + If possible, reshape `arr` to have shape `new_shape`, + with a couple of exceptions (see gh-13012): - """ + 1) If `arr` is a Categorical or Index, `arr` will be + returned as is. + 2) If `arr` is a Series, the `_values` attribute will + be reshaped and returned. - result_blocks = [] + Parameters + ---------- + arr : array-like, object to be reshaped + new_shape : int or tuple of ints, the new shape + """ + if isinstance(arr, ABCSeries): + arr = arr._values + if not isinstance(arr, Categorical): + arr = arr.reshape(new_shape) + return arr - # filter kwarg is used in replace-* family of methods - if filter is not None: - filter_locs = set(self.items.get_indexer_for(filter)) - if len(filter_locs) == len(self.items): - # All items are included, as if there were no filtering - filter = None - else: - kwargs['filter'] = filter_locs - if consolidate: - self._consolidate_inplace() +def _putmask_smart(v, m, n): + """ + Return a new ndarray, try to preserve dtype if possible. - if f == 'where': - align_copy = True - if kwargs.get('align', True): - align_keys = ['other', 'cond'] - else: - align_keys = ['cond'] - elif f == 'putmask': - align_copy = False - if kwargs.get('align', True): - align_keys = ['new', 'mask'] - else: - align_keys = ['mask'] - elif f == 'eval': - align_copy = False - align_keys = ['other'] - elif f == 'fillna': - # fillna internally does putmask, maybe it's better to do this - # at mgr, not block level? 
- align_copy = False - align_keys = ['value'] - else: - align_keys = [] + Parameters + ---------- + v : `values`, updated in-place (array like) + m : `mask`, applies to both sides (array like) + n : `new values` either scalar or an array like aligned with `values` - aligned_args = dict((k, kwargs[k]) - for k in align_keys - if hasattr(kwargs[k], 'reindex_axis')) + Returns + ------- + values : ndarray with updated values + this *may* be a copy of the original - for b in self.blocks: - if filter is not None: - if not b.mgr_locs.isin(filter_locs).any(): - result_blocks.append(b) - continue - - if aligned_args: - b_items = self.items[b.mgr_locs.indexer] - - for k, obj in aligned_args.items(): - axis = getattr(obj, '_info_axis_number', 0) - kwargs[k] = obj.reindex(b_items, axis=axis, - copy=align_copy) - - kwargs['mgr'] = self - applied = getattr(b, f)(**kwargs) - result_blocks = _extend_blocks(applied, result_blocks) - - if len(result_blocks) == 0: - return self.make_empty(axes or self.axes) - bm = self.__class__(result_blocks, axes or self.axes, - do_integrity_check=do_integrity_check) - bm._consolidate_inplace() - return bm - - def reduction(self, f, axis=0, consolidate=True, transposed=False, - **kwargs): - """ - iterate over the blocks, collect and create a new block manager. - This routine is intended for reduction type operations and - will do inference on the generated blocks. + See Also + -------- + ndarray.putmask + """ - Parameters - ---------- - f: the callable or function name to operate on at the block level - axis: reduction axis, default 0 - consolidate: boolean, default True. 
Join together blocks having same - dtype - transposed: boolean, default False - we are holding transposed data + # we cannot use np.asarray() here as we cannot have conversions + # that numpy does when numeric are mixed with strings - Returns - ------- - Block Manager (new object) + # n should be the length of the mask or a scalar here + if not is_list_like(n): + n = np.repeat(n, len(m)) + elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar + n = np.repeat(np.array(n, ndmin=1), len(m)) - """ + # see if we are only masking values that if putted + # will work in the current dtype + try: + nn = n[m] - if consolidate: - self._consolidate_inplace() + # make sure that we have a nullable type + # if we have nulls + if not _isna_compat(v, nn[0]): + raise ValueError - axes, blocks = [], [] - for b in self.blocks: - kwargs['mgr'] = self - axe, block = getattr(b, f)(axis=axis, **kwargs) + # we ignore ComplexWarning here + with catch_warnings(record=True): + nn_at = nn.astype(v.dtype) - axes.append(axe) - blocks.append(block) + # avoid invalid dtype comparisons + # between numbers & strings - # note that some DatetimeTZ, Categorical are always ndim==1 - ndim = {b.ndim for b in blocks} + # only compare integers/floats + # don't compare integers to datetimelikes + if (not is_numeric_v_string_like(nn, nn_at) and + (is_float_dtype(nn.dtype) or + is_integer_dtype(nn.dtype) and + is_float_dtype(nn_at.dtype) or + is_integer_dtype(nn_at.dtype))): - if 2 in ndim: + comp = (nn == nn_at) + if is_list_like(comp) and comp.all(): + nv = v.copy() + nv[m] = nn_at + return nv + except (ValueError, IndexError, TypeError): + pass - new_axes = list(self.axes) + n = np.asarray(n) - # multiple blocks that are reduced - if len(blocks) > 1: - new_axes[1] = axes[0] + def _putmask_preserve(nv, n): + try: + nv[m] = n[m] + except (IndexError, ValueError): + nv[m] = n + return nv - # reset the placement to the original - for b, sb in zip(blocks, self.blocks): - b.mgr_locs = sb.mgr_locs + # 
preserves dtype if possible + if v.dtype.kind == n.dtype.kind: + return _putmask_preserve(v, n) - else: - new_axes[axis] = Index(np.concatenate( - [ax.values for ax in axes])) + # change the dtype if needed + dtype, _ = maybe_promote(n.dtype) - if transposed: - new_axes = new_axes[::-1] - blocks = [b.make_block(b.values.T, - placement=np.arange(b.shape[1]) - ) for b in blocks] - - return self.__class__(blocks, new_axes) - - # 0 ndim - if 0 in ndim and 1 not in ndim: - values = np.array([b.values for b in blocks]) - if len(values) == 1: - return values.item() - blocks = [make_block(values, ndim=1)] - axes = Index([ax[0] for ax in axes]) - - # single block - values = _concat._concat_compat([b.values for b in blocks]) - - # compute the orderings of our original data - if len(self.blocks) > 1: - - indexer = np.empty(len(self.axes[0]), dtype=np.intp) - i = 0 - for b in self.blocks: - for j in b.mgr_locs: - indexer[j] = i - i = i + 1 - - values = values.take(indexer) - - return SingleBlockManager( - [make_block(values, - ndim=1, - placement=np.arange(len(values)))], - axes[0]) - - def isna(self, **kwargs): - return self.apply('apply', **kwargs) - - def where(self, **kwargs): - return self.apply('where', **kwargs) - - def eval(self, **kwargs): - return self.apply('eval', **kwargs) - - def quantile(self, **kwargs): - return self.reduction('quantile', **kwargs) - - def setitem(self, **kwargs): - return self.apply('setitem', **kwargs) - - def putmask(self, **kwargs): - return self.apply('putmask', **kwargs) - - def diff(self, **kwargs): - return self.apply('diff', **kwargs) - - def interpolate(self, **kwargs): - return self.apply('interpolate', **kwargs) - - def shift(self, **kwargs): - return self.apply('shift', **kwargs) - - def fillna(self, **kwargs): - return self.apply('fillna', **kwargs) - - def downcast(self, **kwargs): - return self.apply('downcast', **kwargs) - - def astype(self, dtype, **kwargs): - return self.apply('astype', dtype=dtype, **kwargs) - - def 
convert(self, **kwargs): - return self.apply('convert', **kwargs) - - def replace(self, **kwargs): - return self.apply('replace', **kwargs) - - def replace_list(self, src_list, dest_list, inplace=False, regex=False, - mgr=None): - """ do a list replace """ - - inplace = validate_bool_kwarg(inplace, 'inplace') - - if mgr is None: - mgr = self - - # figure out our mask a-priori to avoid repeated replacements - values = self.as_array() - - def comp(s): - if isna(s): - return isna(values) - return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq) - - masks = [comp(s) for i, s in enumerate(src_list)] - - result_blocks = [] - src_len = len(src_list) - 1 - for blk in self.blocks: - - # its possible to get multiple result blocks here - # replace ALWAYS will return a list - rb = [blk if inplace else blk.copy()] - for i, (s, d) in enumerate(zip(src_list, dest_list)): - new_rb = [] - for b in rb: - if b.dtype == np.object_: - convert = i == src_len - result = b.replace(s, d, inplace=inplace, regex=regex, - mgr=mgr, convert=convert) - new_rb = _extend_blocks(result, new_rb) - else: - # get our mask for this element, sized to this - # particular block - m = masks[i][b.mgr_locs.indexer] - if m.any(): - b = b.coerce_to_target_dtype(d) - new_rb.extend(b.putmask(m, d, inplace=True)) - else: - new_rb.append(b) - rb = new_rb - result_blocks.extend(rb) - - bm = self.__class__(result_blocks, self.axes) - bm._consolidate_inplace() - return bm - - def reshape_nd(self, axes, **kwargs): - """ a 2d-nd reshape operation on a BlockManager """ - return self.apply('reshape_nd', axes=axes, **kwargs) - - def is_consolidated(self): - """ - Return True if more than one block with the same dtype - """ - if not self._known_consolidated: - self._consolidate_check() - return self._is_consolidated - - def _consolidate_check(self): - ftypes = [blk.ftype for blk in self.blocks] - self._is_consolidated = len(ftypes) == len(set(ftypes)) - self._known_consolidated = True - - @property - def 
is_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return len(self.blocks) > 1 - - @property - def is_numeric_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return all(block.is_numeric for block in self.blocks) - - @property - def is_datelike_mixed_type(self): - # Warning, consolidation needs to get checked upstairs - self._consolidate_inplace() - return any(block.is_datelike for block in self.blocks) - - @property - def is_view(self): - """ return a boolean if we are a single block and are a view """ - if len(self.blocks) == 1: - return self.blocks[0].is_view - - # It is technically possible to figure out which blocks are views - # e.g. [ b.values.base is not None for b in self.blocks ] - # but then we have the case of possibly some blocks being a view - # and some blocks not. setting in theory is possible on the non-view - # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit - # complicated - - return False - - def get_bool_data(self, copy=False): - """ - Parameters - ---------- - copy : boolean, default False - Whether to copy the blocks - """ - self._consolidate_inplace() - return self.combine([b for b in self.blocks if b.is_bool], copy) - - def get_numeric_data(self, copy=False): - """ - Parameters - ---------- - copy : boolean, default False - Whether to copy the blocks - """ - self._consolidate_inplace() - return self.combine([b for b in self.blocks if b.is_numeric], copy) - - def combine(self, blocks, copy=True): - """ return a new manager with the blocks """ - if len(blocks) == 0: - return self.make_empty() - - # FIXME: optimization potential - indexer = np.sort(np.concatenate([b.mgr_locs.as_array - for b in blocks])) - inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) - - new_blocks = [] - for b in blocks: - b = b.copy(deep=copy) - b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array, - axis=0, allow_fill=False) - new_blocks.append(b) - - axes = list(self.axes) - axes[0] = self.items.take(indexer) - - return self.__class__(new_blocks, axes, do_integrity_check=False) - - def get_slice(self, slobj, axis=0): - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - if axis == 0: - new_blocks = self._slice_take_blocks_ax0(slobj) - else: - slicer = [slice(None)] * (axis + 1) - slicer[axis] = slobj - slicer = tuple(slicer) - new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] - - new_axes = list(self.axes) - new_axes[axis] = new_axes[axis][slobj] - - bm = self.__class__(new_blocks, new_axes, do_integrity_check=False, - fastpath=True) - bm._consolidate_inplace() - return bm - - def __contains__(self, item): - return item in self.items - - @property - def nblocks(self): - return len(self.blocks) - - def copy(self, deep=True, mgr=None): - """ - Make deep or shallow copy of BlockManager - - Parameters - ---------- - deep : boolean o rstring, default True - If 
False, return shallow copy (do not copy data) - If 'all', copy data and a deep copy of the index - - Returns - ------- - copy : BlockManager - """ - - # this preserves the notion of view copying of axes - if deep: - if deep == 'all': - copy = lambda ax: ax.copy(deep=True) - else: - copy = lambda ax: ax.view() - new_axes = [copy(ax) for ax in self.axes] - else: - new_axes = list(self.axes) - return self.apply('copy', axes=new_axes, deep=deep, - do_integrity_check=False) - - def as_array(self, transpose=False, items=None): - """Convert the blockmanager data into an numpy array. - - Parameters - ---------- - transpose : boolean, default False - If True, transpose the return array - items : list of strings or None - Names of block items that will be included in the returned - array. ``None`` means that all block items will be used - - Returns - ------- - arr : ndarray - """ - if len(self.blocks) == 0: - arr = np.empty(self.shape, dtype=float) - return arr.transpose() if transpose else arr - - if items is not None: - mgr = self.reindex_axis(items, axis=0) - else: - mgr = self - - if self._is_single_block or not self.is_mixed_type: - arr = mgr.blocks[0].get_values() - else: - arr = mgr._interleave() - - return arr.transpose() if transpose else arr - - def _interleave(self): - """ - Return ndarray from blocks with specified item order - Items must be contained in the blocks - """ - dtype = _interleaved_dtype(self.blocks) - - result = np.empty(self.shape, dtype=dtype) - - if result.shape[0] == 0: - # Workaround for numpy 1.7 bug: - # - # >>> a = np.empty((0,10)) - # >>> a[slice(0,0)] - # array([], shape=(0, 10), dtype=float64) - # >>> a[[]] - # Traceback (most recent call last): - # File "<stdin>", line 1, in <module> - # IndexError: index 0 is out of bounds for axis 0 with size 0 - return result - - itemmask = np.zeros(self.shape[0]) - - for blk in self.blocks: - rl = blk.mgr_locs - result[rl.indexer] = blk.get_values(dtype) - itemmask[rl.indexer] = 1 - - if not 
itemmask.all(): - raise AssertionError('Some items were not contained in blocks') - - return result - - def to_dict(self, copy=True): - """ - Return a dict of str(dtype) -> BlockManager - - Parameters - ---------- - copy : boolean, default True - - Returns - ------- - values : a dict of dtype -> BlockManager - - Notes - ----- - This consolidates based on str(dtype) - """ - self._consolidate_inplace() - - bd = {} - for b in self.blocks: - bd.setdefault(str(b.dtype), []).append(b) - - return {dtype: self.combine(blocks, copy=copy) - for dtype, blocks in bd.items()} - - def xs(self, key, axis=1, copy=True, takeable=False): - if axis < 1: - raise AssertionError( - 'Can only take xs across axis >= 1, got {ax}'.format(ax=axis)) - - # take by position - if takeable: - loc = key - else: - loc = self.axes[axis].get_loc(key) - - slicer = [slice(None, None) for _ in range(self.ndim)] - slicer[axis] = loc - slicer = tuple(slicer) - - new_axes = list(self.axes) - - # could be an array indexer! - if isinstance(loc, (slice, np.ndarray)): - new_axes[axis] = new_axes[axis][loc] - else: - new_axes.pop(axis) - - new_blocks = [] - if len(self.blocks) > 1: - # we must copy here as we are mixed type - for blk in self.blocks: - newb = make_block(values=blk.values[slicer], - klass=blk.__class__, fastpath=True, - placement=blk.mgr_locs) - new_blocks.append(newb) - elif len(self.blocks) == 1: - block = self.blocks[0] - vals = block.values[slicer] - if copy: - vals = vals.copy() - new_blocks = [make_block(values=vals, - placement=block.mgr_locs, - klass=block.__class__, - fastpath=True, )] - - return self.__class__(new_blocks, new_axes) - - def fast_xs(self, loc): - """ - get a cross sectional for a given location in the - items ; handle dups - - return the result, is *could* be a view in the case of a - single block - """ - if len(self.blocks) == 1: - return self.blocks[0].iget((slice(None), loc)) - - items = self.items - - # non-unique (GH4726) - if not items.is_unique: - result = 
self._interleave() - if self.ndim == 2: - result = result.T - return result[loc] - - # unique - dtype = _interleaved_dtype(self.blocks) - n = len(items) - result = np.empty(n, dtype=dtype) - for blk in self.blocks: - # Such assignment may incorrectly coerce NaT to None - # result[blk.mgr_locs] = blk._slice((slice(None), loc)) - for i, rl in enumerate(blk.mgr_locs): - result[rl] = blk._try_coerce_result(blk.iget((i, loc))) - - return result - - def consolidate(self): - """ - Join together blocks having same dtype - - Returns - ------- - y : BlockManager - """ - if self.is_consolidated(): - return self - - bm = self.__class__(self.blocks, self.axes) - bm._is_consolidated = False - bm._consolidate_inplace() - return bm - - def _consolidate_inplace(self): - if not self.is_consolidated(): - self.blocks = tuple(_consolidate(self.blocks)) - self._is_consolidated = True - self._known_consolidated = True - self._rebuild_blknos_and_blklocs() - - def get(self, item, fastpath=True): - """ - Return values for selected item (ndarray or BlockManager). 
- """ - if self.items.is_unique: - - if not isna(item): - loc = self.items.get_loc(item) - else: - indexer = np.arange(len(self.items))[isna(self.items)] - - # allow a single nan location indexer - if not is_scalar(indexer): - if len(indexer) == 1: - loc = indexer.item() - else: - raise ValueError("cannot label index with a null key") - - return self.iget(loc, fastpath=fastpath) - else: - - if isna(item): - raise TypeError("cannot label index with a null key") - - indexer = self.items.get_indexer_for([item]) - return self.reindex_indexer(new_axis=self.items[indexer], - indexer=indexer, axis=0, - allow_dups=True) - - def iget(self, i, fastpath=True): - """ - Return the data as a SingleBlockManager if fastpath=True and possible - - Otherwise return as a ndarray - """ - block = self.blocks[self._blknos[i]] - values = block.iget(self._blklocs[i]) - if not fastpath or not block._box_to_block_values or values.ndim != 1: - return values - - # fastpath shortcut for select a single-dim from a 2-dim BM - return SingleBlockManager( - [block.make_block_same_class(values, - placement=slice(0, len(values)), - ndim=1, fastpath=True)], - self.axes[1]) - - def get_scalar(self, tup): - """ - Retrieve single item - """ - full_loc = [ax.get_loc(x) for ax, x in zip(self.axes, tup)] - blk = self.blocks[self._blknos[full_loc[0]]] - values = blk.values - - # FIXME: this may return non-upcasted types? - if values.ndim == 1: - return values[full_loc[1]] - - full_loc[0] = self._blklocs[full_loc[0]] - return values[tuple(full_loc)] - - def delete(self, item): - """ - Delete selected item (items if non-unique) in-place. 
- """ - indexer = self.items.get_loc(item) - - is_deleted = np.zeros(self.shape[0], dtype=np.bool_) - is_deleted[indexer] = True - ref_loc_offset = -is_deleted.cumsum() - - is_blk_deleted = [False] * len(self.blocks) - - if isinstance(indexer, int): - affected_start = indexer - else: - affected_start = is_deleted.nonzero()[0][0] - - for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): - blk = self.blocks[blkno] - bml = blk.mgr_locs - blk_del = is_deleted[bml.indexer].nonzero()[0] - - if len(blk_del) == len(bml): - is_blk_deleted[blkno] = True - continue - elif len(blk_del) != 0: - blk.delete(blk_del) - bml = blk.mgr_locs - - blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) - - # FIXME: use Index.delete as soon as it uses fastpath=True - self.axes[0] = self.items[~is_deleted] - self.blocks = tuple(b for blkno, b in enumerate(self.blocks) - if not is_blk_deleted[blkno]) - self._shape = None - self._rebuild_blknos_and_blklocs() - - def set(self, item, value, check=False): - """ - Set new item in-place. Does not consolidate. 
Adds new Block if not - contained in the current set of items - if check, then validate that we are not setting the same data in-place - """ - # FIXME: refactor, clearly separate broadcasting & zip-like assignment - # can prob also fix the various if tests for sparse/categorical - - value_is_extension_type = is_extension_type(value) - - # categorical/spares/datetimetz - if value_is_extension_type: - - def value_getitem(placement): - return value - else: - if value.ndim == self.ndim - 1: - value = _safe_reshape(value, (1,) + value.shape) - - def value_getitem(placement): - return value - else: - - def value_getitem(placement): - return value[placement.indexer] - - if value.shape[1:] != self.shape[1:]: - raise AssertionError('Shape of new values must be compatible ' - 'with manager shape') - - try: - loc = self.items.get_loc(item) - except KeyError: - # This item wasn't present, just insert at end - self.insert(len(self.items), item, value) - return - - if isinstance(loc, int): - loc = [loc] - - blknos = self._blknos[loc] - blklocs = self._blklocs[loc].copy() - - unfit_mgr_locs = [] - unfit_val_locs = [] - removed_blknos = [] - for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks), - group=True): - blk = self.blocks[blkno] - blk_locs = blklocs[val_locs.indexer] - if blk.should_store(value): - blk.set(blk_locs, value_getitem(val_locs), check=check) - else: - unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) - unfit_val_locs.append(val_locs) - - # If all block items are unfit, schedule the block for removal. 
- if len(val_locs) == len(blk.mgr_locs): - removed_blknos.append(blkno) - else: - self._blklocs[blk.mgr_locs.indexer] = -1 - blk.delete(blk_locs) - self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) - - if len(removed_blknos): - # Remove blocks & update blknos accordingly - is_deleted = np.zeros(self.nblocks, dtype=np.bool_) - is_deleted[removed_blknos] = True - - new_blknos = np.empty(self.nblocks, dtype=np.int64) - new_blknos.fill(-1) - new_blknos[~is_deleted] = np.arange(self.nblocks - - len(removed_blknos)) - self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, - allow_fill=False) - self.blocks = tuple(blk for i, blk in enumerate(self.blocks) - if i not in set(removed_blknos)) - - if unfit_val_locs: - unfit_mgr_locs = np.concatenate(unfit_mgr_locs) - unfit_count = len(unfit_mgr_locs) - - new_blocks = [] - if value_is_extension_type: - # This code (ab-)uses the fact that sparse blocks contain only - # one item. - new_blocks.extend( - make_block(values=value.copy(), ndim=self.ndim, - placement=slice(mgr_loc, mgr_loc + 1)) - for mgr_loc in unfit_mgr_locs) - - self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + - len(self.blocks)) - self._blklocs[unfit_mgr_locs] = 0 - - else: - # unfit_val_locs contains BlockPlacement objects - unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) - - new_blocks.append( - make_block(values=value_getitem(unfit_val_items), - ndim=self.ndim, placement=unfit_mgr_locs)) - - self._blknos[unfit_mgr_locs] = len(self.blocks) - self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) - - self.blocks += tuple(new_blocks) - - # Newly created block's dtype may already be present. - self._known_consolidated = False - - def insert(self, loc, item, value, allow_duplicates=False): - """ - Insert item at selected position. 
- - Parameters - ---------- - loc : int - item : hashable - value : array_like - allow_duplicates: bool - If False, trying to insert non-unique item will raise - - """ - if not allow_duplicates and item in self.items: - # Should this be a different kind of error?? - raise ValueError('cannot insert {}, already exists'.format(item)) - - if not isinstance(loc, int): - raise TypeError("loc must be int") - - # insert to the axis; this could possibly raise a TypeError - new_axis = self.items.insert(loc, item) - - block = make_block(values=value, ndim=self.ndim, - placement=slice(loc, loc + 1)) - - for blkno, count in _fast_count_smallints(self._blknos[loc:]): - blk = self.blocks[blkno] - if count == len(blk.mgr_locs): - blk.mgr_locs = blk.mgr_locs.add(1) - else: - new_mgr_locs = blk.mgr_locs.as_array.copy() - new_mgr_locs[new_mgr_locs >= loc] += 1 - blk.mgr_locs = new_mgr_locs - - if loc == self._blklocs.shape[0]: - # np.append is a lot faster (at least in numpy 1.7.1), let's use it - # if we can. - self._blklocs = np.append(self._blklocs, 0) - self._blknos = np.append(self._blknos, len(self.blocks)) - else: - self._blklocs = np.insert(self._blklocs, loc, 0) - self._blknos = np.insert(self._blknos, loc, len(self.blocks)) - - self.axes[0] = new_axis - self.blocks += (block,) - self._shape = None - - self._known_consolidated = False - - if len(self.blocks) > 100: - self._consolidate_inplace() - - def reindex_axis(self, new_index, axis, method=None, limit=None, - fill_value=None, copy=True): - """ - Conform block manager to new index. 
- """ - new_index = _ensure_index(new_index) - new_index, indexer = self.axes[axis].reindex(new_index, method=method, - limit=limit) - - return self.reindex_indexer(new_index, indexer, axis=axis, - fill_value=fill_value, copy=copy) - - def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, - allow_dups=False, copy=True): - """ - Parameters - ---------- - new_axis : Index - indexer : ndarray of int64 or None - axis : int - fill_value : object - allow_dups : bool - - pandas-indexer with -1's only. - """ - if indexer is None: - if new_axis is self.axes[axis] and not copy: - return self - - result = self.copy(deep=copy) - result.axes = list(self.axes) - result.axes[axis] = new_axis - return result - - self._consolidate_inplace() - - # some axes don't allow reindexing with dups - if not allow_dups: - self.axes[axis]._can_reindex(indexer) - - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - if axis == 0: - new_blocks = self._slice_take_blocks_ax0(indexer, - fill_tuple=(fill_value,)) - else: - new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=( - fill_value if fill_value is not None else blk.fill_value,)) - for blk in self.blocks] - - new_axes = list(self.axes) - new_axes[axis] = new_axis - return self.__class__(new_blocks, new_axes) - - def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): - """ - Slice/take blocks along axis=0. 
- - Overloaded for SingleBlock - - Returns - ------- - new_blocks : list of Block - - """ - - allow_fill = fill_tuple is not None - - sl_type, slobj, sllen = _preprocess_slice_or_indexer( - slice_or_indexer, self.shape[0], allow_fill=allow_fill) - - if self._is_single_block: - blk = self.blocks[0] - - if sl_type in ('slice', 'mask'): - return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] - elif not allow_fill or self.ndim == 1: - if allow_fill and fill_tuple[0] is None: - _, fill_value = maybe_promote(blk.dtype) - fill_tuple = (fill_value, ) - - return [blk.take_nd(slobj, axis=0, - new_mgr_locs=slice(0, sllen), - fill_tuple=fill_tuple)] - - if sl_type in ('slice', 'mask'): - blknos = self._blknos[slobj] - blklocs = self._blklocs[slobj] - else: - blknos = algos.take_1d(self._blknos, slobj, fill_value=-1, - allow_fill=allow_fill) - blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1, - allow_fill=allow_fill) - - # When filling blknos, make sure blknos is updated before appending to - # blocks list, that way new blkno is exactly len(blocks). - # - # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, - # pytables serialization will break otherwise. - blocks = [] - for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks), - group=True): - if blkno == -1: - # If we've got here, fill_tuple was not None. - fill_value = fill_tuple[0] - - blocks.append(self._make_na_block(placement=mgr_locs, - fill_value=fill_value)) - else: - blk = self.blocks[blkno] - - # Otherwise, slicing along items axis is necessary. - if not blk._can_consolidate: - # A non-consolidatable block, it's easy, because there's - # only one item and each mgr loc is a copy of that single - # item. 
- for mgr_loc in mgr_locs: - newblk = blk.copy(deep=True) - newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) - blocks.append(newblk) - - else: - blocks.append(blk.take_nd(blklocs[mgr_locs.indexer], - axis=0, new_mgr_locs=mgr_locs, - fill_tuple=None)) - - return blocks - - def _make_na_block(self, placement, fill_value=None): - # TODO: infer dtypes other than float64 from fill_value - - if fill_value is None: - fill_value = np.nan - block_shape = list(self.shape) - block_shape[0] = len(placement) - - dtype, fill_value = infer_dtype_from_scalar(fill_value) - block_values = np.empty(block_shape, dtype=dtype) - block_values.fill(fill_value) - return make_block(block_values, placement=placement) - - def take(self, indexer, axis=1, verify=True, convert=True): - """ - Take items along any axis. - """ - self._consolidate_inplace() - indexer = (np.arange(indexer.start, indexer.stop, indexer.step, - dtype='int64') - if isinstance(indexer, slice) - else np.asanyarray(indexer, dtype='int64')) - - n = self.shape[axis] - if convert: - indexer = maybe_convert_indices(indexer, n) - - if verify: - if ((indexer == -1) | (indexer >= n)).any(): - raise Exception('Indices must be nonzero and less than ' - 'the axis length') - - new_labels = self.axes[axis].take(indexer) - return self.reindex_indexer(new_axis=new_labels, indexer=indexer, - axis=axis, allow_dups=True) - - def merge(self, other, lsuffix='', rsuffix=''): - if not self._is_indexed_like(other): - raise AssertionError('Must have same axes to merge managers') - - l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, - right=other.items, rsuffix=rsuffix) - new_items = _concat_indexes([l, r]) - - new_blocks = [blk.copy(deep=False) for blk in self.blocks] - - offset = self.shape[0] - for blk in other.blocks: - blk = blk.copy(deep=False) - blk.mgr_locs = blk.mgr_locs.add(offset) - new_blocks.append(blk) - - new_axes = list(self.axes) - new_axes[0] = new_items - - return self.__class__(_consolidate(new_blocks), new_axes) 
- - def _is_indexed_like(self, other): - """ - Check all axes except items - """ - if self.ndim != other.ndim: - raise AssertionError( - 'Number of dimensions must agree got {ndim} and ' - '{oth_ndim}'.format(ndim=self.ndim, oth_ndim=other.ndim)) - for ax, oax in zip(self.axes[1:], other.axes[1:]): - if not ax.equals(oax): - return False - return True - - def equals(self, other): - self_axes, other_axes = self.axes, other.axes - if len(self_axes) != len(other_axes): - return False - if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): - return False - self._consolidate_inplace() - other._consolidate_inplace() - if len(self.blocks) != len(other.blocks): - return False - - # canonicalize block order, using a tuple combining the type - # name and then mgr_locs because there might be unconsolidated - # blocks (say, Categorical) which can only be distinguished by - # the iteration order - def canonicalize(block): - return (block.dtype.name, block.mgr_locs.as_array.tolist()) - - self_blocks = sorted(self.blocks, key=canonicalize) - other_blocks = sorted(other.blocks, key=canonicalize) - return all(block.equals(oblock) - for block, oblock in zip(self_blocks, other_blocks)) - - def unstack(self, unstacker_func): - """Return a blockmanager with all blocks unstacked. - - Parameters - ---------- - unstacker_func : callable - A (partially-applied) ``pd.core.reshape._Unstacker`` class. 
- - Returns - ------- - unstacked : BlockManager - """ - dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) - new_columns = dummy.get_new_columns() - new_index = dummy.get_new_index() - new_blocks = [] - columns_mask = [] - - for blk in self.blocks: - blocks, mask = blk._unstack( - partial(unstacker_func, - value_columns=self.items[blk.mgr_locs.indexer]), - new_columns) - - new_blocks.extend(blocks) - columns_mask.extend(mask) - - new_columns = new_columns[columns_mask] - - bm = BlockManager(new_blocks, [new_columns, new_index]) - return bm - - -class SingleBlockManager(BlockManager): - """ manage a single block with """ - - ndim = 1 - _is_consolidated = True - _known_consolidated = True - __slots__ = () - - def __init__(self, block, axis, do_integrity_check=False, fastpath=False): - - if isinstance(axis, list): - if len(axis) != 1: - raise ValueError("cannot create SingleBlockManager with more " - "than 1 axis") - axis = axis[0] - - # passed from constructor, single block, single axis - if fastpath: - self.axes = [axis] - if isinstance(block, list): - - # empty block - if len(block) == 0: - block = [np.array([])] - elif len(block) != 1: - raise ValueError('Cannot create SingleBlockManager with ' - 'more than 1 block') - block = block[0] - else: - self.axes = [_ensure_index(axis)] - - # create the block here - if isinstance(block, list): - - # provide consolidation to the interleaved_dtype - if len(block) > 1: - dtype = _interleaved_dtype(block) - block = [b.astype(dtype) for b in block] - block = _consolidate(block) - - if len(block) != 1: - raise ValueError('Cannot create SingleBlockManager with ' - 'more than 1 block') - block = block[0] - - if not isinstance(block, Block): - block = make_block(block, placement=slice(0, len(axis)), ndim=1, - fastpath=True) - - self.blocks = [block] - - def _post_setstate(self): - pass - - @property - def _block(self): - return self.blocks[0] - - @property - def _values(self): - return self._block.values - - 
@property - def _blknos(self): - """ compat with BlockManager """ - return None - - @property - def _blklocs(self): - """ compat with BlockManager """ - return None - - def reindex(self, new_axis, indexer=None, method=None, fill_value=None, - limit=None, copy=True): - # if we are the same and don't copy, just return - if self.index.equals(new_axis): - if copy: - return self.copy(deep=True) - else: - return self - - values = self._block.get_values() - - if indexer is None: - indexer = self.items.get_indexer_for(new_axis) - - if fill_value is None: - fill_value = np.nan - - new_values = algos.take_1d(values, indexer, fill_value=fill_value) - - # fill if needed - if method is not None or limit is not None: - new_values = missing.interpolate_2d(new_values, - method=method, - limit=limit, - fill_value=fill_value) - - if self._block.is_sparse: - make_block = self._block.make_block_same_class - - block = make_block(new_values, copy=copy, - placement=slice(0, len(new_axis))) - - mgr = SingleBlockManager(block, new_axis) - mgr._consolidate_inplace() - return mgr - - def get_slice(self, slobj, axis=0): - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") - - return self.__class__(self._block._slice(slobj), - self.index[slobj], fastpath=True) - - @property - def index(self): - return self.axes[0] - - def convert(self, **kwargs): - """ convert the whole block as one """ - kwargs['by_item'] = False - return self.apply('convert', **kwargs) - - @property - def dtype(self): - return self._block.dtype - - @property - def array_dtype(self): - return self._block.array_dtype - - @property - def ftype(self): - return self._block.ftype - - def get_dtype_counts(self): - return {self.dtype.name: 1} - - def get_ftype_counts(self): - return {self.ftype: 1} - - def get_dtypes(self): - return np.array([self._block.dtype]) - - def get_ftypes(self): - return np.array([self._block.ftype]) - - def external_values(self): - return self._block.external_values() - - def 
internal_values(self): - return self._block.internal_values() - - def formatting_values(self): - """Return the internal values used by the DataFrame/SeriesFormatter""" - return self._block.formatting_values() - - def get_values(self): - """ return a dense type view """ - return np.array(self._block.to_dense(), copy=False) - - @property - def asobject(self): - """ - return a object dtype array. datetime/timedelta like values are boxed - to Timestamp/Timedelta instances. - """ - return self._block.get_values(dtype=object) - - @property - def itemsize(self): - return self._block.values.itemsize - - @property - def _can_hold_na(self): - return self._block._can_hold_na - - def is_consolidated(self): - return True - - def _consolidate_check(self): - pass - - def _consolidate_inplace(self): - pass - - def delete(self, item): - """ - Delete single item from SingleBlockManager. - - Ensures that self.blocks doesn't become empty. - """ - loc = self.items.get_loc(item) - self._block.delete(loc) - self.axes[0] = self.axes[0].delete(loc) - - def fast_xs(self, loc): - """ - fast path for getting a cross-section - return a view of the data - """ - return self._block.values[loc] - - def concat(self, to_concat, new_axis): - """ - Concatenate a list of SingleBlockManagers into a single - SingleBlockManager. - - Used for pd.concat of Series objects with axis=0. 
- - Parameters - ---------- - to_concat : list of SingleBlockManagers - new_axis : Index of the result - - Returns - ------- - SingleBlockManager - - """ - non_empties = [x for x in to_concat if len(x) > 0] - - # check if all series are of the same block type: - if len(non_empties) > 0: - blocks = [obj.blocks[0] for obj in non_empties] - - if all(type(b) is type(blocks[0]) for b in blocks[1:]): # noqa - new_block = blocks[0].concat_same_type(blocks) - else: - values = [x.values for x in blocks] - values = _concat._concat_compat(values) - new_block = make_block( - values, placement=slice(0, len(values), 1)) - else: - values = [x._block.values for x in to_concat] - values = _concat._concat_compat(values) - new_block = make_block( - values, placement=slice(0, len(values), 1)) - - mgr = SingleBlockManager(new_block, new_axis) - return mgr - - -def construction_error(tot_items, block_shape, axes, e=None): - """ raise a helpful message about our construction """ - passed = tuple(map(int, [tot_items] + list(block_shape))) - implied = tuple(map(int, [len(ax) for ax in axes])) - if passed == implied and e is not None: - raise e - if block_shape[0] == 0: - raise ValueError("Empty data passed with indices specified.") - raise ValueError("Shape of passed values is {0}, indices imply {1}".format( - passed, implied)) - - -def create_block_manager_from_blocks(blocks, axes): - try: - if len(blocks) == 1 and not isinstance(blocks[0], Block): - # if blocks[0] is of length 0, return empty blocks - if not len(blocks[0]): - blocks = [] - else: - # It's OK if a single block is passed as values, its placement - # is basically "all items", but if there're many, don't bother - # converting, it's an error anyway. 
- blocks = [make_block(values=blocks[0], - placement=slice(0, len(axes[0])))] - - mgr = BlockManager(blocks, axes) - mgr._consolidate_inplace() - return mgr - - except (ValueError) as e: - blocks = [getattr(b, 'values', b) for b in blocks] - tot_items = sum(b.shape[0] for b in blocks) - construction_error(tot_items, blocks[0].shape[1:], axes, e) - - -def create_block_manager_from_arrays(arrays, names, axes): - - try: - blocks = form_blocks(arrays, names, axes) - mgr = BlockManager(blocks, axes) - mgr._consolidate_inplace() - return mgr - except ValueError as e: - construction_error(len(arrays), arrays[0].shape, axes, e) - - -def form_blocks(arrays, names, axes): - # put "leftover" items in float bucket, where else? - # generalize? - items_dict = defaultdict(list) - extra_locs = [] - - names_idx = Index(names) - if names_idx.equals(axes[0]): - names_indexer = np.arange(len(names_idx)) - else: - assert names_idx.intersection(axes[0]).is_unique - names_indexer = names_idx.get_indexer_for(axes[0]) - - for i, name_idx in enumerate(names_indexer): - if name_idx == -1: - extra_locs.append(i) - continue - - k = names[name_idx] - v = arrays[name_idx] - - block_type = get_block_type(v) - items_dict[block_type.__name__].append((i, k, v)) - - blocks = [] - if len(items_dict['FloatBlock']): - float_blocks = _multi_blockify(items_dict['FloatBlock']) - blocks.extend(float_blocks) - - if len(items_dict['ComplexBlock']): - complex_blocks = _multi_blockify(items_dict['ComplexBlock']) - blocks.extend(complex_blocks) - - if len(items_dict['TimeDeltaBlock']): - timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) - blocks.extend(timedelta_blocks) - - if len(items_dict['IntBlock']): - int_blocks = _multi_blockify(items_dict['IntBlock']) - blocks.extend(int_blocks) - - if len(items_dict['DatetimeBlock']): - datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], - _NS_DTYPE) - blocks.extend(datetime_blocks) - - if len(items_dict['DatetimeTZBlock']): - dttz_blocks = 
[make_block(array, - klass=DatetimeTZBlock, - fastpath=True, - placement=[i]) - for i, _, array in items_dict['DatetimeTZBlock']] - blocks.extend(dttz_blocks) - - if len(items_dict['BoolBlock']): - bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) - blocks.extend(bool_blocks) - - if len(items_dict['ObjectBlock']) > 0: - object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) - blocks.extend(object_blocks) - - if len(items_dict['SparseBlock']) > 0: - sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) - blocks.extend(sparse_blocks) - - if len(items_dict['CategoricalBlock']) > 0: - cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True, - placement=[i]) - for i, _, array in items_dict['CategoricalBlock']] - blocks.extend(cat_blocks) - - if len(extra_locs): - shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) - - # empty items -> dtype object - block_values = np.empty(shape, dtype=object) - block_values.fill(np.nan) - - na_block = make_block(block_values, placement=extra_locs) - blocks.append(na_block) - - return blocks - - -def _simple_blockify(tuples, dtype): - """ return a single array of a block that has a single dtype; if dtype is - not None, coerce to this dtype - """ - values, placement = _stack_arrays(tuples, dtype) - - # CHECK DTYPE? 
- if dtype is not None and values.dtype != dtype: # pragma: no cover - values = values.astype(dtype) - - block = make_block(values, placement=placement) - return [block] - - -def _multi_blockify(tuples, dtype=None): - """ return an array of blocks that potentially have different dtypes """ - - # group by dtype - grouper = itertools.groupby(tuples, lambda x: x[2].dtype) - - new_blocks = [] - for dtype, tup_block in grouper: - - values, placement = _stack_arrays(list(tup_block), dtype) - - block = make_block(values, placement=placement) - new_blocks.append(block) - - return new_blocks - - -def _sparse_blockify(tuples, dtype=None): - """ return an array of blocks that potentially have different dtypes (and - are sparse) - """ - - new_blocks = [] - for i, names, array in tuples: - array = _maybe_to_sparse(array) - block = make_block(array, klass=SparseBlock, fastpath=True, - placement=[i]) - new_blocks.append(block) - - return new_blocks - - -def _stack_arrays(tuples, dtype): - - # fml - def _asarray_compat(x): - if isinstance(x, ABCSeries): - return x._values - else: - return np.asarray(x) - - def _shape_compat(x): - if isinstance(x, ABCSeries): - return len(x), - else: - return x.shape - - placement, names, arrays = zip(*tuples) - - first = arrays[0] - shape = (len(arrays),) + _shape_compat(first) - - stacked = np.empty(shape, dtype=dtype) - for i, arr in enumerate(arrays): - stacked[i] = _asarray_compat(arr) - - return stacked, placement - - -def _interleaved_dtype(blocks): - if not len(blocks): - return None - - dtype = find_common_type([b.dtype for b in blocks]) - - # only numpy compat - if isinstance(dtype, ExtensionDtype): - dtype = np.object - - return dtype - - -def _consolidate(blocks): - """ - Merge blocks having same dtype, exclude non-consolidating blocks - """ - - # sort by _can_consolidate, dtype - gkey = lambda x: x._consolidate_key - grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) - - new_blocks = [] - for (_can_consolidate, dtype), 
group_blocks in grouper: - merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, - _can_consolidate=_can_consolidate) - new_blocks = _extend_blocks(merged_blocks, new_blocks) - return new_blocks - - -def _merge_blocks(blocks, dtype=None, _can_consolidate=True): - - if len(blocks) == 1: - return blocks[0] - - if _can_consolidate: - - if dtype is None: - if len({b.dtype for b in blocks}) != 1: - raise AssertionError("_merge_blocks are invalid!") - dtype = blocks[0].dtype - - # FIXME: optimization potential in case all mgrs contain slices and - # combination of those slices is a slice, too. - new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) - new_values = _vstack([b.values for b in blocks], dtype) - - argsort = np.argsort(new_mgr_locs) - new_values = new_values[argsort] - new_mgr_locs = new_mgr_locs[argsort] - - return make_block(new_values, fastpath=True, placement=new_mgr_locs) - - # no merge - return blocks - - -def _extend_blocks(result, blocks=None): - """ return a new extended blocks, givin the result """ - if blocks is None: - blocks = [] - if isinstance(result, list): - for r in result: - if isinstance(r, list): - blocks.extend(r) - else: - blocks.append(r) - elif isinstance(result, BlockManager): - blocks.extend(result.blocks) - else: - blocks.append(result) - return blocks - - -def _block_shape(values, ndim=1, shape=None): - """ guarantee the shape of the values to be at least 1 d """ - if values.ndim < ndim: - if shape is None: - shape = values.shape - values = values.reshape(tuple((1, ) + shape)) - return values - - -def _vstack(to_stack, dtype): - - # work around NumPy 1.6 bug - if dtype == _NS_DTYPE or dtype == _TD_DTYPE: - new_values = np.vstack([x.view('i8') for x in to_stack]) - return new_values.view(dtype) - - else: - return np.vstack(to_stack) - - -def _maybe_compare(a, b, op): - - is_a_array = isinstance(a, np.ndarray) - is_b_array = isinstance(b, np.ndarray) - - # numpy deprecation warning to have i8 vs integer 
comparisons - if is_datetimelike_v_numeric(a, b): - result = False - - # numpy deprecation warning if comparing numeric vs string-like - elif is_numeric_v_string_like(a, b): - result = False - - else: - result = op(a, b) - - if is_scalar(result) and (is_a_array or is_b_array): - type_names = [type(a).__name__, type(b).__name__] - - if is_a_array: - type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) - - if is_b_array: - type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) - - raise TypeError( - "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], - b=type_names[1])) - return result - - -def _concat_indexes(indexes): - return indexes[0].append(indexes[1:]) - - -def _block2d_to_blocknd(values, placement, shape, labels, ref_items): - """ pivot to the labels shape """ - panel_shape = (len(placement),) + shape - - # TODO: lexsort depth needs to be 2!! - - # Create observation selection vector using major and minor - # labels, for converting to panel format. - selector = _factor_indexer(shape[1:], labels) - mask = np.zeros(np.prod(shape), dtype=bool) - mask.put(selector, True) - - if mask.all(): - pvalues = np.empty(panel_shape, dtype=values.dtype) - else: - dtype, fill_value = maybe_promote(values.dtype) - pvalues = np.empty(panel_shape, dtype=dtype) - pvalues.fill(fill_value) - - for i in range(len(placement)): - pvalues[i].flat[mask] = values[:, i] - - return make_block(pvalues, placement=placement) - - -def _factor_indexer(shape, labels): - """ - given a tuple of shape and a list of Categorical labels, return the - expanded label indexer - """ - mult = np.array(shape)[::-1].cumprod()[::-1] - return _ensure_platform_int( - np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T) - - -def _get_blkno_placements(blknos, blk_count, group=True): - """ - - Parameters - ---------- - blknos : array of int64 - blk_count : int - group : bool - - Returns - ------- - iterator - yield (BlockPlacement, blkno) - - """ - - blknos = 
_ensure_int64(blknos) - - # FIXME: blk_count is unused, but it may avoid the use of dicts in cython - for blkno, indexer in libinternals.get_blkno_indexers(blknos, group): - yield blkno, BlockPlacement(indexer) - - -def items_overlap_with_suffix(left, lsuffix, right, rsuffix): - """ - If two indices overlap, add suffixes to overlapping entries. - - If corresponding suffix is empty, the entry is simply converted to string. - - """ - to_rename = left.intersection(right) - if len(to_rename) == 0: - return left, right - else: - if not lsuffix and not rsuffix: - raise ValueError('columns overlap but no suffix specified: ' - '{rename}'.format(rename=to_rename)) - - def lrenamer(x): - if x in to_rename: - return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) - return x - - def rrenamer(x): - if x in to_rename: - return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) - return x - - return (_transform_index(left, lrenamer), - _transform_index(right, rrenamer)) - - -def _safe_reshape(arr, new_shape): - """ - If possible, reshape `arr` to have shape `new_shape`, - with a couple of exceptions (see gh-13012): - - 1) If `arr` is a Categorical or Index, `arr` will be - returned as is. - 2) If `arr` is a Series, the `_values` attribute will - be reshaped and returned. - - Parameters - ---------- - arr : array-like, object to be reshaped - new_shape : int or tuple of ints, the new shape - """ - if isinstance(arr, ABCSeries): - arr = arr._values - if not isinstance(arr, Categorical): - arr = arr.reshape(new_shape) - return arr - - -def _transform_index(index, func, level=None): - """ - Apply function to all values found in index. - - This includes transforming multiindex entries separately. - Only apply function to one level of the MultiIndex if level is specified. 
- - """ - if isinstance(index, MultiIndex): - if level is not None: - items = [tuple(func(y) if i == level else y - for i, y in enumerate(x)) for x in index] - else: - items = [tuple(func(y) for y in x) for x in index] - return MultiIndex.from_tuples(items, names=index.names) - else: - items = [func(x) for x in index] - return Index(items, name=index.name) - - -def _putmask_smart(v, m, n): - """ - Return a new ndarray, try to preserve dtype if possible. - - Parameters - ---------- - v : `values`, updated in-place (array like) - m : `mask`, applies to both sides (array like) - n : `new values` either scalar or an array like aligned with `values` - - Returns - ------- - values : ndarray with updated values - this *may* be a copy of the original - - See Also - -------- - ndarray.putmask - """ - - # we cannot use np.asarray() here as we cannot have conversions - # that numpy does when numeric are mixed with strings - - # n should be the length of the mask or a scalar here - if not is_list_like(n): - n = np.repeat(n, len(m)) - elif isinstance(n, np.ndarray) and n.ndim == 0: # numpy scalar - n = np.repeat(np.array(n, ndmin=1), len(m)) - - # see if we are only masking values that if putted - # will work in the current dtype - try: - nn = n[m] - - # make sure that we have a nullable type - # if we have nulls - if not _isna_compat(v, nn[0]): - raise ValueError - - # we ignore ComplexWarning here - with catch_warnings(record=True): - nn_at = nn.astype(v.dtype) - - # avoid invalid dtype comparisons - # between numbers & strings - - # only compare integers/floats - # don't compare integers to datetimelikes - if (not is_numeric_v_string_like(nn, nn_at) and - (is_float_dtype(nn.dtype) or - is_integer_dtype(nn.dtype) and - is_float_dtype(nn_at.dtype) or - is_integer_dtype(nn_at.dtype))): - - comp = (nn == nn_at) - if is_list_like(comp) and comp.all(): - nv = v.copy() - nv[m] = nn_at - return nv - except (ValueError, IndexError, TypeError): - pass - - n = np.asarray(n) - - def 
_putmask_preserve(nv, n): - try: - nv[m] = n[m] - except (IndexError, ValueError): - nv[m] = n - return nv - - # preserves dtype if possible - if v.dtype.kind == n.dtype.kind: - return _putmask_preserve(v, n) - - # change the dtype if needed - dtype, _ = maybe_promote(n.dtype) - - if is_extension_type(v.dtype) and is_object_dtype(dtype): - v = v.get_values(dtype) - else: - v = v.astype(dtype) + if is_extension_type(v.dtype) and is_object_dtype(dtype): + v = v.get_values(dtype) + else: + v = v.astype(dtype) return _putmask_preserve(v, n) - - -def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): - """ - Concatenate block managers into one. - - Parameters - ---------- - mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples - axes : list of Index - concat_axis : int - copy : bool - - """ - concat_plan = combine_concat_plans( - [get_mgr_concatenation_plan(mgr, indexers) - for mgr, indexers in mgrs_indexers], concat_axis) - - blocks = [] - - for placement, join_units in concat_plan: - - if len(join_units) == 1 and not join_units[0].indexers: - b = join_units[0].block - values = b.values - if copy: - values = values.copy() - elif not copy: - values = values.view() - b = b.make_block_same_class(values, placement=placement) - elif is_uniform_join_units(join_units): - b = join_units[0].block.concat_same_type( - [ju.block for ju in join_units], placement=placement) - else: - b = make_block( - concatenate_join_units(join_units, concat_axis, copy=copy), - placement=placement) - blocks.append(b) - - return BlockManager(blocks, axes) - - -def is_uniform_join_units(join_units): - """ - Check if the join units consist of blocks of uniform type that can - be concatenated using Block.concat_same_type instead of the generic - concatenate_join_units (which uses `_concat._concat_compat`). 
- - """ - return ( - # all blocks need to have the same type - all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa - # no blocks that would get missing values (can lead to type upcasts) - all(not ju.is_na for ju in join_units) and - # no blocks with indexers (as then the dimensions do not fit) - all(not ju.indexers for ju in join_units) and - # disregard Panels - all(ju.block.ndim <= 2 for ju in join_units) and - # only use this path when there is something to concatenate - len(join_units) > 1) - - -def get_empty_dtype_and_na(join_units): - """ - Return dtype and N/A values to use when concatenating specified units. - - Returned N/A value may be None which means there was no casting involved. - - Returns - ------- - dtype - na - """ - - if len(join_units) == 1: - blk = join_units[0].block - if blk is None: - return np.float64, np.nan - - has_none_blocks = False - dtypes = [None] * len(join_units) - for i, unit in enumerate(join_units): - if unit.block is None: - has_none_blocks = True - else: - dtypes[i] = unit.dtype - - upcast_classes = defaultdict(list) - null_upcast_classes = defaultdict(list) - for dtype, unit in zip(dtypes, join_units): - if dtype is None: - continue - - if is_categorical_dtype(dtype): - upcast_cls = 'category' - elif is_datetimetz(dtype): - upcast_cls = 'datetimetz' - elif issubclass(dtype.type, np.bool_): - upcast_cls = 'bool' - elif issubclass(dtype.type, np.object_): - upcast_cls = 'object' - elif is_datetime64_dtype(dtype): - upcast_cls = 'datetime' - elif is_timedelta64_dtype(dtype): - upcast_cls = 'timedelta' - elif is_float_dtype(dtype) or is_numeric_dtype(dtype): - upcast_cls = dtype.name - else: - upcast_cls = 'float' - - # Null blocks should not influence upcast class selection, unless there - # are only null blocks, when same upcasting rules must be applied to - # null upcast classes. 
- if unit.is_na: - null_upcast_classes[upcast_cls].append(dtype) - else: - upcast_classes[upcast_cls].append(dtype) - - if not upcast_classes: - upcast_classes = null_upcast_classes - - # create the result - if 'object' in upcast_classes: - return np.dtype(np.object_), np.nan - elif 'bool' in upcast_classes: - if has_none_blocks: - return np.dtype(np.object_), np.nan - else: - return np.dtype(np.bool_), None - elif 'category' in upcast_classes: - return np.dtype(np.object_), np.nan - elif 'datetimetz' in upcast_classes: - dtype = upcast_classes['datetimetz'] - return dtype[0], tslib.iNaT - elif 'datetime' in upcast_classes: - return np.dtype('M8[ns]'), tslib.iNaT - elif 'timedelta' in upcast_classes: - return np.dtype('m8[ns]'), tslib.iNaT - else: # pragma - g = np.find_common_type(upcast_classes, []) - if is_float_dtype(g): - return g, g.type(np.nan) - elif is_numeric_dtype(g): - if has_none_blocks: - return np.float64, np.nan - else: - return g, None - - msg = "invalid dtype determination in get_concat_dtype" - raise AssertionError(msg) - - -def concatenate_join_units(join_units, concat_axis, copy): - """ - Concatenate values from several join units along selected axis. - """ - if concat_axis == 0 and len(join_units) > 1: - # Concatenating join units along ax0 is handled in _merge_blocks. - raise AssertionError("Concatenating join units along axis0") - - empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units) - - to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, - upcasted_na=upcasted_na) - for ju in join_units] - - if len(to_concat) == 1: - # Only one block, nothing to concatenate. - concat_values = to_concat[0] - if copy and concat_values.base is not None: - concat_values = concat_values.copy() - else: - concat_values = _concat._concat_compat(to_concat, axis=concat_axis) - - return concat_values - - -def get_mgr_concatenation_plan(mgr, indexers): - """ - Construct concatenation plan for given block manager and indexers. 
- - Parameters - ---------- - mgr : BlockManager - indexers : dict of {axis: indexer} - - Returns - ------- - plan : list of (BlockPlacement, JoinUnit) tuples - - """ - # Calculate post-reindex shape , save for item axis which will be separate - # for each block anyway. - mgr_shape = list(mgr.shape) - for ax, indexer in indexers.items(): - mgr_shape[ax] = len(indexer) - mgr_shape = tuple(mgr_shape) - - if 0 in indexers: - ax0_indexer = indexers.pop(0) - blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) - blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) - else: - - if mgr._is_single_block: - blk = mgr.blocks[0] - return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] - - ax0_indexer = None - blknos = mgr._blknos - blklocs = mgr._blklocs - - plan = [] - for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks), - group=False): - - assert placements.is_slice_like - - join_unit_indexers = indexers.copy() - - shape = list(mgr_shape) - shape[0] = len(placements) - shape = tuple(shape) - - if blkno == -1: - unit = JoinUnit(None, shape) - else: - blk = mgr.blocks[blkno] - ax0_blk_indexer = blklocs[placements.indexer] - - unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and - # Fastpath detection of join unit not - # needing to reindex its block: no ax0 - # reindexing took place and block - # placement was sequential before. - ((ax0_indexer is None and - blk.mgr_locs.is_slice_like and - blk.mgr_locs.as_slice.step == 1) or - # Slow-ish detection: all indexer locs - # are sequential (and length match is - # checked above). - (np.diff(ax0_blk_indexer) == 1).all())) - - # Omit indexer if no item reindexing is required. 
- if unit_no_ax0_reindexing: - join_unit_indexers.pop(0, None) - else: - join_unit_indexers[0] = ax0_blk_indexer - - unit = JoinUnit(blk, shape, join_unit_indexers) - - plan.append((placements, unit)) - - return plan - - -def combine_concat_plans(plans, concat_axis): - """ - Combine multiple concatenation plans into one. - - existing_plan is updated in-place. - """ - if len(plans) == 1: - for p in plans[0]: - yield p[0], [p[1]] - - elif concat_axis == 0: - offset = 0 - for plan in plans: - last_plc = None - - for plc, unit in plan: - yield plc.add(offset), [unit] - last_plc = plc - - if last_plc is not None: - offset += last_plc.as_slice.stop - - else: - num_ended = [0] - - def _next_or_none(seq): - retval = next(seq, None) - if retval is None: - num_ended[0] += 1 - return retval - - plans = list(map(iter, plans)) - next_items = list(map(_next_or_none, plans)) - - while num_ended[0] != len(next_items): - if num_ended[0] > 0: - raise ValueError("Plan shapes are not aligned") - - placements, units = zip(*next_items) - - lengths = list(map(len, placements)) - min_len, max_len = min(lengths), max(lengths) - - if min_len == max_len: - yield placements[0], units - next_items[:] = map(_next_or_none, plans) - else: - yielded_placement = None - yielded_units = [None] * len(next_items) - for i, (plc, unit) in enumerate(next_items): - yielded_units[i] = unit - if len(plc) > min_len: - # trim_join_unit updates unit in place, so only - # placement needs to be sliced to skip min_len. - next_items[i] = (plc[min_len:], - trim_join_unit(unit, min_len)) - else: - yielded_placement = plc - next_items[i] = _next_or_none(plans[i]) - - yield yielded_placement, yielded_units - - -def trim_join_unit(join_unit, length): - """ - Reduce join_unit's shape along item axis to length. - - Extra items that didn't fit are returned as a separate block. 
- """ - - if 0 not in join_unit.indexers: - extra_indexers = join_unit.indexers - - if join_unit.block is None: - extra_block = None - else: - extra_block = join_unit.block.getitem_block(slice(length, None)) - join_unit.block = join_unit.block.getitem_block(slice(length)) - else: - extra_block = join_unit.block - - extra_indexers = copy.copy(join_unit.indexers) - extra_indexers[0] = extra_indexers[0][length:] - join_unit.indexers[0] = join_unit.indexers[0][:length] - - extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:] - join_unit.shape = (length,) + join_unit.shape[1:] - - return JoinUnit(block=extra_block, indexers=extra_indexers, - shape=extra_shape) - - -class JoinUnit(object): - - def __init__(self, block, shape, indexers=None): - # Passing shape explicitly is required for cases when block is None. - if indexers is None: - indexers = {} - self.block = block - self.indexers = indexers - self.shape = shape - - def __repr__(self): - return '{name}({block!r}, {indexers})'.format( - name=self.__class__.__name__, block=self.block, - indexers=self.indexers) - - @cache_readonly - def needs_filling(self): - for indexer in self.indexers.values(): - # FIXME: cache results of indexer == -1 checks. - if (indexer == -1).any(): - return True - - return False - - @cache_readonly - def dtype(self): - if self.block is None: - raise AssertionError("Block is None, no dtype") - - if not self.needs_filling: - return self.block.dtype - else: - return _get_dtype(maybe_promote(self.block.dtype, - self.block.fill_value)[0]) - - @cache_readonly - def is_na(self): - if self.block is None: - return True - - if not self.block._can_hold_na: - return False - - # Usually it's enough to check but a small fraction of values to see if - # a block is NOT null, chunks should help in such cases. 1000 value - # was chosen rather arbitrarily. 
- values = self.block.values - if self.block.is_categorical: - values_flat = values.categories - elif self.block.is_sparse: - # fill_value is not NaN and have holes - if not values._null_fill_value and values.sp_index.ngaps > 0: - return False - values_flat = values.ravel(order='K') - else: - values_flat = values.ravel(order='K') - total_len = values_flat.shape[0] - chunk_len = max(total_len // 40, 1000) - for i in range(0, total_len, chunk_len): - if not isna(values_flat[i:i + chunk_len]).all(): - return False - - return True - - def get_reindexed_values(self, empty_dtype, upcasted_na): - if upcasted_na is None: - # No upcasting is necessary - fill_value = self.block.fill_value - values = self.block.get_values() - else: - fill_value = upcasted_na - - if self.is_na: - if getattr(self.block, 'is_object', False): - # we want to avoid filling with np.nan if we are - # using None; we already know that we are all - # nulls - values = self.block.values.ravel(order='K') - if len(values) and values[0] is None: - fill_value = None - - if getattr(self.block, 'is_datetimetz', False): - pass - elif getattr(self.block, 'is_categorical', False): - pass - elif getattr(self.block, 'is_sparse', False): - pass - else: - missing_arr = np.empty(self.shape, dtype=empty_dtype) - missing_arr.fill(fill_value) - return missing_arr - - if not self.indexers: - if not self.block._can_consolidate: - # preserve these for validation in _concat_compat - return self.block.values - - if self.block.is_bool and not self.block.is_categorical: - # External code requested filling/upcasting, bool values must - # be upcasted to object to avoid being upcasted to numeric. - values = self.block.astype(np.object_).values - elif self.block.is_categorical: - values = self.block.values - else: - # No dtype upcasting is done here, it will be performed during - # concatenation itself. 
- values = self.block.get_values() - - if not self.indexers: - # If there's no indexing to be done, we want to signal outside - # code that this array must be copied explicitly. This is done - # by returning a view and checking `retval.base`. - values = values.view() - - else: - for ax, indexer in self.indexers.items(): - values = algos.take_nd(values, indexer, axis=ax, - fill_value=fill_value) - - return values - - -def _fast_count_smallints(arr): - """Faster version of set(arr) for sequences of small numbers.""" - if len(arr) == 0: - # Handle empty arr case separately: numpy 1.6 chokes on that. - return np.empty((0, 2), dtype=arr.dtype) - else: - counts = np.bincount(arr.astype(np.int_)) - nz = counts.nonzero()[0] - return np.c_[nz, counts[nz]] - - -def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): - if isinstance(slice_or_indexer, slice): - return ('slice', slice_or_indexer, - libinternals.slice_len(slice_or_indexer, length)) - elif (isinstance(slice_or_indexer, np.ndarray) and - slice_or_indexer.dtype == np.bool_): - return 'mask', slice_or_indexer, slice_or_indexer.sum() - else: - indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) - if not allow_fill: - indexer = maybe_convert_indices(indexer, length) - return 'fancy', indexer, len(indexer) diff --git a/pandas/core/internals/joins.py b/pandas/core/internals/joins.py new file mode 100644 index 0000000000000..261505e31e768 --- /dev/null +++ b/pandas/core/internals/joins.py @@ -0,0 +1,493 @@ +# -*- coding: utf-8 -*- +import copy +from collections import defaultdict + +import numpy as np + +from pandas._libs import tslib +from pandas.util._decorators import cache_readonly +from pandas.compat import range, map, zip + +from pandas.core.dtypes.common import ( + is_timedelta64_dtype, + is_datetime64_dtype, is_datetimetz, + is_categorical_dtype, + is_float_dtype, is_numeric_dtype, + _get_dtype) +from pandas.core.dtypes.cast import maybe_promote +from pandas.core.dtypes.missing import isna 
+import pandas.core.dtypes.concat as _concat + +import pandas.core.algorithms as algos + + +from .blocks import make_block +from .managers import BlockManager, _get_blkno_placements + + +class JoinUnit(object): + + def __init__(self, block, shape, indexers=None): + # Passing shape explicitly is required for cases when block is None. + if indexers is None: + indexers = {} + self.block = block + self.indexers = indexers + self.shape = shape + + def __repr__(self): + return '{name}({block!r}, {indexers})'.format( + name=self.__class__.__name__, block=self.block, + indexers=self.indexers) + + @cache_readonly + def needs_filling(self): + for indexer in self.indexers.values(): + # FIXME: cache results of indexer == -1 checks. + if (indexer == -1).any(): + return True + + return False + + @cache_readonly + def dtype(self): + if self.block is None: + raise AssertionError("Block is None, no dtype") + + if not self.needs_filling: + return self.block.dtype + else: + return _get_dtype(maybe_promote(self.block.dtype, + self.block.fill_value)[0]) + + @cache_readonly + def is_na(self): + if self.block is None: + return True + + if not self.block._can_hold_na: + return False + + # Usually it's enough to check but a small fraction of values to see if + # a block is NOT null, chunks should help in such cases. 1000 value + # was chosen rather arbitrarily. 
+ values = self.block.values + if self.block.is_categorical: + values_flat = values.categories + elif self.block.is_sparse: + # fill_value is not NaN and have holes + if not values._null_fill_value and values.sp_index.ngaps > 0: + return False + values_flat = values.ravel(order='K') + else: + values_flat = values.ravel(order='K') + total_len = values_flat.shape[0] + chunk_len = max(total_len // 40, 1000) + for i in range(0, total_len, chunk_len): + if not isna(values_flat[i:i + chunk_len]).all(): + return False + + return True + + def get_reindexed_values(self, empty_dtype, upcasted_na): + if upcasted_na is None: + # No upcasting is necessary + fill_value = self.block.fill_value + values = self.block.get_values() + else: + fill_value = upcasted_na + + if self.is_na: + if getattr(self.block, 'is_object', False): + # we want to avoid filling with np.nan if we are + # using None; we already know that we are all + # nulls + values = self.block.values.ravel(order='K') + if len(values) and values[0] is None: + fill_value = None + + if getattr(self.block, 'is_datetimetz', False): + pass + elif getattr(self.block, 'is_categorical', False): + pass + elif getattr(self.block, 'is_sparse', False): + pass + else: + missing_arr = np.empty(self.shape, dtype=empty_dtype) + missing_arr.fill(fill_value) + return missing_arr + + if not self.indexers: + if not self.block._can_consolidate: + # preserve these for validation in _concat_compat + return self.block.values + + if self.block.is_bool and not self.block.is_categorical: + # External code requested filling/upcasting, bool values must + # be upcasted to object to avoid being upcasted to numeric. + values = self.block.astype(np.object_).values + elif self.block.is_categorical: + values = self.block.values + else: + # No dtype upcasting is done here, it will be performed during + # concatenation itself. 
+ values = self.block.get_values() + + if not self.indexers: + # If there's no indexing to be done, we want to signal outside + # code that this array must be copied explicitly. This is done + # by returning a view and checking `retval.base`. + values = values.view() + + else: + for ax, indexer in self.indexers.items(): + values = algos.take_nd(values, indexer, axis=ax, + fill_value=fill_value) + + return values + + +def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy): + """ + Concatenate block managers into one. + + Parameters + ---------- + mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + copy : bool + + """ + concat_plan = combine_concat_plans( + [get_mgr_concatenation_plan(mgr, indexers) + for mgr, indexers in mgrs_indexers], concat_axis) + + blocks = [] + + for placement, join_units in concat_plan: + + if len(join_units) == 1 and not join_units[0].indexers: + b = join_units[0].block + values = b.values + if copy: + values = values.copy() + elif not copy: + values = values.view() + b = b.make_block_same_class(values, placement=placement) + elif is_uniform_join_units(join_units): + b = join_units[0].block.concat_same_type( + [ju.block for ju in join_units], placement=placement) + else: + b = make_block( + concatenate_join_units(join_units, concat_axis, copy=copy), + placement=placement) + blocks.append(b) + + return BlockManager(blocks, axes) + + +def get_mgr_concatenation_plan(mgr, indexers): + """ + Construct concatenation plan for given block manager and indexers. + + Parameters + ---------- + mgr : BlockManager + indexers : dict of {axis: indexer} + + Returns + ------- + plan : list of (BlockPlacement, JoinUnit) tuples + + """ + # Calculate post-reindex shape , save for item axis which will be separate + # for each block anyway. 
+ mgr_shape = list(mgr.shape) + for ax, indexer in indexers.items(): + mgr_shape[ax] = len(indexer) + mgr_shape = tuple(mgr_shape) + + if 0 in indexers: + ax0_indexer = indexers.pop(0) + blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) + blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) + else: + + if mgr._is_single_block: + blk = mgr.blocks[0] + return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] + + ax0_indexer = None + blknos = mgr._blknos + blklocs = mgr._blklocs + + plan = [] + for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks), + group=False): + + assert placements.is_slice_like + + join_unit_indexers = indexers.copy() + + shape = list(mgr_shape) + shape[0] = len(placements) + shape = tuple(shape) + + if blkno == -1: + unit = JoinUnit(None, shape) + else: + blk = mgr.blocks[blkno] + ax0_blk_indexer = blklocs[placements.indexer] + + unit_no_ax0_reindexing = (len(placements) == len(blk.mgr_locs) and + # Fastpath detection of join unit not + # needing to reindex its block: no ax0 + # reindexing took place and block + # placement was sequential before. + ((ax0_indexer is None and + blk.mgr_locs.is_slice_like and + blk.mgr_locs.as_slice.step == 1) or + # Slow-ish detection: all indexer locs + # are sequential (and length match is + # checked above). + (np.diff(ax0_blk_indexer) == 1).all())) + + # Omit indexer if no item reindexing is required. + if unit_no_ax0_reindexing: + join_unit_indexers.pop(0, None) + else: + join_unit_indexers[0] = ax0_blk_indexer + + unit = JoinUnit(blk, shape, join_unit_indexers) + + plan.append((placements, unit)) + + return plan + + +def combine_concat_plans(plans, concat_axis): + """ + Combine multiple concatenation plans into one. + + existing_plan is updated in-place. 
+ """ + if len(plans) == 1: + for p in plans[0]: + yield p[0], [p[1]] + + elif concat_axis == 0: + offset = 0 + for plan in plans: + last_plc = None + + for plc, unit in plan: + yield plc.add(offset), [unit] + last_plc = plc + + if last_plc is not None: + offset += last_plc.as_slice.stop + + else: + num_ended = [0] + + def _next_or_none(seq): + retval = next(seq, None) + if retval is None: + num_ended[0] += 1 + return retval + + plans = list(map(iter, plans)) + next_items = list(map(_next_or_none, plans)) + + while num_ended[0] != len(next_items): + if num_ended[0] > 0: + raise ValueError("Plan shapes are not aligned") + + placements, units = zip(*next_items) + + lengths = list(map(len, placements)) + min_len, max_len = min(lengths), max(lengths) + + if min_len == max_len: + yield placements[0], units + next_items[:] = map(_next_or_none, plans) + else: + yielded_placement = None + yielded_units = [None] * len(next_items) + for i, (plc, unit) in enumerate(next_items): + yielded_units[i] = unit + if len(plc) > min_len: + # trim_join_unit updates unit in place, so only + # placement needs to be sliced to skip min_len. + next_items[i] = (plc[min_len:], + trim_join_unit(unit, min_len)) + else: + yielded_placement = plc + next_items[i] = _next_or_none(plans[i]) + + yield yielded_placement, yielded_units + + +def concatenate_join_units(join_units, concat_axis, copy): + """ + Concatenate values from several join units along selected axis. + """ + if concat_axis == 0 and len(join_units) > 1: + # Concatenating join units along ax0 is handled in _merge_blocks. + raise AssertionError("Concatenating join units along axis0") + + empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units) + + to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, + upcasted_na=upcasted_na) + for ju in join_units] + + if len(to_concat) == 1: + # Only one block, nothing to concatenate. 
+ concat_values = to_concat[0] + if copy and concat_values.base is not None: + concat_values = concat_values.copy() + else: + concat_values = _concat._concat_compat(to_concat, axis=concat_axis) + + return concat_values + + +def get_empty_dtype_and_na(join_units): + """ + Return dtype and N/A values to use when concatenating specified units. + + Returned N/A value may be None which means there was no casting involved. + + Returns + ------- + dtype + na + """ + + if len(join_units) == 1: + blk = join_units[0].block + if blk is None: + return np.float64, np.nan + + has_none_blocks = False + dtypes = [None] * len(join_units) + for i, unit in enumerate(join_units): + if unit.block is None: + has_none_blocks = True + else: + dtypes[i] = unit.dtype + + upcast_classes = defaultdict(list) + null_upcast_classes = defaultdict(list) + for dtype, unit in zip(dtypes, join_units): + if dtype is None: + continue + + if is_categorical_dtype(dtype): + upcast_cls = 'category' + elif is_datetimetz(dtype): + upcast_cls = 'datetimetz' + elif issubclass(dtype.type, np.bool_): + upcast_cls = 'bool' + elif issubclass(dtype.type, np.object_): + upcast_cls = 'object' + elif is_datetime64_dtype(dtype): + upcast_cls = 'datetime' + elif is_timedelta64_dtype(dtype): + upcast_cls = 'timedelta' + elif is_float_dtype(dtype) or is_numeric_dtype(dtype): + upcast_cls = dtype.name + else: + upcast_cls = 'float' + + # Null blocks should not influence upcast class selection, unless there + # are only null blocks, when same upcasting rules must be applied to + # null upcast classes. 
+ if unit.is_na: + null_upcast_classes[upcast_cls].append(dtype) + else: + upcast_classes[upcast_cls].append(dtype) + + if not upcast_classes: + upcast_classes = null_upcast_classes + + # create the result + if 'object' in upcast_classes: + return np.dtype(np.object_), np.nan + elif 'bool' in upcast_classes: + if has_none_blocks: + return np.dtype(np.object_), np.nan + else: + return np.dtype(np.bool_), None + elif 'category' in upcast_classes: + return np.dtype(np.object_), np.nan + elif 'datetimetz' in upcast_classes: + dtype = upcast_classes['datetimetz'] + return dtype[0], tslib.iNaT + elif 'datetime' in upcast_classes: + return np.dtype('M8[ns]'), tslib.iNaT + elif 'timedelta' in upcast_classes: + return np.dtype('m8[ns]'), tslib.iNaT + else: # pragma + g = np.find_common_type(upcast_classes, []) + if is_float_dtype(g): + return g, g.type(np.nan) + elif is_numeric_dtype(g): + if has_none_blocks: + return np.float64, np.nan + else: + return g, None + + msg = "invalid dtype determination in get_concat_dtype" + raise AssertionError(msg) + + +def is_uniform_join_units(join_units): + """ + Check if the join units consist of blocks of uniform type that can + be concatenated using Block.concat_same_type instead of the generic + concatenate_join_units (which uses `_concat._concat_compat`). + + """ + return ( + # all blocks need to have the same type + all(type(ju.block) is type(join_units[0].block) for ju in join_units) and # noqa + # no blocks that would get missing values (can lead to type upcasts) + all(not ju.is_na for ju in join_units) and + # no blocks with indexers (as then the dimensions do not fit) + all(not ju.indexers for ju in join_units) and + # disregard Panels + all(ju.block.ndim <= 2 for ju in join_units) and + # only use this path when there is something to concatenate + len(join_units) > 1) + + +def trim_join_unit(join_unit, length): + """ + Reduce join_unit's shape along item axis to length. 
+ + Extra items that didn't fit are returned as a separate block. + """ + + if 0 not in join_unit.indexers: + extra_indexers = join_unit.indexers + + if join_unit.block is None: + extra_block = None + else: + extra_block = join_unit.block.getitem_block(slice(length, None)) + join_unit.block = join_unit.block.getitem_block(slice(length)) + else: + extra_block = join_unit.block + + extra_indexers = copy.copy(join_unit.indexers) + extra_indexers[0] = extra_indexers[0][length:] + join_unit.indexers[0] = join_unit.indexers[0][:length] + + extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:] + join_unit.shape = (length,) + join_unit.shape[1:] + + return JoinUnit(block=extra_block, indexers=extra_indexers, + shape=extra_shape) diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py new file mode 100644 index 0000000000000..0a3583121bec5 --- /dev/null +++ b/pandas/core/internals/managers.py @@ -0,0 +1,2066 @@ +# -*- coding: utf-8 -*- +import itertools +import operator +from collections import defaultdict +from functools import partial + +import numpy as np + +from pandas._libs import lib, internals as libinternals +from pandas._libs.internals import BlockPlacement + +from pandas.util._validators import validate_bool_kwarg +from pandas.compat import range, map, zip, u + +from pandas.io.formats.printing import pprint_thing + +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.common import ( + _NS_DTYPE, + _ensure_int64, + is_datetimelike_v_numeric, + is_numeric_v_string_like, is_extension_type, + is_scalar) +from pandas.core.dtypes.cast import ( + maybe_promote, + infer_dtype_from_scalar, + find_common_type) +from pandas.core.dtypes.missing import isna +import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.generic import ABCSeries + +import pandas.core.algorithms as algos +from pandas.core import missing +from pandas.core.base import PandasObject + +from pandas.core.index import Index, 
MultiIndex, _ensure_index +from pandas.core.indexing import maybe_convert_indices + +from pandas.core.sparse.array import _maybe_to_sparse + + +from .blocks import (make_block, get_block_type, + _merge_blocks, _extend_blocks, _safe_reshape, + Block, SparseBlock, DatetimeTZBlock, CategoricalBlock) + + +class BlockManager(PandasObject): + """ + Core internal data structure to implement DataFrame, Series, Panel, etc. + + Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a + lightweight blocked set of labeled data to be manipulated by the DataFrame + public API class + + Attributes + ---------- + shape + ndim + axes + values + items + + Methods + ------- + set_axis(axis, new_labels) + copy(deep=True) + + get_dtype_counts + get_ftype_counts + get_dtypes + get_ftypes + + apply(func, axes, block_filter_fn) + + get_bool_data + get_numeric_data + + get_slice(slice_like, axis) + get(label) + iget(loc) + get_scalar(label_tup) + + take(indexer, axis) + reindex_axis(new_labels, axis) + reindex_indexer(new_labels, indexer, axis) + + delete(label) + insert(loc, label, value) + set(label, value) + + Parameters + ---------- + + + Notes + ----- + This is *not* a public API class + """ + __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', + '_is_consolidated', '_blknos', '_blklocs'] + + def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True): + self.axes = [_ensure_index(ax) for ax in axes] + self.blocks = tuple(blocks) + + for block in blocks: + if block.is_sparse: + if len(block.mgr_locs) != 1: + raise AssertionError("Sparse block refers to multiple " + "items") + else: + if self.ndim != block.ndim: + raise AssertionError( + 'Number of Block dimensions ({block}) must equal ' + 'number of axes ({self})'.format(block=block.ndim, + self=self.ndim)) + + if do_integrity_check: + self._verify_integrity() + + self._consolidate_check() + + self._rebuild_blknos_and_blklocs() + + def make_empty(self, axes=None): + """ return an empty 
BlockManager with the items axis of len 0 """ + if axes is None: + axes = [_ensure_index([])] + [_ensure_index(a) + for a in self.axes[1:]] + + # preserve dtype if possible + if self.ndim == 1: + blocks = np.array([], dtype=self.array_dtype) + else: + blocks = [] + return self.__class__(blocks, axes) + + def __nonzero__(self): + return True + + # Python3 compat + __bool__ = __nonzero__ + + @property + def shape(self): + return tuple(len(ax) for ax in self.axes) + + @property + def ndim(self): + return len(self.axes) + + def set_axis(self, axis, new_labels): + new_labels = _ensure_index(new_labels) + old_len = len(self.axes[axis]) + new_len = len(new_labels) + + if new_len != old_len: + raise ValueError( + 'Length mismatch: Expected axis has {old} elements, new ' + 'values have {new} elements'.format(old=old_len, new=new_len)) + + self.axes[axis] = new_labels + + def rename_axis(self, mapper, axis, copy=True, level=None): + """ + Rename one of axes. + + Parameters + ---------- + mapper : unary callable + axis : int + copy : boolean, default True + level : int, default None + + """ + obj = self.copy(deep=copy) + obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level)) + return obj + + def add_prefix(self, prefix): + f = partial('{prefix}{}'.format, prefix=prefix) + return self.rename_axis(f, axis=0) + + def add_suffix(self, suffix): + f = partial('{}{suffix}'.format, suffix=suffix) + return self.rename_axis(f, axis=0) + + @property + def _is_single_block(self): + if self.ndim == 1: + return True + + if len(self.blocks) != 1: + return False + + blk = self.blocks[0] + return (blk.mgr_locs.is_slice_like and + blk.mgr_locs.as_slice == slice(0, len(self), 1)) + + def _rebuild_blknos_and_blklocs(self): + """ + Update mgr._blknos / mgr._blklocs. 
+ """ + new_blknos = np.empty(self.shape[0], dtype=np.int64) + new_blklocs = np.empty(self.shape[0], dtype=np.int64) + new_blknos.fill(-1) + new_blklocs.fill(-1) + + for blkno, blk in enumerate(self.blocks): + rl = blk.mgr_locs + new_blknos[rl.indexer] = blkno + new_blklocs[rl.indexer] = np.arange(len(rl)) + + if (new_blknos == -1).any(): + raise AssertionError("Gaps in blk ref_locs") + + self._blknos = new_blknos + self._blklocs = new_blklocs + + # make items read only for now + def _get_items(self): + return self.axes[0] + + items = property(fget=_get_items) + + def _get_counts(self, f): + """ return a dict of the counts of the function in BlockManager """ + self._consolidate_inplace() + counts = dict() + for b in self.blocks: + v = f(b) + counts[v] = counts.get(v, 0) + b.shape[0] + return counts + + def get_dtype_counts(self): + return self._get_counts(lambda b: b.dtype.name) + + def get_ftype_counts(self): + return self._get_counts(lambda b: b.ftype) + + def get_dtypes(self): + dtypes = np.array([blk.dtype for blk in self.blocks]) + return algos.take_1d(dtypes, self._blknos, allow_fill=False) + + def get_ftypes(self): + ftypes = np.array([blk.ftype for blk in self.blocks]) + return algos.take_1d(ftypes, self._blknos, allow_fill=False) + + def __getstate__(self): + block_values = [b.values for b in self.blocks] + block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] + axes_array = [ax for ax in self.axes] + + extra_state = { + '0.14.1': { + 'axes': axes_array, + 'blocks': [dict(values=b.values, mgr_locs=b.mgr_locs.indexer) + for b in self.blocks] + } + } + + # First three elements of the state are to maintain forward + # compatibility with 0.13.1. 
+ return axes_array, block_values, block_items, extra_state + + def __setstate__(self, state): + def unpickle_block(values, mgr_locs): + # numpy < 1.7 pickle compat + if values.dtype == 'M8[us]': + values = values.astype('M8[ns]') + return make_block(values, placement=mgr_locs) + + if (isinstance(state, tuple) and len(state) >= 4 and + '0.14.1' in state[3]): + state = state[3]['0.14.1'] + self.axes = [_ensure_index(ax) for ax in state['axes']] + self.blocks = tuple(unpickle_block(b['values'], b['mgr_locs']) + for b in state['blocks']) + else: + # discard anything after 3rd, support beta pickling format for a + # little while longer + ax_arrays, bvalues, bitems = state[:3] + + self.axes = [_ensure_index(ax) for ax in ax_arrays] + + if len(bitems) == 1 and self.axes[0].equals(bitems[0]): + # This is a workaround for pre-0.14.1 pickles that didn't + # support unpickling multi-block frames/panels with non-unique + # columns/items, because given a manager with items ["a", "b", + # "a"] there's no way of knowing which block's "a" is where. + # + # Single-block case can be supported under the assumption that + # block items corresponded to manager items 1-to-1. 
+ all_mgr_locs = [slice(0, len(bitems[0]))] + else: + all_mgr_locs = [self.axes[0].get_indexer(blk_items) + for blk_items in bitems] + + self.blocks = tuple( + unpickle_block(values, mgr_locs) + for values, mgr_locs in zip(bvalues, all_mgr_locs)) + + self._post_setstate() + + def _post_setstate(self): + self._is_consolidated = False + self._known_consolidated = False + self._rebuild_blknos_and_blklocs() + + def __len__(self): + return len(self.items) + + def __unicode__(self): + output = pprint_thing(self.__class__.__name__) + for i, ax in enumerate(self.axes): + if i == 0: + output += u('\nItems: {ax}'.format(ax=ax)) + else: + output += u('\nAxis {i}: {ax}'.format(i=i, ax=ax)) + + for block in self.blocks: + output += u('\n{block}'.format(block=pprint_thing(block))) + return output + + def _verify_integrity(self): + mgr_shape = self.shape + tot_items = sum(len(x.mgr_locs) for x in self.blocks) + for block in self.blocks: + if block._verify_integrity and block.shape[1:] != mgr_shape[1:]: + construction_error(tot_items, block.shape[1:], self.axes) + if len(self.items) != tot_items: + raise AssertionError('Number of manager items must equal union of ' + 'block items\n# manager items: {0}, # ' + 'tot_items: {1}'.format( + len(self.items), tot_items)) + + def apply(self, f, axes=None, filter=None, do_integrity_check=False, + consolidate=True, **kwargs): + """ + iterate over the blocks, collect and create a new block manager + + Parameters + ---------- + f : the callable or function name to operate on at the block level + axes : optional (if not supplied, use self.axes) + filter : list, if supplied, only call the block if the filter is in + the block + do_integrity_check : boolean, default False. Do the block manager + integrity check + consolidate: boolean, default True. 
Join together blocks having same + dtype + + Returns + ------- + Block Manager (new object) + + """ + + result_blocks = [] + + # filter kwarg is used in replace-* family of methods + if filter is not None: + filter_locs = set(self.items.get_indexer_for(filter)) + if len(filter_locs) == len(self.items): + # All items are included, as if there were no filtering + filter = None + else: + kwargs['filter'] = filter_locs + + if consolidate: + self._consolidate_inplace() + + if f == 'where': + align_copy = True + if kwargs.get('align', True): + align_keys = ['other', 'cond'] + else: + align_keys = ['cond'] + elif f == 'putmask': + align_copy = False + if kwargs.get('align', True): + align_keys = ['new', 'mask'] + else: + align_keys = ['mask'] + elif f == 'eval': + align_copy = False + align_keys = ['other'] + elif f == 'fillna': + # fillna internally does putmask, maybe it's better to do this + # at mgr, not block level? + align_copy = False + align_keys = ['value'] + else: + align_keys = [] + + aligned_args = dict((k, kwargs[k]) + for k in align_keys + if hasattr(kwargs[k], 'reindex_axis')) + + for b in self.blocks: + if filter is not None: + if not b.mgr_locs.isin(filter_locs).any(): + result_blocks.append(b) + continue + + if aligned_args: + b_items = self.items[b.mgr_locs.indexer] + + for k, obj in aligned_args.items(): + axis = getattr(obj, '_info_axis_number', 0) + kwargs[k] = obj.reindex(b_items, axis=axis, + copy=align_copy) + + kwargs['mgr'] = self + applied = getattr(b, f)(**kwargs) + result_blocks = _extend_blocks(applied, result_blocks) + + if len(result_blocks) == 0: + return self.make_empty(axes or self.axes) + bm = self.__class__(result_blocks, axes or self.axes, + do_integrity_check=do_integrity_check) + bm._consolidate_inplace() + return bm + + def reduction(self, f, axis=0, consolidate=True, transposed=False, + **kwargs): + """ + iterate over the blocks, collect and create a new block manager. 
+ This routine is intended for reduction type operations and + will do inference on the generated blocks. + + Parameters + ---------- + f: the callable or function name to operate on at the block level + axis: reduction axis, default 0 + consolidate: boolean, default True. Join together blocks having same + dtype + transposed: boolean, default False + we are holding transposed data + + Returns + ------- + Block Manager (new object) + + """ + + if consolidate: + self._consolidate_inplace() + + axes, blocks = [], [] + for b in self.blocks: + kwargs['mgr'] = self + axe, block = getattr(b, f)(axis=axis, **kwargs) + + axes.append(axe) + blocks.append(block) + + # note that some DatetimeTZ, Categorical are always ndim==1 + ndim = {b.ndim for b in blocks} + + if 2 in ndim: + + new_axes = list(self.axes) + + # multiple blocks that are reduced + if len(blocks) > 1: + new_axes[1] = axes[0] + + # reset the placement to the original + for b, sb in zip(blocks, self.blocks): + b.mgr_locs = sb.mgr_locs + + else: + new_axes[axis] = Index(np.concatenate( + [ax.values for ax in axes])) + + if transposed: + new_axes = new_axes[::-1] + blocks = [b.make_block(b.values.T, + placement=np.arange(b.shape[1]) + ) for b in blocks] + + return self.__class__(blocks, new_axes) + + # 0 ndim + if 0 in ndim and 1 not in ndim: + values = np.array([b.values for b in blocks]) + if len(values) == 1: + return values.item() + blocks = [make_block(values, ndim=1)] + axes = Index([ax[0] for ax in axes]) + + # single block + values = _concat._concat_compat([b.values for b in blocks]) + + # compute the orderings of our original data + if len(self.blocks) > 1: + + indexer = np.empty(len(self.axes[0]), dtype=np.intp) + i = 0 + for b in self.blocks: + for j in b.mgr_locs: + indexer[j] = i + i = i + 1 + + values = values.take(indexer) + + return SingleBlockManager( + [make_block(values, + ndim=1, + placement=np.arange(len(values)))], + axes[0]) + + def isna(self, **kwargs): + return self.apply('apply', 
**kwargs) + + def where(self, **kwargs): + return self.apply('where', **kwargs) + + def eval(self, **kwargs): + return self.apply('eval', **kwargs) + + def quantile(self, **kwargs): + return self.reduction('quantile', **kwargs) + + def setitem(self, **kwargs): + return self.apply('setitem', **kwargs) + + def putmask(self, **kwargs): + return self.apply('putmask', **kwargs) + + def diff(self, **kwargs): + return self.apply('diff', **kwargs) + + def interpolate(self, **kwargs): + return self.apply('interpolate', **kwargs) + + def shift(self, **kwargs): + return self.apply('shift', **kwargs) + + def fillna(self, **kwargs): + return self.apply('fillna', **kwargs) + + def downcast(self, **kwargs): + return self.apply('downcast', **kwargs) + + def astype(self, dtype, **kwargs): + return self.apply('astype', dtype=dtype, **kwargs) + + def convert(self, **kwargs): + return self.apply('convert', **kwargs) + + def replace(self, **kwargs): + return self.apply('replace', **kwargs) + + def replace_list(self, src_list, dest_list, inplace=False, regex=False, + mgr=None): + """ do a list replace """ + + inplace = validate_bool_kwarg(inplace, 'inplace') + + if mgr is None: + mgr = self + + # figure out our mask a-priori to avoid repeated replacements + values = self.as_array() + + def comp(s): + if isna(s): + return isna(values) + return _maybe_compare(values, getattr(s, 'asm8', s), operator.eq) + + masks = [comp(s) for i, s in enumerate(src_list)] + + result_blocks = [] + src_len = len(src_list) - 1 + for blk in self.blocks: + + # its possible to get multiple result blocks here + # replace ALWAYS will return a list + rb = [blk if inplace else blk.copy()] + for i, (s, d) in enumerate(zip(src_list, dest_list)): + new_rb = [] + for b in rb: + if b.dtype == np.object_: + convert = i == src_len + result = b.replace(s, d, inplace=inplace, regex=regex, + mgr=mgr, convert=convert) + new_rb = _extend_blocks(result, new_rb) + else: + # get our mask for this element, sized to this + # 
particular block + m = masks[i][b.mgr_locs.indexer] + if m.any(): + b = b.coerce_to_target_dtype(d) + new_rb.extend(b.putmask(m, d, inplace=True)) + else: + new_rb.append(b) + rb = new_rb + result_blocks.extend(rb) + + bm = self.__class__(result_blocks, self.axes) + bm._consolidate_inplace() + return bm + + def reshape_nd(self, axes, **kwargs): + """ a 2d-nd reshape operation on a BlockManager """ + return self.apply('reshape_nd', axes=axes, **kwargs) + + def is_consolidated(self): + """ + Return True if more than one block with the same dtype + """ + if not self._known_consolidated: + self._consolidate_check() + return self._is_consolidated + + def _consolidate_check(self): + ftypes = [blk.ftype for blk in self.blocks] + self._is_consolidated = len(ftypes) == len(set(ftypes)) + self._known_consolidated = True + + @property + def is_mixed_type(self): + # Warning, consolidation needs to get checked upstairs + self._consolidate_inplace() + return len(self.blocks) > 1 + + @property + def is_numeric_mixed_type(self): + # Warning, consolidation needs to get checked upstairs + self._consolidate_inplace() + return all(block.is_numeric for block in self.blocks) + + @property + def is_datelike_mixed_type(self): + # Warning, consolidation needs to get checked upstairs + self._consolidate_inplace() + return any(block.is_datelike for block in self.blocks) + + @property + def is_view(self): + """ return a boolean if we are a single block and are a view """ + if len(self.blocks) == 1: + return self.blocks[0].is_view + + # It is technically possible to figure out which blocks are views + # e.g. [ b.values.base is not None for b in self.blocks ] + # but then we have the case of possibly some blocks being a view + # and some blocks not. setting in theory is possible on the non-view + # blocks w/o causing a SettingWithCopy raise/warn. 
But this is a bit + # complicated + + return False + + def get_bool_data(self, copy=False): + """ + Parameters + ---------- + copy : boolean, default False + Whether to copy the blocks + """ + self._consolidate_inplace() + return self.combine([b for b in self.blocks if b.is_bool], copy) + + def get_numeric_data(self, copy=False): + """ + Parameters + ---------- + copy : boolean, default False + Whether to copy the blocks + """ + self._consolidate_inplace() + return self.combine([b for b in self.blocks if b.is_numeric], copy) + + def combine(self, blocks, copy=True): + """ return a new manager with the blocks """ + if len(blocks) == 0: + return self.make_empty() + + # FIXME: optimization potential + indexer = np.sort(np.concatenate([b.mgr_locs.as_array + for b in blocks])) + inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) + + new_blocks = [] + for b in blocks: + b = b.copy(deep=copy) + b.mgr_locs = algos.take_1d(inv_indexer, b.mgr_locs.as_array, + axis=0, allow_fill=False) + new_blocks.append(b) + + axes = list(self.axes) + axes[0] = self.items.take(indexer) + + return self.__class__(new_blocks, axes, do_integrity_check=False) + + def get_slice(self, slobj, axis=0): + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 0: + new_blocks = self._slice_take_blocks_ax0(slobj) + else: + slicer = [slice(None)] * (axis + 1) + slicer[axis] = slobj + slicer = tuple(slicer) + new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] + + new_axes = list(self.axes) + new_axes[axis] = new_axes[axis][slobj] + + bm = self.__class__(new_blocks, new_axes, do_integrity_check=False, + fastpath=True) + bm._consolidate_inplace() + return bm + + def __contains__(self, item): + return item in self.items + + @property + def nblocks(self): + return len(self.blocks) + + def copy(self, deep=True, mgr=None): + """ + Make deep or shallow copy of BlockManager + + Parameters + ---------- + deep : boolean o rstring, default True + If 
False, return shallow copy (do not copy data) + If 'all', copy data and a deep copy of the index + + Returns + ------- + copy : BlockManager + """ + + # this preserves the notion of view copying of axes + if deep: + if deep == 'all': + copy = lambda ax: ax.copy(deep=True) + else: + copy = lambda ax: ax.view() + new_axes = [copy(ax) for ax in self.axes] + else: + new_axes = list(self.axes) + return self.apply('copy', axes=new_axes, deep=deep, + do_integrity_check=False) + + def as_array(self, transpose=False, items=None): + """Convert the blockmanager data into an numpy array. + + Parameters + ---------- + transpose : boolean, default False + If True, transpose the return array + items : list of strings or None + Names of block items that will be included in the returned + array. ``None`` means that all block items will be used + + Returns + ------- + arr : ndarray + """ + if len(self.blocks) == 0: + arr = np.empty(self.shape, dtype=float) + return arr.transpose() if transpose else arr + + if items is not None: + mgr = self.reindex_axis(items, axis=0) + else: + mgr = self + + if self._is_single_block or not self.is_mixed_type: + arr = mgr.blocks[0].get_values() + else: + arr = mgr._interleave() + + return arr.transpose() if transpose else arr + + def _interleave(self): + """ + Return ndarray from blocks with specified item order + Items must be contained in the blocks + """ + dtype = _interleaved_dtype(self.blocks) + + result = np.empty(self.shape, dtype=dtype) + + if result.shape[0] == 0: + # Workaround for numpy 1.7 bug: + # + # >>> a = np.empty((0,10)) + # >>> a[slice(0,0)] + # array([], shape=(0, 10), dtype=float64) + # >>> a[[]] + # Traceback (most recent call last): + # File "<stdin>", line 1, in <module> + # IndexError: index 0 is out of bounds for axis 0 with size 0 + return result + + itemmask = np.zeros(self.shape[0]) + + for blk in self.blocks: + rl = blk.mgr_locs + result[rl.indexer] = blk.get_values(dtype) + itemmask[rl.indexer] = 1 + + if not 
itemmask.all(): + raise AssertionError('Some items were not contained in blocks') + + return result + + def to_dict(self, copy=True): + """ + Return a dict of str(dtype) -> BlockManager + + Parameters + ---------- + copy : boolean, default True + + Returns + ------- + values : a dict of dtype -> BlockManager + + Notes + ----- + This consolidates based on str(dtype) + """ + self._consolidate_inplace() + + bd = {} + for b in self.blocks: + bd.setdefault(str(b.dtype), []).append(b) + + return {dtype: self.combine(blocks, copy=copy) + for dtype, blocks in bd.items()} + + def xs(self, key, axis=1, copy=True, takeable=False): + if axis < 1: + raise AssertionError( + 'Can only take xs across axis >= 1, got {ax}'.format(ax=axis)) + + # take by position + if takeable: + loc = key + else: + loc = self.axes[axis].get_loc(key) + + slicer = [slice(None, None) for _ in range(self.ndim)] + slicer[axis] = loc + slicer = tuple(slicer) + + new_axes = list(self.axes) + + # could be an array indexer! + if isinstance(loc, (slice, np.ndarray)): + new_axes[axis] = new_axes[axis][loc] + else: + new_axes.pop(axis) + + new_blocks = [] + if len(self.blocks) > 1: + # we must copy here as we are mixed type + for blk in self.blocks: + newb = make_block(values=blk.values[slicer], + klass=blk.__class__, fastpath=True, + placement=blk.mgr_locs) + new_blocks.append(newb) + elif len(self.blocks) == 1: + block = self.blocks[0] + vals = block.values[slicer] + if copy: + vals = vals.copy() + new_blocks = [make_block(values=vals, + placement=block.mgr_locs, + klass=block.__class__, + fastpath=True, )] + + return self.__class__(new_blocks, new_axes) + + def fast_xs(self, loc): + """ + get a cross sectional for a given location in the + items ; handle dups + + return the result, is *could* be a view in the case of a + single block + """ + if len(self.blocks) == 1: + return self.blocks[0].iget((slice(None), loc)) + + items = self.items + + # non-unique (GH4726) + if not items.is_unique: + result = 
self._interleave() + if self.ndim == 2: + result = result.T + return result[loc] + + # unique + dtype = _interleaved_dtype(self.blocks) + n = len(items) + result = np.empty(n, dtype=dtype) + for blk in self.blocks: + # Such assignment may incorrectly coerce NaT to None + # result[blk.mgr_locs] = blk._slice((slice(None), loc)) + for i, rl in enumerate(blk.mgr_locs): + result[rl] = blk._try_coerce_result(blk.iget((i, loc))) + + return result + + def consolidate(self): + """ + Join together blocks having same dtype + + Returns + ------- + y : BlockManager + """ + if self.is_consolidated(): + return self + + bm = self.__class__(self.blocks, self.axes) + bm._is_consolidated = False + bm._consolidate_inplace() + return bm + + def _consolidate_inplace(self): + if not self.is_consolidated(): + self.blocks = tuple(_consolidate(self.blocks)) + self._is_consolidated = True + self._known_consolidated = True + self._rebuild_blknos_and_blklocs() + + def get(self, item, fastpath=True): + """ + Return values for selected item (ndarray or BlockManager). 
+ """ + if self.items.is_unique: + + if not isna(item): + loc = self.items.get_loc(item) + else: + indexer = np.arange(len(self.items))[isna(self.items)] + + # allow a single nan location indexer + if not is_scalar(indexer): + if len(indexer) == 1: + loc = indexer.item() + else: + raise ValueError("cannot label index with a null key") + + return self.iget(loc, fastpath=fastpath) + else: + + if isna(item): + raise TypeError("cannot label index with a null key") + + indexer = self.items.get_indexer_for([item]) + return self.reindex_indexer(new_axis=self.items[indexer], + indexer=indexer, axis=0, + allow_dups=True) + + def iget(self, i, fastpath=True): + """ + Return the data as a SingleBlockManager if fastpath=True and possible + + Otherwise return as a ndarray + """ + block = self.blocks[self._blknos[i]] + values = block.iget(self._blklocs[i]) + if not fastpath or not block._box_to_block_values or values.ndim != 1: + return values + + # fastpath shortcut for select a single-dim from a 2-dim BM + return SingleBlockManager( + [block.make_block_same_class(values, + placement=slice(0, len(values)), + ndim=1, fastpath=True)], + self.axes[1]) + + def get_scalar(self, tup): + """ + Retrieve single item + """ + full_loc = [ax.get_loc(x) for ax, x in zip(self.axes, tup)] + blk = self.blocks[self._blknos[full_loc[0]]] + values = blk.values + + # FIXME: this may return non-upcasted types? + if values.ndim == 1: + return values[full_loc[1]] + + full_loc[0] = self._blklocs[full_loc[0]] + return values[tuple(full_loc)] + + def delete(self, item): + """ + Delete selected item (items if non-unique) in-place. 
+ """ + indexer = self.items.get_loc(item) + + is_deleted = np.zeros(self.shape[0], dtype=np.bool_) + is_deleted[indexer] = True + ref_loc_offset = -is_deleted.cumsum() + + is_blk_deleted = [False] * len(self.blocks) + + if isinstance(indexer, int): + affected_start = indexer + else: + affected_start = is_deleted.nonzero()[0][0] + + for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): + blk = self.blocks[blkno] + bml = blk.mgr_locs + blk_del = is_deleted[bml.indexer].nonzero()[0] + + if len(blk_del) == len(bml): + is_blk_deleted[blkno] = True + continue + elif len(blk_del) != 0: + blk.delete(blk_del) + bml = blk.mgr_locs + + blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer]) + + # FIXME: use Index.delete as soon as it uses fastpath=True + self.axes[0] = self.items[~is_deleted] + self.blocks = tuple(b for blkno, b in enumerate(self.blocks) + if not is_blk_deleted[blkno]) + self._shape = None + self._rebuild_blknos_and_blklocs() + + def set(self, item, value, check=False): + """ + Set new item in-place. Does not consolidate. 
Adds new Block if not + contained in the current set of items + if check, then validate that we are not setting the same data in-place + """ + # FIXME: refactor, clearly separate broadcasting & zip-like assignment + # can prob also fix the various if tests for sparse/categorical + + value_is_extension_type = is_extension_type(value) + + # categorical/spares/datetimetz + if value_is_extension_type: + + def value_getitem(placement): + return value + else: + if value.ndim == self.ndim - 1: + value = _safe_reshape(value, (1,) + value.shape) + + def value_getitem(placement): + return value + else: + + def value_getitem(placement): + return value[placement.indexer] + + if value.shape[1:] != self.shape[1:]: + raise AssertionError('Shape of new values must be compatible ' + 'with manager shape') + + try: + loc = self.items.get_loc(item) + except KeyError: + # This item wasn't present, just insert at end + self.insert(len(self.items), item, value) + return + + if isinstance(loc, int): + loc = [loc] + + blknos = self._blknos[loc] + blklocs = self._blklocs[loc].copy() + + unfit_mgr_locs = [] + unfit_val_locs = [] + removed_blknos = [] + for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks), + group=True): + blk = self.blocks[blkno] + blk_locs = blklocs[val_locs.indexer] + if blk.should_store(value): + blk.set(blk_locs, value_getitem(val_locs), check=check) + else: + unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) + unfit_val_locs.append(val_locs) + + # If all block items are unfit, schedule the block for removal. 
+ if len(val_locs) == len(blk.mgr_locs): + removed_blknos.append(blkno) + else: + self._blklocs[blk.mgr_locs.indexer] = -1 + blk.delete(blk_locs) + self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk)) + + if len(removed_blknos): + # Remove blocks & update blknos accordingly + is_deleted = np.zeros(self.nblocks, dtype=np.bool_) + is_deleted[removed_blknos] = True + + new_blknos = np.empty(self.nblocks, dtype=np.int64) + new_blknos.fill(-1) + new_blknos[~is_deleted] = np.arange(self.nblocks - + len(removed_blknos)) + self._blknos = algos.take_1d(new_blknos, self._blknos, axis=0, + allow_fill=False) + self.blocks = tuple(blk for i, blk in enumerate(self.blocks) + if i not in set(removed_blknos)) + + if unfit_val_locs: + unfit_mgr_locs = np.concatenate(unfit_mgr_locs) + unfit_count = len(unfit_mgr_locs) + + new_blocks = [] + if value_is_extension_type: + # This code (ab-)uses the fact that sparse blocks contain only + # one item. + new_blocks.extend( + make_block(values=value.copy(), ndim=self.ndim, + placement=slice(mgr_loc, mgr_loc + 1)) + for mgr_loc in unfit_mgr_locs) + + self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) + + len(self.blocks)) + self._blklocs[unfit_mgr_locs] = 0 + + else: + # unfit_val_locs contains BlockPlacement objects + unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) + + new_blocks.append( + make_block(values=value_getitem(unfit_val_items), + ndim=self.ndim, placement=unfit_mgr_locs)) + + self._blknos[unfit_mgr_locs] = len(self.blocks) + self._blklocs[unfit_mgr_locs] = np.arange(unfit_count) + + self.blocks += tuple(new_blocks) + + # Newly created block's dtype may already be present. + self._known_consolidated = False + + def insert(self, loc, item, value, allow_duplicates=False): + """ + Insert item at selected position. 
+ + Parameters + ---------- + loc : int + item : hashable + value : array_like + allow_duplicates: bool + If False, trying to insert non-unique item will raise + + """ + if not allow_duplicates and item in self.items: + # Should this be a different kind of error?? + raise ValueError('cannot insert {}, already exists'.format(item)) + + if not isinstance(loc, int): + raise TypeError("loc must be int") + + # insert to the axis; this could possibly raise a TypeError + new_axis = self.items.insert(loc, item) + + block = make_block(values=value, ndim=self.ndim, + placement=slice(loc, loc + 1)) + + for blkno, count in _fast_count_smallints(self._blknos[loc:]): + blk = self.blocks[blkno] + if count == len(blk.mgr_locs): + blk.mgr_locs = blk.mgr_locs.add(1) + else: + new_mgr_locs = blk.mgr_locs.as_array.copy() + new_mgr_locs[new_mgr_locs >= loc] += 1 + blk.mgr_locs = new_mgr_locs + + if loc == self._blklocs.shape[0]: + # np.append is a lot faster (at least in numpy 1.7.1), let's use it + # if we can. + self._blklocs = np.append(self._blklocs, 0) + self._blknos = np.append(self._blknos, len(self.blocks)) + else: + self._blklocs = np.insert(self._blklocs, loc, 0) + self._blknos = np.insert(self._blknos, loc, len(self.blocks)) + + self.axes[0] = new_axis + self.blocks += (block,) + self._shape = None + + self._known_consolidated = False + + if len(self.blocks) > 100: + self._consolidate_inplace() + + def reindex_axis(self, new_index, axis, method=None, limit=None, + fill_value=None, copy=True): + """ + Conform block manager to new index. 
+ """ + new_index = _ensure_index(new_index) + new_index, indexer = self.axes[axis].reindex(new_index, method=method, + limit=limit) + + return self.reindex_indexer(new_index, indexer, axis=axis, + fill_value=fill_value, copy=copy) + + def reindex_indexer(self, new_axis, indexer, axis, fill_value=None, + allow_dups=False, copy=True): + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray of int64 or None + axis : int + fill_value : object + allow_dups : bool + + pandas-indexer with -1's only. + """ + if indexer is None: + if new_axis is self.axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result.axes = list(self.axes) + result.axes[axis] = new_axis + return result + + self._consolidate_inplace() + + # some axes don't allow reindexing with dups + if not allow_dups: + self.axes[axis]._can_reindex(indexer) + + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 0: + new_blocks = self._slice_take_blocks_ax0(indexer, + fill_tuple=(fill_value,)) + else: + new_blocks = [blk.take_nd(indexer, axis=axis, fill_tuple=( + fill_value if fill_value is not None else blk.fill_value,)) + for blk in self.blocks] + + new_axes = list(self.axes) + new_axes[axis] = new_axis + return self.__class__(new_blocks, new_axes) + + def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): + """ + Slice/take blocks along axis=0. 
+ + Overloaded for SingleBlock + + Returns + ------- + new_blocks : list of Block + + """ + + allow_fill = fill_tuple is not None + + sl_type, slobj, sllen = _preprocess_slice_or_indexer( + slice_or_indexer, self.shape[0], allow_fill=allow_fill) + + if self._is_single_block: + blk = self.blocks[0] + + if sl_type in ('slice', 'mask'): + return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))] + elif not allow_fill or self.ndim == 1: + if allow_fill and fill_tuple[0] is None: + _, fill_value = maybe_promote(blk.dtype) + fill_tuple = (fill_value, ) + + return [blk.take_nd(slobj, axis=0, + new_mgr_locs=slice(0, sllen), + fill_tuple=fill_tuple)] + + if sl_type in ('slice', 'mask'): + blknos = self._blknos[slobj] + blklocs = self._blklocs[slobj] + else: + blknos = algos.take_1d(self._blknos, slobj, fill_value=-1, + allow_fill=allow_fill) + blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1, + allow_fill=allow_fill) + + # When filling blknos, make sure blknos is updated before appending to + # blocks list, that way new blkno is exactly len(blocks). + # + # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order, + # pytables serialization will break otherwise. + blocks = [] + for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks), + group=True): + if blkno == -1: + # If we've got here, fill_tuple was not None. + fill_value = fill_tuple[0] + + blocks.append(self._make_na_block(placement=mgr_locs, + fill_value=fill_value)) + else: + blk = self.blocks[blkno] + + # Otherwise, slicing along items axis is necessary. + if not blk._can_consolidate: + # A non-consolidatable block, it's easy, because there's + # only one item and each mgr loc is a copy of that single + # item. 
+ for mgr_loc in mgr_locs: + newblk = blk.copy(deep=True) + newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1) + blocks.append(newblk) + + else: + blocks.append(blk.take_nd(blklocs[mgr_locs.indexer], + axis=0, new_mgr_locs=mgr_locs, + fill_tuple=None)) + + return blocks + + def _make_na_block(self, placement, fill_value=None): + # TODO: infer dtypes other than float64 from fill_value + + if fill_value is None: + fill_value = np.nan + block_shape = list(self.shape) + block_shape[0] = len(placement) + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + block_values = np.empty(block_shape, dtype=dtype) + block_values.fill(fill_value) + return make_block(block_values, placement=placement) + + def take(self, indexer, axis=1, verify=True, convert=True): + """ + Take items along any axis. + """ + self._consolidate_inplace() + indexer = (np.arange(indexer.start, indexer.stop, indexer.step, + dtype='int64') + if isinstance(indexer, slice) + else np.asanyarray(indexer, dtype='int64')) + + n = self.shape[axis] + if convert: + indexer = maybe_convert_indices(indexer, n) + + if verify: + if ((indexer == -1) | (indexer >= n)).any(): + raise Exception('Indices must be nonzero and less than ' + 'the axis length') + + new_labels = self.axes[axis].take(indexer) + return self.reindex_indexer(new_axis=new_labels, indexer=indexer, + axis=axis, allow_dups=True) + + def merge(self, other, lsuffix='', rsuffix=''): + if not self._is_indexed_like(other): + raise AssertionError('Must have same axes to merge managers') + + l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix, + right=other.items, rsuffix=rsuffix) + new_items = _concat_indexes([l, r]) + + new_blocks = [blk.copy(deep=False) for blk in self.blocks] + + offset = self.shape[0] + for blk in other.blocks: + blk = blk.copy(deep=False) + blk.mgr_locs = blk.mgr_locs.add(offset) + new_blocks.append(blk) + + new_axes = list(self.axes) + new_axes[0] = new_items + + return self.__class__(_consolidate(new_blocks), new_axes) 
+ + def _is_indexed_like(self, other): + """ + Check all axes except items + """ + if self.ndim != other.ndim: + raise AssertionError( + 'Number of dimensions must agree got {ndim} and ' + '{oth_ndim}'.format(ndim=self.ndim, oth_ndim=other.ndim)) + for ax, oax in zip(self.axes[1:], other.axes[1:]): + if not ax.equals(oax): + return False + return True + + def equals(self, other): + self_axes, other_axes = self.axes, other.axes + if len(self_axes) != len(other_axes): + return False + if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): + return False + self._consolidate_inplace() + other._consolidate_inplace() + if len(self.blocks) != len(other.blocks): + return False + + # canonicalize block order, using a tuple combining the type + # name and then mgr_locs because there might be unconsolidated + # blocks (say, Categorical) which can only be distinguished by + # the iteration order + def canonicalize(block): + return (block.dtype.name, block.mgr_locs.as_array.tolist()) + + self_blocks = sorted(self.blocks, key=canonicalize) + other_blocks = sorted(other.blocks, key=canonicalize) + return all(block.equals(oblock) + for block, oblock in zip(self_blocks, other_blocks)) + + def unstack(self, unstacker_func): + """Return a blockmanager with all blocks unstacked. + + Parameters + ---------- + unstacker_func : callable + A (partially-applied) ``pd.core.reshape._Unstacker`` class. 
+ + Returns + ------- + unstacked : BlockManager + """ + dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) + new_columns = dummy.get_new_columns() + new_index = dummy.get_new_index() + new_blocks = [] + columns_mask = [] + + for blk in self.blocks: + blocks, mask = blk._unstack( + partial(unstacker_func, + value_columns=self.items[blk.mgr_locs.indexer]), + new_columns) + + new_blocks.extend(blocks) + columns_mask.extend(mask) + + new_columns = new_columns[columns_mask] + + bm = BlockManager(new_blocks, [new_columns, new_index]) + return bm + + +class SingleBlockManager(BlockManager): + """ manage a single block with """ + + ndim = 1 + _is_consolidated = True + _known_consolidated = True + __slots__ = () + + def __init__(self, block, axis, do_integrity_check=False, fastpath=False): + + if isinstance(axis, list): + if len(axis) != 1: + raise ValueError("cannot create SingleBlockManager with more " + "than 1 axis") + axis = axis[0] + + # passed from constructor, single block, single axis + if fastpath: + self.axes = [axis] + if isinstance(block, list): + + # empty block + if len(block) == 0: + block = [np.array([])] + elif len(block) != 1: + raise ValueError('Cannot create SingleBlockManager with ' + 'more than 1 block') + block = block[0] + else: + self.axes = [_ensure_index(axis)] + + # create the block here + if isinstance(block, list): + + # provide consolidation to the interleaved_dtype + if len(block) > 1: + dtype = _interleaved_dtype(block) + block = [b.astype(dtype) for b in block] + block = _consolidate(block) + + if len(block) != 1: + raise ValueError('Cannot create SingleBlockManager with ' + 'more than 1 block') + block = block[0] + + if not isinstance(block, Block): + block = make_block(block, placement=slice(0, len(axis)), ndim=1, + fastpath=True) + + self.blocks = [block] + + def _post_setstate(self): + pass + + @property + def _block(self): + return self.blocks[0] + + @property + def _values(self): + return self._block.values + + 
@property + def _blknos(self): + """ compat with BlockManager """ + return None + + @property + def _blklocs(self): + """ compat with BlockManager """ + return None + + def reindex(self, new_axis, indexer=None, method=None, fill_value=None, + limit=None, copy=True): + # if we are the same and don't copy, just return + if self.index.equals(new_axis): + if copy: + return self.copy(deep=True) + else: + return self + + values = self._block.get_values() + + if indexer is None: + indexer = self.items.get_indexer_for(new_axis) + + if fill_value is None: + fill_value = np.nan + + new_values = algos.take_1d(values, indexer, fill_value=fill_value) + + # fill if needed + if method is not None or limit is not None: + new_values = missing.interpolate_2d(new_values, + method=method, + limit=limit, + fill_value=fill_value) + + if self._block.is_sparse: + make_block = self._block.make_block_same_class + + block = make_block(new_values, copy=copy, + placement=slice(0, len(new_axis))) + + mgr = SingleBlockManager(block, new_axis) + mgr._consolidate_inplace() + return mgr + + def get_slice(self, slobj, axis=0): + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + return self.__class__(self._block._slice(slobj), + self.index[slobj], fastpath=True) + + @property + def index(self): + return self.axes[0] + + def convert(self, **kwargs): + """ convert the whole block as one """ + kwargs['by_item'] = False + return self.apply('convert', **kwargs) + + @property + def dtype(self): + return self._block.dtype + + @property + def array_dtype(self): + return self._block.array_dtype + + @property + def ftype(self): + return self._block.ftype + + def get_dtype_counts(self): + return {self.dtype.name: 1} + + def get_ftype_counts(self): + return {self.ftype: 1} + + def get_dtypes(self): + return np.array([self._block.dtype]) + + def get_ftypes(self): + return np.array([self._block.ftype]) + + def external_values(self): + return self._block.external_values() + + def 
internal_values(self): + return self._block.internal_values() + + def formatting_values(self): + """Return the internal values used by the DataFrame/SeriesFormatter""" + return self._block.formatting_values() + + def get_values(self): + """ return a dense type view """ + return np.array(self._block.to_dense(), copy=False) + + @property + def asobject(self): + """ + return a object dtype array. datetime/timedelta like values are boxed + to Timestamp/Timedelta instances. + """ + return self._block.get_values(dtype=object) + + @property + def itemsize(self): + return self._block.values.itemsize + + @property + def _can_hold_na(self): + return self._block._can_hold_na + + def is_consolidated(self): + return True + + def _consolidate_check(self): + pass + + def _consolidate_inplace(self): + pass + + def delete(self, item): + """ + Delete single item from SingleBlockManager. + + Ensures that self.blocks doesn't become empty. + """ + loc = self.items.get_loc(item) + self._block.delete(loc) + self.axes[0] = self.axes[0].delete(loc) + + def fast_xs(self, loc): + """ + fast path for getting a cross-section + return a view of the data + """ + return self._block.values[loc] + + def concat(self, to_concat, new_axis): + """ + Concatenate a list of SingleBlockManagers into a single + SingleBlockManager. + + Used for pd.concat of Series objects with axis=0. 
+ + Parameters + ---------- + to_concat : list of SingleBlockManagers + new_axis : Index of the result + + Returns + ------- + SingleBlockManager + + """ + non_empties = [x for x in to_concat if len(x) > 0] + + # check if all series are of the same block type: + if len(non_empties) > 0: + blocks = [obj.blocks[0] for obj in non_empties] + + if all(type(b) is type(blocks[0]) for b in blocks[1:]): # noqa + new_block = blocks[0].concat_same_type(blocks) + else: + values = [x.values for x in blocks] + values = _concat._concat_compat(values) + new_block = make_block( + values, placement=slice(0, len(values), 1)) + else: + values = [x._block.values for x in to_concat] + values = _concat._concat_compat(values) + new_block = make_block( + values, placement=slice(0, len(values), 1)) + + mgr = SingleBlockManager(new_block, new_axis) + return mgr + + +def construction_error(tot_items, block_shape, axes, e=None): + """ raise a helpful message about our construction """ + passed = tuple(map(int, [tot_items] + list(block_shape))) + implied = tuple(map(int, [len(ax) for ax in axes])) + if passed == implied and e is not None: + raise e + if block_shape[0] == 0: + raise ValueError("Empty data passed with indices specified.") + raise ValueError("Shape of passed values is {0}, indices imply {1}".format( + passed, implied)) + + +def create_block_manager_from_blocks(blocks, axes): + try: + if len(blocks) == 1 and not isinstance(blocks[0], Block): + # if blocks[0] is of length 0, return empty blocks + if not len(blocks[0]): + blocks = [] + else: + # It's OK if a single block is passed as values, its placement + # is basically "all items", but if there're many, don't bother + # converting, it's an error anyway. 
+ blocks = [make_block(values=blocks[0], + placement=slice(0, len(axes[0])))] + + mgr = BlockManager(blocks, axes) + mgr._consolidate_inplace() + return mgr + + except (ValueError) as e: + blocks = [getattr(b, 'values', b) for b in blocks] + tot_items = sum(b.shape[0] for b in blocks) + construction_error(tot_items, blocks[0].shape[1:], axes, e) + + +def create_block_manager_from_arrays(arrays, names, axes): + + try: + blocks = form_blocks(arrays, names, axes) + mgr = BlockManager(blocks, axes) + mgr._consolidate_inplace() + return mgr + except ValueError as e: + construction_error(len(arrays), arrays[0].shape, axes, e) + + +def form_blocks(arrays, names, axes): + # put "leftover" items in float bucket, where else? + # generalize? + items_dict = defaultdict(list) + extra_locs = [] + + names_idx = Index(names) + if names_idx.equals(axes[0]): + names_indexer = np.arange(len(names_idx)) + else: + assert names_idx.intersection(axes[0]).is_unique + names_indexer = names_idx.get_indexer_for(axes[0]) + + for i, name_idx in enumerate(names_indexer): + if name_idx == -1: + extra_locs.append(i) + continue + + k = names[name_idx] + v = arrays[name_idx] + + block_type = get_block_type(v) + items_dict[block_type.__name__].append((i, k, v)) + + blocks = [] + if len(items_dict['FloatBlock']): + float_blocks = _multi_blockify(items_dict['FloatBlock']) + blocks.extend(float_blocks) + + if len(items_dict['ComplexBlock']): + complex_blocks = _multi_blockify(items_dict['ComplexBlock']) + blocks.extend(complex_blocks) + + if len(items_dict['TimeDeltaBlock']): + timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) + blocks.extend(timedelta_blocks) + + if len(items_dict['IntBlock']): + int_blocks = _multi_blockify(items_dict['IntBlock']) + blocks.extend(int_blocks) + + if len(items_dict['DatetimeBlock']): + datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], + _NS_DTYPE) + blocks.extend(datetime_blocks) + + if len(items_dict['DatetimeTZBlock']): + dttz_blocks = 
[make_block(array, + klass=DatetimeTZBlock, + fastpath=True, + placement=[i]) + for i, _, array in items_dict['DatetimeTZBlock']] + blocks.extend(dttz_blocks) + + if len(items_dict['BoolBlock']): + bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) + blocks.extend(bool_blocks) + + if len(items_dict['ObjectBlock']) > 0: + object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) + blocks.extend(object_blocks) + + if len(items_dict['SparseBlock']) > 0: + sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) + blocks.extend(sparse_blocks) + + if len(items_dict['CategoricalBlock']) > 0: + cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True, + placement=[i]) + for i, _, array in items_dict['CategoricalBlock']] + blocks.extend(cat_blocks) + + if len(extra_locs): + shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) + + # empty items -> dtype object + block_values = np.empty(shape, dtype=object) + block_values.fill(np.nan) + + na_block = make_block(block_values, placement=extra_locs) + blocks.append(na_block) + + return blocks + + +def _simple_blockify(tuples, dtype): + """ return a single array of a block that has a single dtype; if dtype is + not None, coerce to this dtype + """ + values, placement = _stack_arrays(tuples, dtype) + + # CHECK DTYPE? 
+ if dtype is not None and values.dtype != dtype: # pragma: no cover + values = values.astype(dtype) + + block = make_block(values, placement=placement) + return [block] + + +def _multi_blockify(tuples, dtype=None): + """ return an array of blocks that potentially have different dtypes """ + + # group by dtype + grouper = itertools.groupby(tuples, lambda x: x[2].dtype) + + new_blocks = [] + for dtype, tup_block in grouper: + + values, placement = _stack_arrays(list(tup_block), dtype) + + block = make_block(values, placement=placement) + new_blocks.append(block) + + return new_blocks + + +def _sparse_blockify(tuples, dtype=None): + """ return an array of blocks that potentially have different dtypes (and + are sparse) + """ + + new_blocks = [] + for i, names, array in tuples: + array = _maybe_to_sparse(array) + block = make_block(array, klass=SparseBlock, fastpath=True, + placement=[i]) + new_blocks.append(block) + + return new_blocks + + +def _stack_arrays(tuples, dtype): + + # fml + def _asarray_compat(x): + if isinstance(x, ABCSeries): + return x._values + else: + return np.asarray(x) + + def _shape_compat(x): + if isinstance(x, ABCSeries): + return len(x), + else: + return x.shape + + placement, names, arrays = zip(*tuples) + + first = arrays[0] + shape = (len(arrays),) + _shape_compat(first) + + stacked = np.empty(shape, dtype=dtype) + for i, arr in enumerate(arrays): + stacked[i] = _asarray_compat(arr) + + return stacked, placement + + +def _interleaved_dtype(blocks): + if not len(blocks): + return None + + dtype = find_common_type([b.dtype for b in blocks]) + + # only numpy compat + if isinstance(dtype, ExtensionDtype): + dtype = np.object + + return dtype + + +def _consolidate(blocks): + """ + Merge blocks having same dtype, exclude non-consolidating blocks + """ + + # sort by _can_consolidate, dtype + gkey = lambda x: x._consolidate_key + grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) + + new_blocks = [] + for (_can_consolidate, dtype), 
group_blocks in grouper: + merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype, + _can_consolidate=_can_consolidate) + new_blocks = _extend_blocks(merged_blocks, new_blocks) + return new_blocks + + +def _maybe_compare(a, b, op): + + is_a_array = isinstance(a, np.ndarray) + is_b_array = isinstance(b, np.ndarray) + + # numpy deprecation warning to have i8 vs integer comparisons + if is_datetimelike_v_numeric(a, b): + result = False + + # numpy deprecation warning if comparing numeric vs string-like + elif is_numeric_v_string_like(a, b): + result = False + + else: + result = op(a, b) + + if is_scalar(result) and (is_a_array or is_b_array): + type_names = [type(a).__name__, type(b).__name__] + + if is_a_array: + type_names[0] = 'ndarray(dtype={dtype})'.format(dtype=a.dtype) + + if is_b_array: + type_names[1] = 'ndarray(dtype={dtype})'.format(dtype=b.dtype) + + raise TypeError( + "Cannot compare types {a!r} and {b!r}".format(a=type_names[0], + b=type_names[1])) + return result + + +def _concat_indexes(indexes): + return indexes[0].append(indexes[1:]) + + +def _get_blkno_placements(blknos, blk_count, group=True): + """ + + Parameters + ---------- + blknos : array of int64 + blk_count : int + group : bool + + Returns + ------- + iterator + yield (BlockPlacement, blkno) + + """ + + blknos = _ensure_int64(blknos) + + # FIXME: blk_count is unused, but it may avoid the use of dicts in cython + for blkno, indexer in libinternals.get_blkno_indexers(blknos, group): + yield blkno, BlockPlacement(indexer) + + +def items_overlap_with_suffix(left, lsuffix, right, rsuffix): + """ + If two indices overlap, add suffixes to overlapping entries. + + If corresponding suffix is empty, the entry is simply converted to string. 
+ + """ + to_rename = left.intersection(right) + if len(to_rename) == 0: + return left, right + else: + if not lsuffix and not rsuffix: + raise ValueError('columns overlap but no suffix specified: ' + '{rename}'.format(rename=to_rename)) + + def lrenamer(x): + if x in to_rename: + return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix) + return x + + def rrenamer(x): + if x in to_rename: + return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix) + return x + + return (_transform_index(left, lrenamer), + _transform_index(right, rrenamer)) + + +def _transform_index(index, func, level=None): + """ + Apply function to all values found in index. + + This includes transforming multiindex entries separately. + Only apply function to one level of the MultiIndex if level is specified. + + """ + if isinstance(index, MultiIndex): + if level is not None: + items = [tuple(func(y) if i == level else y + for i, y in enumerate(x)) for x in index] + else: + items = [tuple(func(y) for y in x) for x in index] + return MultiIndex.from_tuples(items, names=index.names) + else: + items = [func(x) for x in index] + return Index(items, name=index.name) + + +def _fast_count_smallints(arr): + """Faster version of set(arr) for sequences of small numbers.""" + if len(arr) == 0: + # Handle empty arr case separately: numpy 1.6 chokes on that. 
+ return np.empty((0, 2), dtype=arr.dtype) + else: + counts = np.bincount(arr.astype(np.int_)) + nz = counts.nonzero()[0] + return np.c_[nz, counts[nz]] + + +def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): + if isinstance(slice_or_indexer, slice): + return ('slice', slice_or_indexer, + libinternals.slice_len(slice_or_indexer, length)) + elif (isinstance(slice_or_indexer, np.ndarray) and + slice_or_indexer.dtype == np.bool_): + return 'mask', slice_or_indexer, slice_or_indexer.sum() + else: + indexer = np.asanyarray(slice_or_indexer, dtype=np.int64) + if not allow_fill: + indexer = maybe_convert_indices(indexer, length) + return 'fancy', indexer, len(indexer)
Discussed in #19268, not sure if the time is right for this since it causes some rebase headaches, but may be worth it. Note that git is of the opinion that the file core/internals.py was moved to core/internals/blocks.py, not sure if that matters for e.g. blame.
https://api.github.com/repos/pandas-dev/pandas/pulls/19316
2018-01-19T18:29:06Z
2018-01-21T07:29:43Z
null
2018-02-11T21:58:52Z
Standardize accessor naming convention self._parent, avoid self._data
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 708f903cd73cb..932485de47f55 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -2163,6 +2163,7 @@ class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): def __init__(self, data): self._validate(data) + self._parent = data self.categorical = data.values self.index = data.index self.name = data.name diff --git a/pandas/core/indexes/accessors.py b/pandas/core/indexes/accessors.py index d40230386216c..fdaa3b25265f8 100644 --- a/pandas/core/indexes/accessors.py +++ b/pandas/core/indexes/accessors.py @@ -28,14 +28,14 @@ def __init__(self, data, orig): raise TypeError("cannot convert an object of type {0} to a " "datetimelike index".format(type(data))) - self.values = data + self._parent = data self.orig = orig self.name = getattr(data, 'name', None) self.index = getattr(data, 'index', None) self._freeze() def _get_values(self): - data = self.values + data = self._parent if is_datetime64_dtype(data.dtype): return DatetimeIndex(data, copy=False, name=self.name) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 278b220753196..fc70b6f17126f 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -690,7 +690,7 @@ def str_extract(arr, pat, flags=0, expand=None): if expand: return _str_extract_frame(arr._orig, pat, flags=flags) else: - result, name = _str_extract_noexpand(arr._data, pat, flags=flags) + result, name = _str_extract_noexpand(arr._parent, pat, flags=flags) return arr._wrap_result(result, name=name, expand=expand) @@ -1312,7 +1312,7 @@ def str_encode(arr, encoding, errors="strict"): def _noarg_wrapper(f, docstring=None, **kargs): def wrapper(self): - result = _na_map(f, self._data, **kargs) + result = _na_map(f, self._parent, **kargs) return self._wrap_result(result) wrapper.__name__ = f.__name__ @@ -1326,15 +1326,15 @@ def wrapper(self): def _pat_wrapper(f, flags=False, na=False, 
**kwargs): def wrapper1(self, pat): - result = f(self._data, pat) + result = f(self._parent, pat) return self._wrap_result(result) def wrapper2(self, pat, flags=0, **kwargs): - result = f(self._data, pat, flags=flags, **kwargs) + result = f(self._parent, pat, flags=flags, **kwargs) return self._wrap_result(result) def wrapper3(self, pat, na=np.nan): - result = f(self._data, pat, na=na) + result = f(self._parent, pat, na=na) return self._wrap_result(result) wrapper = wrapper3 if na else wrapper2 if flags else wrapper1 @@ -1372,7 +1372,7 @@ class StringMethods(NoNewAttributesMixin): def __init__(self, data): self._validate(data) self._is_categorical = is_categorical_dtype(data) - self._data = data.cat.categories if self._is_categorical else data + self._parent = data.cat.categories if self._is_categorical else data # save orig to blow up categoricals to the right type self._orig = data self._freeze() @@ -1502,18 +1502,18 @@ def cons_row(x): @copy(str_cat) def cat(self, others=None, sep=None, na_rep=None): - data = self._orig if self._is_categorical else self._data + data = self._orig if self._is_categorical else self._parent result = str_cat(data, others=others, sep=sep, na_rep=na_rep) return self._wrap_result(result, use_codes=(not self._is_categorical)) @copy(str_split) def split(self, pat=None, n=-1, expand=False): - result = str_split(self._data, pat, n=n) + result = str_split(self._parent, pat, n=n) return self._wrap_result(result, expand=expand) @copy(str_rsplit) def rsplit(self, pat=None, n=-1, expand=False): - result = str_rsplit(self._data, pat, n=n) + result = str_rsplit(self._parent, pat, n=n) return self._wrap_result(result, expand=expand) _shared_docs['str_partition'] = (""" @@ -1568,7 +1568,7 @@ def rsplit(self, pat=None, n=-1, expand=False): }) def partition(self, pat=' ', expand=True): f = lambda x: x.partition(pat) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result, expand=expand) 
@Appender(_shared_docs['str_partition'] % { @@ -1579,45 +1579,45 @@ def partition(self, pat=' ', expand=True): }) def rpartition(self, pat=' ', expand=True): f = lambda x: x.rpartition(pat) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result, expand=expand) @copy(str_get) def get(self, i): - result = str_get(self._data, i) + result = str_get(self._parent, i) return self._wrap_result(result) @copy(str_join) def join(self, sep): - result = str_join(self._data, sep) + result = str_join(self._parent, sep) return self._wrap_result(result) @copy(str_contains) def contains(self, pat, case=True, flags=0, na=np.nan, regex=True): - result = str_contains(self._data, pat, case=case, flags=flags, na=na, + result = str_contains(self._parent, pat, case=case, flags=flags, na=na, regex=regex) return self._wrap_result(result) @copy(str_match) def match(self, pat, case=True, flags=0, na=np.nan, as_indexer=None): - result = str_match(self._data, pat, case=case, flags=flags, na=na, + result = str_match(self._parent, pat, case=case, flags=flags, na=na, as_indexer=as_indexer) return self._wrap_result(result) @copy(str_replace) def replace(self, pat, repl, n=-1, case=None, flags=0): - result = str_replace(self._data, pat, repl, n=n, case=case, + result = str_replace(self._parent, pat, repl, n=n, case=case, flags=flags) return self._wrap_result(result) @copy(str_repeat) def repeat(self, repeats): - result = str_repeat(self._data, repeats) + result = str_repeat(self._parent, repeats) return self._wrap_result(result) @copy(str_pad) def pad(self, width, side='left', fillchar=' '): - result = str_pad(self._data, width, side=side, fillchar=fillchar) + result = str_pad(self._parent, width, side=side, fillchar=fillchar) return self._wrap_result(result) _shared_docs['str_pad'] = (""" @@ -1665,27 +1665,27 @@ def zfill(self, width): ------- filled : Series/Index of objects """ - result = str_pad(self._data, width, side='left', fillchar='0') + result 
= str_pad(self._parent, width, side='left', fillchar='0') return self._wrap_result(result) @copy(str_slice) def slice(self, start=None, stop=None, step=None): - result = str_slice(self._data, start, stop, step) + result = str_slice(self._parent, start, stop, step) return self._wrap_result(result) @copy(str_slice_replace) def slice_replace(self, start=None, stop=None, repl=None): - result = str_slice_replace(self._data, start, stop, repl) + result = str_slice_replace(self._parent, start, stop, repl) return self._wrap_result(result) @copy(str_decode) def decode(self, encoding, errors="strict"): - result = str_decode(self._data, encoding, errors) + result = str_decode(self._parent, encoding, errors) return self._wrap_result(result) @copy(str_encode) def encode(self, encoding, errors="strict"): - result = str_encode(self._data, encoding, errors) + result = str_encode(self._parent, encoding, errors) return self._wrap_result(result) _shared_docs['str_strip'] = (""" @@ -1700,38 +1700,38 @@ def encode(self, encoding, errors="strict"): @Appender(_shared_docs['str_strip'] % dict(side='left and right sides', method='strip')) def strip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='both') + result = str_strip(self._parent, to_strip, side='both') return self._wrap_result(result) @Appender(_shared_docs['str_strip'] % dict(side='left side', method='lstrip')) def lstrip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='left') + result = str_strip(self._parent, to_strip, side='left') return self._wrap_result(result) @Appender(_shared_docs['str_strip'] % dict(side='right side', method='rstrip')) def rstrip(self, to_strip=None): - result = str_strip(self._data, to_strip, side='right') + result = str_strip(self._parent, to_strip, side='right') return self._wrap_result(result) @copy(str_wrap) def wrap(self, width, **kwargs): - result = str_wrap(self._data, width, **kwargs) + result = str_wrap(self._parent, width, **kwargs) return 
self._wrap_result(result) @copy(str_get_dummies) def get_dummies(self, sep='|'): # we need to cast to Series of strings as only that has all # methods available for making the dummies... - data = self._orig.astype(str) if self._is_categorical else self._data + data = self._orig.astype(str) if self._is_categorical else self._parent result, name = str_get_dummies(data, sep) return self._wrap_result(result, use_codes=(not self._is_categorical), name=name, expand=True) @copy(str_translate) def translate(self, table, deletechars=None): - result = str_translate(self._data, table, deletechars) + result = str_translate(self._parent, table, deletechars) return self._wrap_result(result) count = _pat_wrapper(str_count, flags=True) @@ -1774,14 +1774,15 @@ def extractall(self, pat, flags=0): dict(side='lowest', method='find', also='rfind : Return highest indexes in each strings')) def find(self, sub, start=0, end=None): - result = str_find(self._data, sub, start=start, end=end, side='left') + result = str_find(self._parent, sub, start=start, end=end, side='left') return self._wrap_result(result) @Appender(_shared_docs['find'] % dict(side='highest', method='rfind', also='find : Return lowest indexes in each strings')) def rfind(self, sub, start=0, end=None): - result = str_find(self._data, sub, start=start, end=end, side='right') + result = str_find(self._parent, sub, + start=start, end=end, side='right') return self._wrap_result(result) def normalize(self, form): @@ -1800,7 +1801,7 @@ def normalize(self, form): """ import unicodedata f = lambda x: unicodedata.normalize(form, compat.u_safe(x)) - result = _na_map(f, self._data) + result = _na_map(f, self._parent) return self._wrap_result(result) _shared_docs['index'] = (""" @@ -1831,14 +1832,16 @@ def normalize(self, form): dict(side='lowest', similar='find', method='index', also='rindex : Return highest indexes in each strings')) def index(self, sub, start=0, end=None): - result = str_index(self._data, sub, start=start, end=end, 
side='left') + result = str_index(self._parent, sub, + start=start, end=end, side='left') return self._wrap_result(result) @Appender(_shared_docs['index'] % dict(side='highest', similar='rfind', method='rindex', also='index : Return lowest indexes in each strings')) def rindex(self, sub, start=0, end=None): - result = str_index(self._data, sub, start=start, end=end, side='right') + result = str_index(self._parent, sub, + start=start, end=end, side='right') return self._wrap_result(result) _shared_docs['len'] = (""" diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 3094d7d0ab1c6..61342d8070956 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -2470,7 +2470,7 @@ def _grouped_plot_by_column(plotf, data, columns=None, by=None, class BasePlotMethods(PandasObject): def __init__(self, data): - self._data = data + self._parent = data def __call__(self, *args, **kwargs): raise NotImplementedError @@ -2498,7 +2498,7 @@ def __call__(self, kind='line', ax=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, label=None, secondary_y=False, **kwds): - return plot_series(self._data, kind=kind, ax=ax, figsize=figsize, + return plot_series(self._parent, kind=kind, ax=ax, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style, logx=logx, logy=logy, loglog=loglog, xticks=xticks, yticks=yticks, @@ -2655,7 +2655,7 @@ def __call__(self, x=None, y=None, kind='line', ax=None, rot=None, fontsize=None, colormap=None, table=False, yerr=None, xerr=None, secondary_y=False, sort_columns=False, **kwds): - return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax, + return plot_frame(self._parent, kind=kind, x=x, y=y, ax=ax, subplots=subplots, sharex=sharex, sharey=sharey, layout=layout, figsize=figsize, use_index=use_index, title=title, grid=grid, legend=legend, style=style,
As discussed in #19294, getting rid of these `._data` cases goes a long way towards making the contents of a `._data` attribute predictable.
https://api.github.com/repos/pandas-dev/pandas/pulls/19309
2018-01-19T05:57:35Z
2018-01-19T11:29:31Z
null
2018-02-11T21:58:53Z
FIX: Raise errors when wrong string arguments are passed to `resample`
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 86fc47dee09fc..148fabfb96d68 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -486,7 +486,7 @@ Groupby/Resample/Rolling - Bug when grouping by a single column and aggregating with a class like ``list`` or ``tuple`` (:issue:`18079`) - Fixed regression in :func:`DataFrame.groupby` which would not emit an error when called with a tuple key not in the index (:issue:`18798`) -- +- Bug in :func:`DataFrame.resample` which silently ignored unsupported (or mistyped) options for ``label``, ``closed`` and ``convention`` (:issue:`19303`) - Sparse diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 5447ce7470b9d..04f5c124deccc 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -1061,6 +1061,17 @@ class TimeGrouper(Grouper): def __init__(self, freq='Min', closed=None, label=None, how='mean', axis=0, fill_method=None, limit=None, loffset=None, kind=None, convention=None, base=0, **kwargs): + # Check for correctness of the keyword arguments which would + # otherwise silently use the default if misspelled + if label not in {None, 'left', 'right'}: + raise ValueError('Unsupported value {} for `label`'.format(label)) + if closed not in {None, 'left', 'right'}: + raise ValueError('Unsupported value {} for `closed`'.format( + closed)) + if convention not in {None, 'start', 'end', 'e', 's'}: + raise ValueError('Unsupported value {} for `convention`' + .format(convention)) + freq = to_offset(freq) end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W']) diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index e9a517605020a..6f77e7854cf76 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -963,6 +963,7 @@ def test_resample_basic(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min', name='index') s = Series(np.random.randn(14), index=rng) + result = 
s.resample('5min', closed='right', label='right').mean() exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index') @@ -985,6 +986,20 @@ def test_resample_basic(self): expect = s.groupby(grouper).agg(lambda x: x[-1]) assert_series_equal(result, expect) + def test_resample_string_kwargs(self): + # Test for issue #19303 + rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min', + name='index') + s = Series(np.random.randn(14), index=rng) + + # Check that wrong keyword argument strings raise an error + with pytest.raises(ValueError): + s.resample('5min', label='righttt').mean() + with pytest.raises(ValueError): + s.resample('5min', closed='righttt').mean() + with pytest.raises(ValueError): + s.resample('5min', convention='starttt').mean() + def test_resample_how(self): rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min', name='index')
- [x] closes #19303 - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry As a first step, the initial commit just extends the resampling tests so that they fail when a misstyped string argument, e.g. for `label`, passes silently without raising. I have the fix for the code ready and will commit when the tests have failed once.
https://api.github.com/repos/pandas-dev/pandas/pulls/19307
2018-01-18T23:36:00Z
2018-01-21T15:47:34Z
2018-01-21T15:47:34Z
2018-01-23T14:44:15Z
separate _libs/src/reduce.pyx to _libs.reduction
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 9c0791c3eb8ce..1632f5d016439 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -6,7 +6,7 @@ from cython cimport Py_ssize_t import numpy as np cimport numpy as np -from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM, PyArray_SETITEM, +from numpy cimport (ndarray, PyArray_NDIM, PyArray_GETITEM, PyArray_ITER_DATA, PyArray_ITER_NEXT, PyArray_IterNew, flatiter, NPY_OBJECT, int64_t, @@ -57,8 +57,6 @@ cimport util cdef int64_t NPY_NAT = util.get_nat() from util cimport is_array, _checknull -from libc.math cimport fabs, sqrt - def values_from_object(object o): """ return my values or the object if we are say an ndarray """ @@ -1119,5 +1117,4 @@ def indices_fast(object index, ndarray[int64_t] labels, list keys, return result -include "reduce.pyx" include "inference.pyx" diff --git a/pandas/_libs/src/reduce.pyx b/pandas/_libs/reduction.pyx similarity index 97% rename from pandas/_libs/src/reduce.pyx rename to pandas/_libs/reduction.pyx index f0ec8d284ef0e..d51583c7aa473 100644 --- a/pandas/_libs/src/reduce.pyx +++ b/pandas/_libs/reduction.pyx @@ -1,9 +1,24 @@ # -*- coding: utf-8 -*- # cython: profile=False -import numpy as np - from distutils.version import LooseVersion +from cython cimport Py_ssize_t +from cpython cimport Py_INCREF + +from libc.stdlib cimport malloc, free + +import numpy as np +cimport numpy as np +from numpy cimport (ndarray, + int64_t, + PyArray_SETITEM, + PyArray_ITER_NEXT, PyArray_ITER_DATA, PyArray_IterNew, + flatiter) +np.import_array() + +cimport util +from lib import maybe_convert_objects + is_numpy_prior_1_6_2 = LooseVersion(np.__version__) < '1.6.2' diff --git a/pandas/core/apply.py b/pandas/core/apply.py index 2f43087f7dff9..4cdec54b9a07a 100644 --- a/pandas/core/apply.py +++ b/pandas/core/apply.py @@ -1,6 +1,6 @@ import numpy as np from pandas import compat -from pandas._libs import lib +from pandas._libs import reduction from pandas.core.dtypes.common import ( 
is_extension_type, is_sequence) @@ -114,7 +114,7 @@ def apply_empty_result(self): def apply_raw(self): try: - result = lib.reduce(self.values, self.f, axis=self.axis) + result = reduction.reduce(self.values, self.f, axis=self.axis) except Exception: result = np.apply_along_axis(self.f, self.axis, self.values) @@ -150,10 +150,10 @@ def apply_standard(self): try: labels = self.agg_axis - result = lib.reduce(values, self.f, - axis=self.axis, - dummy=dummy, - labels=labels) + result = reduction.reduce(values, self.f, + axis=self.axis, + dummy=dummy, + labels=labels) return Series(result, index=labels) except Exception: pass diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 25e44589488ee..66162af1e7314 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -66,7 +66,9 @@ from pandas.plotting._core import boxplot_frame_groupby -from pandas._libs import lib, groupby as libgroupby, Timestamp, NaT, iNaT +from pandas._libs import (lib, reduction, + groupby as libgroupby, + Timestamp, NaT, iNaT) from pandas._libs.lib import count_level_2d _doc_template = """ @@ -1981,7 +1983,7 @@ def apply(self, f, data, axis=0): try: values, mutated = splitter.fast_apply(f, group_keys) return group_keys, values, mutated - except (lib.InvalidApply): + except reduction.InvalidApply: # we detect a mutation of some kind # so take slow path pass @@ -2404,8 +2406,8 @@ def _aggregate_series_fast(self, obj, func): obj = obj._take(indexer, convert=False).to_dense() group_index = algorithms.take_nd( group_index, indexer, allow_fill=False) - grouper = lib.SeriesGrouper(obj, func, group_index, ngroups, - dummy) + grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups, + dummy) result, counts = grouper.get_result() return result, counts @@ -2618,7 +2620,7 @@ def groupings(self): def agg_series(self, obj, func): dummy = obj[:0] - grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy) + grouper = reduction.SeriesBinGrouper(obj, func, self.bins, dummy) return 
grouper.get_result() # ---------------------------------------------------------------------- @@ -4758,7 +4760,8 @@ def fast_apply(self, f, names): return [], True sdata = self._get_sorted_data() - results, mutated = lib.apply_frame_axis0(sdata, f, names, starts, ends) + results, mutated = reduction.apply_frame_axis0(sdata, f, names, + starts, ends) return results, mutated diff --git a/pandas/tests/groupby/test_bin_groupby.py b/pandas/tests/groupby/test_bin_groupby.py index 8b95455b53d22..979b2f7a539af 100644 --- a/pandas/tests/groupby/test_bin_groupby.py +++ b/pandas/tests/groupby/test_bin_groupby.py @@ -9,7 +9,7 @@ from pandas import Index, isna from pandas.util.testing import assert_almost_equal import pandas.util.testing as tm -from pandas._libs import lib, groupby +from pandas._libs import lib, groupby, reduction def test_series_grouper(): @@ -19,7 +19,7 @@ def test_series_grouper(): labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) - grouper = lib.SeriesGrouper(obj, np.mean, labels, 2, dummy) + grouper = reduction.SeriesGrouper(obj, np.mean, labels, 2, dummy) result, counts = grouper.get_result() expected = np.array([obj[3:6].mean(), obj[6:].mean()]) @@ -36,7 +36,7 @@ def test_series_bin_grouper(): bins = np.array([3, 6]) - grouper = lib.SeriesBinGrouper(obj, np.mean, bins, dummy) + grouper = reduction.SeriesBinGrouper(obj, np.mean, bins, dummy) result, counts = grouper.get_result() expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()]) @@ -127,26 +127,27 @@ def test_int_index(self): from pandas.core.series import Series arr = np.random.randn(100, 4) - result = lib.reduce(arr, np.sum, labels=Index(np.arange(4))) + result = reduction.reduce(arr, np.sum, labels=Index(np.arange(4))) expected = arr.sum(0) assert_almost_equal(result, expected) - result = lib.reduce(arr, np.sum, axis=1, labels=Index(np.arange(100))) + result = reduction.reduce(arr, np.sum, axis=1, + labels=Index(np.arange(100))) expected = arr.sum(1) 
assert_almost_equal(result, expected) dummy = Series(0., index=np.arange(100)) - result = lib.reduce(arr, np.sum, dummy=dummy, - labels=Index(np.arange(4))) + result = reduction.reduce(arr, np.sum, dummy=dummy, + labels=Index(np.arange(4))) expected = arr.sum(0) assert_almost_equal(result, expected) dummy = Series(0., index=np.arange(4)) - result = lib.reduce(arr, np.sum, axis=1, dummy=dummy, - labels=Index(np.arange(100))) + result = reduction.reduce(arr, np.sum, axis=1, dummy=dummy, + labels=Index(np.arange(100))) expected = arr.sum(1) assert_almost_equal(result, expected) - result = lib.reduce(arr, np.sum, axis=1, dummy=dummy, - labels=Index(np.arange(100))) + result = reduction.reduce(arr, np.sum, axis=1, dummy=dummy, + labels=Index(np.arange(100))) assert_almost_equal(result, expected) diff --git a/setup.py b/setup.py index 16ca0c132eaa9..7ade1544ec5cd 100755 --- a/setup.py +++ b/setup.py @@ -309,6 +309,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/interval.pyx', 'pandas/_libs/hashing.pyx', 'pandas/_libs/missing.pyx', + 'pandas/_libs/reduction.pyx', 'pandas/_libs/testing.pyx', 'pandas/_libs/window.pyx', 'pandas/_libs/skiplist.pyx', @@ -506,6 +507,8 @@ def pxd(name): 'pandas/_libs/src/numpy_helper.h'], 'sources': ['pandas/_libs/src/parser/tokenizer.c', 'pandas/_libs/src/parser/io.c']}, + '_libs.reduction': { + 'pyxfile': '_libs/reduction'}, '_libs.tslibs.period': { 'pyxfile': '_libs/tslibs/period', 'pxdfiles': ['_libs/src/util',
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19306
2018-01-18T23:18:30Z
2018-01-21T17:57:37Z
2018-01-21T17:57:37Z
2018-01-23T04:39:54Z
Remove duplicate is_lexsorted function
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index f6c70027ae6f1..5f8c761157e88 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -896,38 +896,6 @@ def write_csv_rows(list data, ndarray data_index, # ------------------------------------------------------------------------------ # Groupby-related functions -@cython.wraparound(False) -@cython.boundscheck(False) -def is_lexsorted(list list_of_arrays): - cdef: - int i - Py_ssize_t n, nlevels - int64_t k, cur, pre - ndarray arr - - nlevels = len(list_of_arrays) - n = len(list_of_arrays[0]) - - cdef int64_t **vecs = <int64_t**> malloc(nlevels * sizeof(int64_t*)) - for i from 0 <= i < nlevels: - arr = list_of_arrays[i] - vecs[i] = <int64_t *> arr.data - - # Assume uniqueness?? - for i from 1 <= i < n: - for k from 0 <= k < nlevels: - cur = vecs[k][i] - pre = vecs[k][i - 1] - if cur == pre: - continue - elif cur > pre: - break - else: - return False - free(vecs) - return True - - # TODO: could do even better if we know something about the data. eg, index has # 1-min data, binner has 5-min data, then bins are just strides in index. This # is a general, O(max(len(values), len(binner))) method. diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 608553b9c3bf2..e50e87f8bd571 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -5,7 +5,7 @@ from sys import getsizeof import numpy as np -from pandas._libs import index as libindex, lib, Timestamp +from pandas._libs import algos as libalgos, index as libindex, lib, Timestamp from pandas.compat import range, zip, lrange, lzip, map from pandas.compat.numpy import function as nv @@ -1137,7 +1137,7 @@ def lexsort_depth(self): int64_labels = [_ensure_int64(lab) for lab in self.labels] for k in range(self.nlevels, 0, -1): - if lib.is_lexsorted(int64_labels[:k]): + if libalgos.is_lexsorted(int64_labels[:k]): return k return 0
`_libs.lib` and `_libs.algos` have near-identical `is_lexsorted` functions. The only differences appear to be small optimizations/modernizations in the `algos` version. AFAICT the `algos` version is only used in `tests.test_algos` ATM. This PR removes the `libs._lib` version and changes the one usage (in `indexes.multi`) to use the `algos` version.
https://api.github.com/repos/pandas-dev/pandas/pulls/19305
2018-01-18T22:34:26Z
2018-01-19T11:01:49Z
2018-01-19T11:01:49Z
2018-01-19T16:03:39Z
REF: Change how pandas.core.common shim works
diff --git a/pandas/__init__.py b/pandas/__init__.py index 78501620d780b..9d76e4ec31922 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -58,6 +58,9 @@ from pandas.io.api import * from pandas.util._tester import test import pandas.testing +from pandas.core.common import _add_deprecated +_add_deprecated() +del _add_deprecated # extension module deprecations from pandas.util._depr_module import _DeprecatedModule diff --git a/pandas/core/common.py b/pandas/core/common.py index e606be3cc2a23..581d784daa3c9 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -19,7 +19,6 @@ from pandas.core.dtypes.common import _NS_DTYPE from pandas.core.dtypes.inference import _iterable_not_string from pandas.core.dtypes.missing import isna, isnull, notnull # noqa -from pandas.api import types from pandas.core.dtypes import common from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike @@ -27,48 +26,54 @@ from pandas.errors import ( # noqa PerformanceWarning, UnsupportedFunctionCall, UnsortedIndexError) -# back-compat of public API -# deprecate these functions -m = sys.modules['pandas.core.common'] -for t in [t for t in dir(types) if not t.startswith('_')]: - - def outer(t=t): - - def wrapper(*args, **kwargs): - warnings.warn("pandas.core.common.{t} is deprecated. " - "import from the public API: " - "pandas.api.types.{t} instead".format(t=t), - DeprecationWarning, stacklevel=3) - return getattr(types, t)(*args, **kwargs) - return wrapper - - setattr(m, t, outer(t)) - -# back-compat for non-public functions -# deprecate these functions -for t in ['is_datetime_arraylike', - 'is_datetime_or_timedelta_dtype', - 'is_datetimelike', - 'is_datetimelike_v_numeric', - 'is_datetimelike_v_object', - 'is_datetimetz', - 'is_int_or_datetime_dtype', - 'is_period_arraylike', - 'is_string_like', - 'is_string_like_dtype']: - - def outer(t=t): - - def wrapper(*args, **kwargs): - warnings.warn("pandas.core.common.{t} is deprecated. 
" - "These are not longer public API functions, " - "but can be imported from " - "pandas.api.types.{t} instead".format(t=t), - DeprecationWarning, stacklevel=3) - return getattr(common, t)(*args, **kwargs) - return wrapper - - setattr(m, t, outer(t)) + +def _add_deprecated(): + # back-compat of public API + # deprecate these functions + # This is called at the end of pandas.__init__, after all the imports + # have already been resolved, to avoid any circular imports. + from pandas.api import types + + m = sys.modules['pandas.core.common'] + for t in [t for t in dir(types) if not t.startswith('_')]: + + def outer(t=t): + + def wrapper(*args, **kwargs): + warnings.warn("pandas.core.common.{t} is deprecated. " + "import from the public API: " + "pandas.api.types.{t} instead".format(t=t), + DeprecationWarning, stacklevel=3) + return getattr(types, t)(*args, **kwargs) + return wrapper + + setattr(m, t, outer(t)) + + # back-compat for non-public functions + # deprecate these functions + for t in ['is_datetime_arraylike', + 'is_datetime_or_timedelta_dtype', + 'is_datetimelike', + 'is_datetimelike_v_numeric', + 'is_datetimelike_v_object', + 'is_datetimetz', + 'is_int_or_datetime_dtype', + 'is_period_arraylike', + 'is_string_like', + 'is_string_like_dtype']: + + def outer(t=t): + + def wrapper(*args, **kwargs): + warnings.warn("pandas.core.common.{t} is deprecated. " + "These are not longer public API functions, " + "but can be imported from " + "pandas.api.types.{t} instead".format(t=t), + DeprecationWarning, stacklevel=3) + return getattr(common, t)(*args, **kwargs) + return wrapper + + setattr(m, t, outer(t)) # deprecate array_equivalent @@ -646,6 +651,7 @@ def _random_state(state=None): ------- np.random.RandomState """ + from pandas.api import types if types.is_integer(state): return np.random.RandomState(state)
I have a PR incoming that would introduce a circular dependency between pandas.core.arrays.categorical, pandas.api, and pandas.core.common. This change will allow that PR to avoid hacky workarounds, since the real problem is pandas.core importing pandas.api.types. xref https://github.com/pandas-dev/pandas/pull/13990
https://api.github.com/repos/pandas-dev/pandas/pulls/19304
2018-01-18T21:44:38Z
2018-02-19T19:37:11Z
null
2018-02-19T19:37:15Z
Fix DTI comparison with None, datetime.date
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 66e88e181ac0f..89276e3c241d0 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -452,6 +452,8 @@ Datetimelike - Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`) - Bug in :func:`Series.truncate` which raises ``TypeError`` with a monotonic ``PeriodIndex`` (:issue:`17717`) - Bug in :func:`~DataFrame.pct_change` using ``periods`` and ``freq`` returned different length outputs (:issue:`7292`) +- Bug in comparison of :class:`DatetimeIndex` against ``None`` or ``datetime.date`` objects raising ``TypeError`` for ``==`` and ``!=`` comparisons instead of all-``False`` and all-``True``, respectively (:issue:`19301`) +- Timezones ^^^^^^^^^ @@ -483,8 +485,6 @@ Numeric - Bug in the :class:`DataFrame` constructor in which data containing very large positive or very large negative numbers was causing ``OverflowError`` (:issue:`18584`) - Bug in :class:`Index` constructor with ``dtype='uint64'`` where int-like floats were not coerced to :class:`UInt64Index` (:issue:`18400`) -- - Indexing ^^^^^^^^ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 8dd41c022d163..8fd5794f2637b 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -118,8 +118,16 @@ def wrapper(self, other): else: if isinstance(other, list): other = DatetimeIndex(other) - elif not isinstance(other, (np.ndarray, Index, ABCSeries)): - other = _ensure_datetime64(other) + elif not isinstance(other, (np.datetime64, np.ndarray, + Index, ABCSeries)): + # Following Timestamp convention, __eq__ is all-False + # and __ne__ is all True, others raise TypeError. 
+ if opname == '__eq__': + return np.zeros(shape=self.shape, dtype=bool) + elif opname == '__ne__': + return np.ones(shape=self.shape, dtype=bool) + raise TypeError('%s type object %s' % + (type(other), str(other))) if is_datetimelike(other): self._assert_tzawareness_compat(other) @@ -146,12 +154,6 @@ def wrapper(self, other): return compat.set_function_name(wrapper, opname, cls) -def _ensure_datetime64(other): - if isinstance(other, np.datetime64): - return other - raise TypeError('%s type object %s' % (type(other), str(other))) - - _midnight = time(0, 0) diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 671071b5e4945..09a6b35a0ff0e 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -14,6 +14,7 @@ from pandas import (Timestamp, Timedelta, Series, DatetimeIndex, TimedeltaIndex, date_range) +from pandas._libs import tslib @pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo', @@ -44,7 +45,83 @@ def addend(request): class TestDatetimeIndexComparisons(object): - # TODO: De-duplicate with test_comparisons_nat below + @pytest.mark.parametrize('other', [datetime(2016, 1, 1), + Timestamp('2016-01-01'), + np.datetime64('2016-01-01')]) + def test_dti_cmp_datetimelike(self, other, tz): + dti = pd.date_range('2016-01-01', periods=2, tz=tz) + if tz is not None: + if isinstance(other, np.datetime64): + # no tzaware version available + return + elif isinstance(other, Timestamp): + other = other.tz_localize(dti.tzinfo) + else: + other = tslib._localize_pydatetime(other, dti.tzinfo) + + result = dti == other + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = dti > other + expected = np.array([False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = dti >= other + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = dti < other + expected = 
np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = dti <= other + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + def dti_cmp_non_datetime(self, tz): + # GH#19301 by convention datetime.date is not considered comparable + # to Timestamp or DatetimeIndex. This may change in the future. + dti = pd.date_range('2016-01-01', periods=2, tz=tz) + + other = datetime(2016, 1, 1).date() + assert not (dti == other).any() + assert (dti != other).all() + with pytest.raises(TypeError): + dti < other + with pytest.raises(TypeError): + dti <= other + with pytest.raises(TypeError): + dti > other + with pytest.raises(TypeError): + dti >= other + + @pytest.mark.parametrize('other', [None, np.nan, pd.NaT]) + def test_dti_eq_null_scalar(self, other, tz): + # GH#19301 + dti = pd.date_range('2016-01-01', periods=2, tz=tz) + assert not (dti == other).any() + + @pytest.mark.parametrize('other', [None, np.nan, pd.NaT]) + def test_dti_ne_null_scalar(self, other, tz): + # GH#19301 + dti = pd.date_range('2016-01-01', periods=2, tz=tz) + assert (dti != other).all() + + @pytest.mark.parametrize('other', [None, np.nan]) + def test_dti_cmp_null_scalar_inequality(self, tz, other): + # GH#19301 + dti = pd.date_range('2016-01-01', periods=2, tz=tz) + + with pytest.raises(TypeError): + dti < other + with pytest.raises(TypeError): + dti <= other + with pytest.raises(TypeError): + dti > other + with pytest.raises(TypeError): + dti >= other + def test_dti_cmp_nat(self): left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]) @@ -72,69 +149,7 @@ def test_dti_cmp_nat(self): tm.assert_numpy_array_equal(lhs < pd.NaT, expected) tm.assert_numpy_array_equal(pd.NaT > lhs, expected) - @pytest.mark.parametrize('op', [operator.eq, operator.ne, - operator.gt, operator.ge, - operator.lt, operator.le]) - def test_comparison_tzawareness_compat(self, op): - # GH#18162 - dr = pd.date_range('2016-01-01', periods=6) 
- dz = dr.tz_localize('US/Pacific') - - with pytest.raises(TypeError): - op(dr, dz) - with pytest.raises(TypeError): - op(dr, list(dz)) - with pytest.raises(TypeError): - op(dz, dr) - with pytest.raises(TypeError): - op(dz, list(dr)) - - # Check that there isn't a problem aware-aware and naive-naive do not - # raise - assert (dr == dr).all() - assert (dr == list(dr)).all() - assert (dz == dz).all() - assert (dz == list(dz)).all() - - # Check comparisons against scalar Timestamps - ts = pd.Timestamp('2000-03-14 01:59') - ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam') - - assert (dr > ts).all() - with pytest.raises(TypeError): - op(dr, ts_tz) - - assert (dz > ts_tz).all() - with pytest.raises(TypeError): - op(dz, ts) - - @pytest.mark.parametrize('op', [operator.eq, operator.ne, - operator.gt, operator.ge, - operator.lt, operator.le]) - def test_nat_comparison_tzawareness(self, op): - # GH#19276 - # tzaware DatetimeIndex should not raise when compared to NaT - dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, - '2014-05-01', '2014-07-01']) - expected = np.array([op == operator.ne] * len(dti)) - result = op(dti, pd.NaT) - tm.assert_numpy_array_equal(result, expected) - - result = op(dti.tz_localize('US/Pacific'), pd.NaT) - tm.assert_numpy_array_equal(result, expected) - - def test_comparisons_coverage(self): - rng = date_range('1/1/2000', periods=10) - - # raise TypeError for now - pytest.raises(TypeError, rng.__lt__, rng[3].value) - - result = rng == list(rng) - exp = rng == rng - tm.assert_numpy_array_equal(result, exp) - - def test_comparisons_nat(self): - + def test_dti_cmp_nat_behaves_like_float_cmp_nan(self): fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0]) @@ -223,6 +238,71 @@ def test_comparisons_nat(self): expected = np.array([True, True, False, True, True, True]) tm.assert_numpy_array_equal(result, expected) + @pytest.mark.parametrize('op', [operator.eq, 
operator.ne, + operator.gt, operator.ge, + operator.lt, operator.le]) + def test_comparison_tzawareness_compat(self, op): + # GH#18162 + dr = pd.date_range('2016-01-01', periods=6) + dz = dr.tz_localize('US/Pacific') + + with pytest.raises(TypeError): + op(dr, dz) + with pytest.raises(TypeError): + op(dr, list(dz)) + with pytest.raises(TypeError): + op(dz, dr) + with pytest.raises(TypeError): + op(dz, list(dr)) + + # Check that there isn't a problem aware-aware and naive-naive do not + # raise + assert (dr == dr).all() + assert (dr == list(dr)).all() + assert (dz == dz).all() + assert (dz == list(dz)).all() + + # Check comparisons against scalar Timestamps + ts = pd.Timestamp('2000-03-14 01:59') + ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam') + + assert (dr > ts).all() + with pytest.raises(TypeError): + op(dr, ts_tz) + + assert (dz > ts_tz).all() + with pytest.raises(TypeError): + op(dz, ts) + + @pytest.mark.parametrize('op', [operator.eq, operator.ne, + operator.gt, operator.ge, + operator.lt, operator.le]) + def test_nat_comparison_tzawareness(self, op): + # GH#19276 + # tzaware DatetimeIndex should not raise when compared to NaT + dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, + '2014-05-01', '2014-07-01']) + expected = np.array([op == operator.ne] * len(dti)) + result = op(dti, pd.NaT) + tm.assert_numpy_array_equal(result, expected) + + result = op(dti.tz_localize('US/Pacific'), pd.NaT) + tm.assert_numpy_array_equal(result, expected) + + def test_dti_cmp_int_raises(self): + rng = date_range('1/1/2000', periods=10) + + # raise TypeError for now + with pytest.raises(TypeError): + rng < rng[3].value + + def test_dti_cmp_list(self): + rng = date_range('1/1/2000', periods=10) + + result = rng == list(rng) + expected = rng == rng + tm.assert_numpy_array_equal(result, expected) + class TestDatetimeIndexArithmetic(object):
Discussed in #19288 - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19301
2018-01-18T17:21:09Z
2018-02-02T11:38:05Z
2018-02-02T11:38:05Z
2018-02-04T16:41:08Z
Bug: adds support for unary plus
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 1c6b698605521..f70e1198aa999 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -253,6 +253,7 @@ Current Behavior: Other Enhancements ^^^^^^^^^^^^^^^^^^ +- Unary ``+`` now permitted for ``Series`` and ``DataFrame`` as numeric operator (:issue:`16073`) - Better support for :func:`Dataframe.style.to_excel` output with the ``xlsxwriter`` engine. (:issue:`16149`) - :func:`pandas.tseries.frequencies.to_offset` now accepts leading '+' signs e.g. '+1h'. (:issue:`18171`) - :func:`MultiIndex.unique` now supports the ``level=`` argument, to get unique values from a specific index level (:issue:`17896`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cb4bbb7b27c42..35f866c9e7d58 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -25,6 +25,7 @@ is_list_like, is_dict_like, is_re_compilable, + is_period_arraylike, pandas_dtype) from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask from pandas.core.dtypes.inference import is_hashable @@ -1027,10 +1028,24 @@ def _indexed_same(self, other): def __neg__(self): values = com._values_from_object(self) - if values.dtype == np.bool_: + if is_bool_dtype(values): arr = operator.inv(values) - else: + elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)): arr = operator.neg(values) + else: + raise TypeError("Unary negative expects numeric dtype, not {}" + .format(values.dtype)) + return self.__array_wrap__(arr) + + def __pos__(self): + values = com._values_from_object(self) + if (is_bool_dtype(values) or is_period_arraylike(values)): + arr = values + elif (is_numeric_dtype(values) or is_timedelta64_dtype(values)): + arr = operator.pos(values) + else: + raise TypeError("Unary plus expects numeric dtype, not {}" + .format(values.dtype)) return self.__array_wrap__(arr) def __invert__(self): diff --git a/pandas/tests/computation/test_eval.py 
b/pandas/tests/computation/test_eval.py index 9c3572f9ffe72..07ba0b681418e 100644 --- a/pandas/tests/computation/test_eval.py +++ b/pandas/tests/computation/test_eval.py @@ -542,66 +542,42 @@ def test_frame_pos(self): # float lhs = DataFrame(randn(5, 2)) - if self.engine == 'python': - with pytest.raises(TypeError): - result = pd.eval(expr, engine=self.engine, parser=self.parser) - else: - expect = lhs - result = pd.eval(expr, engine=self.engine, parser=self.parser) - assert_frame_equal(expect, result) + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) # int lhs = DataFrame(randint(5, size=(5, 2))) - if self.engine == 'python': - with pytest.raises(TypeError): - result = pd.eval(expr, engine=self.engine, parser=self.parser) - else: - expect = lhs - result = pd.eval(expr, engine=self.engine, parser=self.parser) - assert_frame_equal(expect, result) + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) # bool doesn't work with numexpr but works elsewhere lhs = DataFrame(rand(5, 2) > 0.5) - if self.engine == 'python': - with pytest.raises(TypeError): - result = pd.eval(expr, engine=self.engine, parser=self.parser) - else: - expect = lhs - result = pd.eval(expr, engine=self.engine, parser=self.parser) - assert_frame_equal(expect, result) + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) def test_series_pos(self): expr = self.ex('+') # float lhs = Series(randn(5)) - if self.engine == 'python': - with pytest.raises(TypeError): - result = pd.eval(expr, engine=self.engine, parser=self.parser) - else: - expect = lhs - result = pd.eval(expr, engine=self.engine, parser=self.parser) - assert_series_equal(expect, result) + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) # int lhs = Series(randint(5, size=5)) - if 
self.engine == 'python': - with pytest.raises(TypeError): - result = pd.eval(expr, engine=self.engine, parser=self.parser) - else: - expect = lhs - result = pd.eval(expr, engine=self.engine, parser=self.parser) - assert_series_equal(expect, result) + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) # bool doesn't work with numexpr but works elsewhere lhs = Series(rand(5) > 0.5) - if self.engine == 'python': - with pytest.raises(TypeError): - result = pd.eval(expr, engine=self.engine, parser=self.parser) - else: - expect = lhs - result = pd.eval(expr, engine=self.engine, parser=self.parser) - assert_series_equal(expect, result) + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) def test_scalar_unary(self): with pytest.raises(TypeError): diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 1bb8e8edffc6e..a3a799aed1c55 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -245,7 +245,7 @@ def test_ops_frame_period(self): exp = pd.DataFrame({'A': np.array([2, 1], dtype=object), 'B': np.array([14, 13], dtype=object)}) tm.assert_frame_equal(p - df, exp) - tm.assert_frame_equal(df - p, -exp) + tm.assert_frame_equal(df - p, -1 * exp) df2 = pd.DataFrame({'A': [pd.Period('2015-05', freq='M'), pd.Period('2015-06', freq='M')], @@ -257,4 +257,4 @@ def test_ops_frame_period(self): exp = pd.DataFrame({'A': np.array([4, 4], dtype=object), 'B': np.array([16, 16], dtype=object)}) tm.assert_frame_equal(df2 - df, exp) - tm.assert_frame_equal(df - df2, -exp) + tm.assert_frame_equal(df - df2, -1 * exp) diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index 26974b6398694..5df50f3d7835b 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -271,13 +271,50 @@ def test_logical_with_nas(self): 
expected = Series([True, True]) assert_series_equal(result, expected) - def test_neg(self): - # what to do? - assert_frame_equal(-self.frame, -1 * self.frame) + @pytest.mark.parametrize('df,expected', [ + (pd.DataFrame({'a': [-1, 1]}), pd.DataFrame({'a': [1, -1]})), + (pd.DataFrame({'a': [False, True]}), + pd.DataFrame({'a': [True, False]})), + (pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}), + pd.DataFrame({'a': pd.Series(pd.to_timedelta([1, -1]))})) + ]) + def test_neg_numeric(self, df, expected): + assert_frame_equal(-df, expected) + assert_series_equal(-df['a'], expected['a']) + + @pytest.mark.parametrize('df', [ + pd.DataFrame({'a': ['a', 'b']}), + pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}), + ]) + def test_neg_raises(self, df): + with pytest.raises(TypeError): + (- df) + with pytest.raises(TypeError): + (- df['a']) def test_invert(self): assert_frame_equal(-(self.frame < 0), ~(self.frame < 0)) + @pytest.mark.parametrize('df', [ + pd.DataFrame({'a': [-1, 1]}), + pd.DataFrame({'a': [False, True]}), + pd.DataFrame({'a': pd.Series(pd.to_timedelta([-1, 1]))}), + ]) + def test_pos_numeric(self, df): + # GH 16073 + assert_frame_equal(+df, df) + assert_series_equal(+df['a'], df['a']) + + @pytest.mark.parametrize('df', [ + pd.DataFrame({'a': ['a', 'b']}), + pd.DataFrame({'a': pd.to_datetime(['2017-01-22', '1970-01-01'])}), + ]) + def test_pos_raises(self, df): + with pytest.raises(TypeError): + (+ df) + with pytest.raises(TypeError): + (+ df['a']) + def test_arith_flex_frame(self): ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod'] if not compat.PY3: diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index 1d9fa9dc15531..94da97ef45301 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -315,7 +315,7 @@ def test_ops_series_period(self): # dtype will be object because of original dtype expected = pd.Series([9, 8], name='xxx', 
dtype=object) tm.assert_series_equal(per - ser, expected) - tm.assert_series_equal(ser - per, -expected) + tm.assert_series_equal(ser - per, -1 * expected) s2 = pd.Series([pd.Period('2015-01-05', freq='D'), pd.Period('2015-01-04', freq='D')], name='xxx') @@ -323,7 +323,7 @@ def test_ops_series_period(self): expected = pd.Series([4, 2], name='xxx', dtype=object) tm.assert_series_equal(s2 - ser, expected) - tm.assert_series_equal(ser - s2, -expected) + tm.assert_series_equal(ser - s2, -1 * expected) class TestTimestampSeriesArithmetic(object):
- [ ] closes #16073 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Adds missing unary plus operator from #16073. Adds typechecking behavior to unary neg, along with tests for both.
https://api.github.com/repos/pandas-dev/pandas/pulls/19297
2018-01-18T13:02:38Z
2018-02-08T11:32:05Z
2018-02-08T11:32:04Z
2018-02-08T12:53:18Z
Implement libinternals
diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx new file mode 100644 index 0000000000000..93a45335efc9c --- /dev/null +++ b/pandas/_libs/internals.pyx @@ -0,0 +1,438 @@ +# -*- coding: utf-8 -*- + +cimport cython +from cython cimport Py_ssize_t + +from cpython cimport PyObject + +cdef extern from "Python.h": + Py_ssize_t PY_SSIZE_T_MAX + +import numpy as np +cimport numpy as np +from numpy cimport int64_t + +cdef extern from "compat_helper.h": + cdef int slice_get_indices(PyObject* s, Py_ssize_t length, + Py_ssize_t *start, Py_ssize_t *stop, + Py_ssize_t *step, + Py_ssize_t *slicelength) except -1 + + +cdef class BlockPlacement: + # __slots__ = '_as_slice', '_as_array', '_len' + cdef slice _as_slice + cdef object _as_array + + cdef bint _has_slice, _has_array, _is_known_slice_like + + def __init__(self, val): + cdef slice slc + + self._has_slice = False + self._has_array = False + + if isinstance(val, slice): + slc = slice_canonize(val) + + if slc.start != slc.stop: + self._as_slice = slc + self._has_slice = True + else: + arr = np.empty(0, dtype=np.int64) + self._as_array = arr + self._has_array = True + else: + # Cython memoryview interface requires ndarray to be writeable. 
+ arr = np.require(val, dtype=np.int64, requirements='W') + assert arr.ndim == 1 + self._as_array = arr + self._has_array = True + + def __str__(self): + cdef slice s = self._ensure_has_slice() + if s is not None: + v = self._as_slice + else: + v = self._as_array + + return '%s(%r)' % (self.__class__.__name__, v) + + __repr__ = __str__ + + def __len__(self): + cdef slice s = self._ensure_has_slice() + if s is not None: + return slice_len(s) + else: + return len(self._as_array) + + def __iter__(self): + cdef slice s = self._ensure_has_slice() + cdef Py_ssize_t start, stop, step, _ + if s is not None: + start, stop, step, _ = slice_get_indices_ex(s) + return iter(range(start, stop, step)) + else: + return iter(self._as_array) + + @property + def as_slice(self): + cdef slice s = self._ensure_has_slice() + if s is None: + raise TypeError('Not slice-like') + else: + return s + + @property + def indexer(self): + cdef slice s = self._ensure_has_slice() + if s is not None: + return s + else: + return self._as_array + + def isin(self, arr): + from pandas.core.index import Int64Index + return Int64Index(self.as_array, copy=False).isin(arr) + + @property + def as_array(self): + cdef Py_ssize_t start, stop, end, _ + if not self._has_array: + start, stop, step, _ = slice_get_indices_ex(self._as_slice) + self._as_array = np.arange(start, stop, step, + dtype=np.int64) + self._has_array = True + return self._as_array + + @property + def is_slice_like(self): + cdef slice s = self._ensure_has_slice() + return s is not None + + def __getitem__(self, loc): + cdef slice s = self._ensure_has_slice() + if s is not None: + val = slice_getitem(s, loc) + else: + val = self._as_array[loc] + + if not isinstance(val, slice) and val.ndim == 0: + return val + + return BlockPlacement(val) + + def delete(self, loc): + return BlockPlacement(np.delete(self.as_array, loc, axis=0)) + + def append(self, others): + if len(others) == 0: + return self + + return 
BlockPlacement(np.concatenate([self.as_array] + + [o.as_array for o in others])) + + cdef iadd(self, other): + cdef slice s = self._ensure_has_slice() + cdef Py_ssize_t other_int, start, stop, step, l + + if isinstance(other, int) and s is not None: + other_int = <Py_ssize_t>other + + if other_int == 0: + return self + + start, stop, step, l = slice_get_indices_ex(s) + start += other_int + stop += other_int + + if ((step > 0 and start < 0) or + (step < 0 and stop < step)): + raise ValueError("iadd causes length change") + + if stop < 0: + self._as_slice = slice(start, None, step) + else: + self._as_slice = slice(start, stop, step) + + self._has_array = False + self._as_array = None + else: + newarr = self.as_array + other + if (newarr < 0).any(): + raise ValueError("iadd causes length change") + + self._as_array = newarr + self._has_array = True + self._has_slice = False + self._as_slice = None + + return self + + cdef BlockPlacement copy(self): + cdef slice s = self._ensure_has_slice() + if s is not None: + return BlockPlacement(s) + else: + return BlockPlacement(self._as_array) + + def add(self, other): + return self.copy().iadd(other) + + def sub(self, other): + return self.add(-other) + + cdef slice _ensure_has_slice(self): + if not self._has_slice: + self._as_slice = indexer_as_slice(self._as_array) + self._has_slice = True + return self._as_slice + + +cpdef slice_canonize(slice s): + """ + Convert slice to canonical bounded form. 
+ """ + cdef: + Py_ssize_t start = 0, stop = 0, step = 1, length + + if s.step is None: + step = 1 + else: + step = <Py_ssize_t>s.step + if step == 0: + raise ValueError("slice step cannot be zero") + + if step > 0: + if s.stop is None: + raise ValueError("unbounded slice") + + stop = <Py_ssize_t>s.stop + if s.start is None: + start = 0 + else: + start = <Py_ssize_t>s.start + if start > stop: + start = stop + elif step < 0: + if s.start is None: + raise ValueError("unbounded slice") + + start = <Py_ssize_t>s.start + if s.stop is None: + stop = -1 + else: + stop = <Py_ssize_t>s.stop + if stop > start: + stop = start + + if start < 0 or (stop < 0 and s.stop is not None): + raise ValueError("unbounded slice") + + if stop < 0: + return slice(start, None, step) + else: + return slice(start, stop, step) + + +cpdef Py_ssize_t slice_len( + slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX) except -1: + """ + Get length of a bounded slice. + + The slice must not have any "open" bounds that would create dependency on + container size, i.e.: + - if ``s.step is None or s.step > 0``, ``s.stop`` is not ``None`` + - if ``s.step < 0``, ``s.start`` is not ``None`` + + Otherwise, the result is unreliable. + + """ + cdef: + Py_ssize_t start, stop, step, length + + if slc is None: + raise TypeError("slc must be slice") + + slice_get_indices(<PyObject *>slc, objlen, + &start, &stop, &step, &length) + + return length + + +cpdef slice_get_indices_ex(slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX): + """ + Get (start, stop, step, length) tuple for a slice. + + If `objlen` is not specified, slice must be bounded, otherwise the result + will be wrong. 
+ + """ + cdef: + Py_ssize_t start, stop, step, length + + if slc is None: + raise TypeError("slc should be a slice") + + slice_get_indices(<PyObject *>slc, objlen, + &start, &stop, &step, &length) + + return start, stop, step, length + + +def slice_getitem(slice slc not None, ind): + cdef: + Py_ssize_t s_start, s_stop, s_step, s_len + Py_ssize_t ind_start, ind_stop, ind_step, ind_len + + s_start, s_stop, s_step, s_len = slice_get_indices_ex(slc) + + if isinstance(ind, slice): + ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind, + s_len) + + if ind_step > 0 and ind_len == s_len: + # short-cut for no-op slice + if ind_len == s_len: + return slc + + if ind_step < 0: + s_start = s_stop - s_step + ind_step = -ind_step + + s_step *= ind_step + s_stop = s_start + ind_stop * s_step + s_start = s_start + ind_start * s_step + + if s_step < 0 and s_stop < 0: + return slice(s_start, None, s_step) + else: + return slice(s_start, s_stop, s_step) + + else: + return np.arange(s_start, s_stop, s_step, dtype=np.int64)[ind] + + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef slice indexer_as_slice(int64_t[:] vals): + cdef: + Py_ssize_t i, n, start, stop + int64_t d + + if vals is None: + raise TypeError("vals must be ndarray") + + n = vals.shape[0] + + if n == 0 or vals[0] < 0: + return None + + if n == 1: + return slice(vals[0], vals[0] + 1, 1) + + if vals[1] < 0: + return None + + # n > 2 + d = vals[1] - vals[0] + + if d == 0: + return None + + for i in range(2, n): + if vals[i] < 0 or vals[i] - vals[i - 1] != d: + return None + + start = vals[0] + stop = start + n * d + if stop < 0 and d < 0: + return slice(start, None, d) + else: + return slice(start, stop, d) + + +@cython.boundscheck(False) +@cython.wraparound(False) +def get_blkno_indexers(int64_t[:] blknos, bint group=True): + """ + Enumerate contiguous runs of integers in ndarray. 
+ + Iterate over elements of `blknos` yielding ``(blkno, slice(start, stop))`` + pairs for each contiguous run found. + + If `group` is True and there is more than one run for a certain blkno, + ``(blkno, array)`` with an array containing positions of all elements equal + to blkno. + + Returns + ------- + iter : iterator of (int, slice or array) + + """ + # There's blkno in this function's name because it's used in block & + # blockno handling. + cdef: + int64_t cur_blkno + Py_ssize_t i, start, stop, n, diff + + object blkno + list group_order + dict group_slices + int64_t[:] res_view + + n = blknos.shape[0] + + if n == 0: + return + + start = 0 + cur_blkno = blknos[start] + + if group == False: + for i in range(1, n): + if blknos[i] != cur_blkno: + yield cur_blkno, slice(start, i) + + start = i + cur_blkno = blknos[i] + + yield cur_blkno, slice(start, n) + else: + group_order = [] + group_dict = {} + + for i in range(1, n): + if blknos[i] != cur_blkno: + if cur_blkno not in group_dict: + group_order.append(cur_blkno) + group_dict[cur_blkno] = [(start, i)] + else: + group_dict[cur_blkno].append((start, i)) + + start = i + cur_blkno = blknos[i] + + if cur_blkno not in group_dict: + group_order.append(cur_blkno) + group_dict[cur_blkno] = [(start, n)] + else: + group_dict[cur_blkno].append((start, n)) + + for blkno in group_order: + slices = group_dict[blkno] + if len(slices) == 1: + yield blkno, slice(slices[0][0], slices[0][1]) + else: + tot_len = sum(stop - start for start, stop in slices) + result = np.empty(tot_len, dtype=np.int64) + res_view = result + + i = 0 + for start, stop in slices: + for diff in range(start, stop): + res_view[i] = diff + i += 1 + + yield blkno, result \ No newline at end of file diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index f6c70027ae6f1..5a4feca4f236a 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -35,16 +35,6 @@ try: except ImportError: from cpython cimport PyUnicode_GET_SIZE as PyString_GET_SIZE -cdef 
extern from "Python.h": - Py_ssize_t PY_SSIZE_T_MAX - -cdef extern from "compat_helper.h": - - cdef int slice_get_indices( - PyObject* s, Py_ssize_t length, - Py_ssize_t *start, Py_ssize_t *stop, Py_ssize_t *step, - Py_ssize_t *slicelength) except -1 - cimport cpython isnan = np.isnan @@ -1161,424 +1151,5 @@ def indices_fast(object index, ndarray[int64_t] labels, list keys, return result -@cython.boundscheck(False) -@cython.wraparound(False) -def get_blkno_indexers(int64_t[:] blknos, bint group=True): - """ - Enumerate contiguous runs of integers in ndarray. - - Iterate over elements of `blknos` yielding ``(blkno, slice(start, stop))`` - pairs for each contiguous run found. - - If `group` is True and there is more than one run for a certain blkno, - ``(blkno, array)`` with an array containing positions of all elements equal - to blkno. - - Returns - ------- - iter : iterator of (int, slice or array) - - """ - # There's blkno in this function's name because it's used in block & - # blockno handling. 
- cdef: - int64_t cur_blkno - Py_ssize_t i, start, stop, n, diff - - object blkno - list group_order - dict group_slices - int64_t[:] res_view - - n = blknos.shape[0] - - if n == 0: - return - - start = 0 - cur_blkno = blknos[start] - - if group == False: - for i in range(1, n): - if blknos[i] != cur_blkno: - yield cur_blkno, slice(start, i) - - start = i - cur_blkno = blknos[i] - - yield cur_blkno, slice(start, n) - else: - group_order = [] - group_dict = {} - - for i in range(1, n): - if blknos[i] != cur_blkno: - if cur_blkno not in group_dict: - group_order.append(cur_blkno) - group_dict[cur_blkno] = [(start, i)] - else: - group_dict[cur_blkno].append((start, i)) - - start = i - cur_blkno = blknos[i] - - if cur_blkno not in group_dict: - group_order.append(cur_blkno) - group_dict[cur_blkno] = [(start, n)] - else: - group_dict[cur_blkno].append((start, n)) - - for blkno in group_order: - slices = group_dict[blkno] - if len(slices) == 1: - yield blkno, slice(slices[0][0], slices[0][1]) - else: - tot_len = sum(stop - start for start, stop in slices) - result = np.empty(tot_len, dtype=np.int64) - res_view = result - - i = 0 - for start, stop in slices: - for diff in range(start, stop): - res_view[i] = diff - i += 1 - - yield blkno, result - - -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef slice indexer_as_slice(int64_t[:] vals): - cdef: - Py_ssize_t i, n, start, stop - int64_t d - - if vals is None: - raise TypeError("vals must be ndarray") - - n = vals.shape[0] - - if n == 0 or vals[0] < 0: - return None - - if n == 1: - return slice(vals[0], vals[0] + 1, 1) - - if vals[1] < 0: - return None - - # n > 2 - d = vals[1] - vals[0] - - if d == 0: - return None - - for i in range(2, n): - if vals[i] < 0 or vals[i] - vals[i - 1] != d: - return None - - start = vals[0] - stop = start + n * d - if stop < 0 and d < 0: - return slice(start, None, d) - else: - return slice(start, stop, d) - - -cpdef slice_canonize(slice s): - """ - Convert slice to canonical 
bounded form. - """ - cdef: - Py_ssize_t start = 0, stop = 0, step = 1, length - - if s.step is None: - step = 1 - else: - step = <Py_ssize_t>s.step - if step == 0: - raise ValueError("slice step cannot be zero") - - if step > 0: - if s.stop is None: - raise ValueError("unbounded slice") - - stop = <Py_ssize_t>s.stop - if s.start is None: - start = 0 - else: - start = <Py_ssize_t>s.start - if start > stop: - start = stop - elif step < 0: - if s.start is None: - raise ValueError("unbounded slice") - - start = <Py_ssize_t>s.start - if s.stop is None: - stop = -1 - else: - stop = <Py_ssize_t>s.stop - if stop > start: - stop = start - - if start < 0 or (stop < 0 and s.stop is not None): - raise ValueError("unbounded slice") - - if stop < 0: - return slice(start, None, step) - else: - return slice(start, stop, step) - - -cpdef slice_get_indices_ex(slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX): - """ - Get (start, stop, step, length) tuple for a slice. - - If `objlen` is not specified, slice must be bounded, otherwise the result - will be wrong. - - """ - cdef: - Py_ssize_t start, stop, step, length - - if slc is None: - raise TypeError("slc should be a slice") - - slice_get_indices(<PyObject *>slc, objlen, - &start, &stop, &step, &length) - - return start, stop, step, length - - -cpdef Py_ssize_t slice_len( - slice slc, Py_ssize_t objlen=PY_SSIZE_T_MAX) except -1: - """ - Get length of a bounded slice. - - The slice must not have any "open" bounds that would create dependency on - container size, i.e.: - - if ``s.step is None or s.step > 0``, ``s.stop`` is not ``None`` - - if ``s.step < 0``, ``s.start`` is not ``None`` - - Otherwise, the result is unreliable. 
- - """ - cdef: - Py_ssize_t start, stop, step, length - - if slc is None: - raise TypeError("slc must be slice") - - slice_get_indices(<PyObject *>slc, objlen, - &start, &stop, &step, &length) - - return length - - -def slice_getitem(slice slc not None, ind): - cdef: - Py_ssize_t s_start, s_stop, s_step, s_len - Py_ssize_t ind_start, ind_stop, ind_step, ind_len - - s_start, s_stop, s_step, s_len = slice_get_indices_ex(slc) - - if isinstance(ind, slice): - ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind, - s_len) - - if ind_step > 0 and ind_len == s_len: - # short-cut for no-op slice - if ind_len == s_len: - return slc - - if ind_step < 0: - s_start = s_stop - s_step - ind_step = -ind_step - - s_step *= ind_step - s_stop = s_start + ind_stop * s_step - s_start = s_start + ind_start * s_step - - if s_step < 0 and s_stop < 0: - return slice(s_start, None, s_step) - else: - return slice(s_start, s_stop, s_step) - - else: - return np.arange(s_start, s_stop, s_step, dtype=np.int64)[ind] - - -cdef class BlockPlacement: - # __slots__ = '_as_slice', '_as_array', '_len' - cdef slice _as_slice - cdef object _as_array - - cdef bint _has_slice, _has_array, _is_known_slice_like - - def __init__(self, val): - cdef slice slc - - self._has_slice = False - self._has_array = False - - if isinstance(val, slice): - slc = slice_canonize(val) - - if slc.start != slc.stop: - self._as_slice = slc - self._has_slice = True - else: - arr = np.empty(0, dtype=np.int64) - self._as_array = arr - self._has_array = True - else: - # Cython memoryview interface requires ndarray to be writeable. 
- arr = np.require(val, dtype=np.int64, requirements='W') - assert arr.ndim == 1 - self._as_array = arr - self._has_array = True - - def __str__(self): - cdef slice s = self._ensure_has_slice() - if s is not None: - v = self._as_slice - else: - v = self._as_array - - return '%s(%r)' % (self.__class__.__name__, v) - - __repr__ = __str__ - - def __len__(self): - cdef slice s = self._ensure_has_slice() - if s is not None: - return slice_len(s) - else: - return len(self._as_array) - - def __iter__(self): - cdef slice s = self._ensure_has_slice() - cdef Py_ssize_t start, stop, step, _ - if s is not None: - start, stop, step, _ = slice_get_indices_ex(s) - return iter(range(start, stop, step)) - else: - return iter(self._as_array) - - @property - def as_slice(self): - cdef slice s = self._ensure_has_slice() - if s is None: - raise TypeError('Not slice-like') - else: - return s - - @property - def indexer(self): - cdef slice s = self._ensure_has_slice() - if s is not None: - return s - else: - return self._as_array - - def isin(self, arr): - from pandas.core.index import Int64Index - return Int64Index(self.as_array, copy=False).isin(arr) - - @property - def as_array(self): - cdef Py_ssize_t start, stop, end, _ - if not self._has_array: - start, stop, step, _ = slice_get_indices_ex(self._as_slice) - self._as_array = np.arange(start, stop, step, - dtype=np.int64) - self._has_array = True - return self._as_array - - @property - def is_slice_like(self): - cdef slice s = self._ensure_has_slice() - return s is not None - - def __getitem__(self, loc): - cdef slice s = self._ensure_has_slice() - if s is not None: - val = slice_getitem(s, loc) - else: - val = self._as_array[loc] - - if not isinstance(val, slice) and val.ndim == 0: - return val - - return BlockPlacement(val) - - def delete(self, loc): - return BlockPlacement(np.delete(self.as_array, loc, axis=0)) - - def append(self, others): - if len(others) == 0: - return self - - return 
BlockPlacement(np.concatenate([self.as_array] + - [o.as_array for o in others])) - - cdef iadd(self, other): - cdef slice s = self._ensure_has_slice() - cdef Py_ssize_t other_int, start, stop, step, l - - if isinstance(other, int) and s is not None: - other_int = <Py_ssize_t>other - - if other_int == 0: - return self - - start, stop, step, l = slice_get_indices_ex(s) - start += other_int - stop += other_int - - if ((step > 0 and start < 0) or - (step < 0 and stop < step)): - raise ValueError("iadd causes length change") - - if stop < 0: - self._as_slice = slice(start, None, step) - else: - self._as_slice = slice(start, stop, step) - - self._has_array = False - self._as_array = None - else: - newarr = self.as_array + other - if (newarr < 0).any(): - raise ValueError("iadd causes length change") - - self._as_array = newarr - self._has_array = True - self._has_slice = False - self._as_slice = None - - return self - - cdef BlockPlacement copy(self): - cdef slice s = self._ensure_has_slice() - if s is not None: - return BlockPlacement(s) - else: - return BlockPlacement(self._as_array) - - def add(self, other): - return self.copy().iadd(other) - - def sub(self, other): - return self.add(-other) - - cdef slice _ensure_has_slice(self): - if not self._has_slice: - self._as_slice = indexer_as_slice(self._as_array) - self._has_slice = True - return self._as_slice - - include "reduce.pyx" include "inference.pyx" diff --git a/pandas/core/internals.py b/pandas/core/internals.py index bc75a110354c0..698623bd8cd7f 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -11,6 +11,8 @@ import numpy as np +from pandas._libs import internals as libinternals + from pandas.core.base import PandasObject from pandas.core.dtypes.dtypes import ( @@ -67,7 +69,7 @@ from pandas.core.sparse.array import _maybe_to_sparse, SparseArray from pandas._libs import lib, tslib from pandas._libs.tslib import Timedelta -from pandas._libs.lib import BlockPlacement +from 
pandas._libs.internals import BlockPlacement from pandas._libs.tslibs import conversion from pandas.util._decorators import cache_readonly @@ -1228,7 +1230,7 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): if new_mgr_locs is None: if axis == 0: - slc = lib.indexer_as_slice(indexer) + slc = libinternals.indexer_as_slice(indexer) if slc is not None: new_mgr_locs = self.mgr_locs[slc] else: @@ -5023,7 +5025,7 @@ def _get_blkno_placements(blknos, blk_count, group=True): blknos = _ensure_int64(blknos) # FIXME: blk_count is unused, but it may avoid the use of dicts in cython - for blkno, indexer in lib.get_blkno_indexers(blknos, group): + for blkno, indexer in libinternals.get_blkno_indexers(blknos, group): yield blkno, BlockPlacement(indexer) @@ -5665,8 +5667,8 @@ def _fast_count_smallints(arr): def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): if isinstance(slice_or_indexer, slice): - return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer, - length) + return ('slice', slice_or_indexer, + libinternals.slice_len(slice_or_indexer, length)) elif (isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_): return 'mask', slice_or_indexer, slice_or_indexer.sum() diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 623d2d39607c2..dcbd19954ed60 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -39,8 +39,8 @@ def mgr(): def assert_block_equal(left, right): tm.assert_numpy_array_equal(left.values, right.values) assert left.dtype == right.dtype - assert isinstance(left.mgr_locs, lib.BlockPlacement) - assert isinstance(right.mgr_locs, lib.BlockPlacement) + assert isinstance(left.mgr_locs, BlockPlacement) + assert isinstance(right.mgr_locs, BlockPlacement) tm.assert_numpy_array_equal(left.mgr_locs.as_array, right.mgr_locs.as_array) @@ -222,7 +222,7 @@ def _check(blk): _check(self.bool_block) def 
test_mgr_locs(self): - assert isinstance(self.fblock.mgr_locs, lib.BlockPlacement) + assert isinstance(self.fblock.mgr_locs, BlockPlacement) tm.assert_numpy_array_equal(self.fblock.mgr_locs.as_array, np.array([0, 2, 4], dtype=np.int64)) @@ -264,14 +264,14 @@ def test_insert(self): def test_delete(self): newb = self.fblock.copy() newb.delete(0) - assert isinstance(newb.mgr_locs, lib.BlockPlacement) + assert isinstance(newb.mgr_locs, BlockPlacement) tm.assert_numpy_array_equal(newb.mgr_locs.as_array, np.array([2, 4], dtype=np.int64)) assert (newb.values[0] == 1).all() newb = self.fblock.copy() newb.delete(1) - assert isinstance(newb.mgr_locs, lib.BlockPlacement) + assert isinstance(newb.mgr_locs, BlockPlacement) tm.assert_numpy_array_equal(newb.mgr_locs.as_array, np.array([0, 4], dtype=np.int64)) assert (newb.values[1] == 2).all() @@ -679,7 +679,7 @@ def test_consolidate_ordering_issues(self, mgr): assert cons.nblocks == 4 cons = mgr.consolidate().get_numeric_data() assert cons.nblocks == 1 - assert isinstance(cons.blocks[0].mgr_locs, lib.BlockPlacement) + assert isinstance(cons.blocks[0].mgr_locs, BlockPlacement) tm.assert_numpy_array_equal(cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.int64)) diff --git a/setup.py b/setup.py index 7dbf6c84a0451..16ca0c132eaa9 100755 --- a/setup.py +++ b/setup.py @@ -302,6 +302,7 @@ class CheckSDist(sdist_class): 'pandas/_libs/hashtable.pyx', 'pandas/_libs/tslib.pyx', 'pandas/_libs/index.pyx', + 'pandas/_libs/internals.pyx', 'pandas/_libs/algos.pyx', 'pandas/_libs/join.pyx', 'pandas/_libs/indexing.pyx', @@ -478,6 +479,8 @@ def pxd(name): 'sources': np_datetime_sources}, '_libs.indexing': { 'pyxfile': '_libs/indexing'}, + '_libs.internals': { + 'pyxfile': '_libs/internals'}, '_libs.interval': { 'pyxfile': '_libs/interval', 'pxdfiles': ['_libs/hashtable'],
A bunch of functions from `lib` that are only used in core.internals, have light dependencies. Small optimizations: made `slice_getitem` a `cdef` instead of `def`, use PySlice_Check instead of isinstance.
https://api.github.com/repos/pandas-dev/pandas/pulls/19293
2018-01-18T07:36:42Z
2018-01-19T11:06:15Z
null
2018-01-19T16:56:30Z
cleanup inconsistently used imports
diff --git a/ci/lint.sh b/ci/lint.sh index 35b39e2abb3c6..a96e0961304e7 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -91,6 +91,15 @@ if [ "$LINT" ]; then fi echo "Check for invalid testing DONE" + # Check for imports from pandas.core.common instead + # of `import pandas.core.common as com` + echo "Check for non-standard imports" + grep -R --include="*.py*" -E "from pandas.core.common import " pandas + if [ $? = "0" ]; then + RET=1 + fi + echo "Check for non-standard imports DONE" + echo "Check for use of lists instead of generators in built-in Python functions" # Example: Avoid `any([i for i in some_iterator])` in favor of `any(i for i in some_iterator)` diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 708f903cd73cb..b50e01b0fb55a 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -28,7 +28,6 @@ is_list_like, is_sequence, is_scalar, is_dict_like) -from pandas.core.common import is_null_slice, _maybe_box_datetimelike from pandas.core.algorithms import factorize, take_1d, unique1d from pandas.core.accessor import PandasDelegate @@ -468,7 +467,7 @@ def tolist(self): (for Timestamp/Timedelta/Interval/Period) """ if is_datetimelike(self.categories): - return [_maybe_box_datetimelike(x) for x in self] + return [com._maybe_box_datetimelike(x) for x in self] return np.array(self).tolist() @property @@ -1686,7 +1685,7 @@ def _slice(self, slicer): # only allow 1 dimensional slicing, but can # in a 2-d case be passd (slice(None),....) if isinstance(slicer, tuple) and len(slicer) == 2: - if not is_null_slice(slicer[0]): + if not com.is_null_slice(slicer[0]): raise AssertionError("invalid slicing for a 1-ndim " "categorical") slicer = slicer[1] @@ -1847,7 +1846,7 @@ def __setitem__(self, key, value): # only allow 1 dimensional slicing, but can # in a 2-d case be passd (slice(None),....) 
if len(key) == 2: - if not is_null_slice(key[0]): + if not com.is_null_slice(key[0]): raise AssertionError("invalid slicing for a 1-ndim " "categorical") key = key[1] diff --git a/pandas/core/base.py b/pandas/core/base.py index 4b3e74eae36b8..54d25a16a10a3 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -24,7 +24,6 @@ from pandas.compat import PYPY from pandas.util._decorators import (Appender, cache_readonly, deprecate_kwarg, Substitution) -from pandas.core.common import AbstractMethodError, _maybe_box_datetimelike from pandas.core.accessor import DirNamesMixin @@ -46,7 +45,7 @@ class StringMixin(object): # Formatting def __unicode__(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def __str__(self): """ @@ -278,10 +277,10 @@ def _gotitem(self, key, ndim, subset=None): subset to act on """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def aggregate(self, func, *args, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) agg = aggregate @@ -815,7 +814,7 @@ def tolist(self): """ if is_datetimelike(self): - return [_maybe_box_datetimelike(x) for x in self._values] + return [com._maybe_box_datetimelike(x) for x in self._values] else: return self._values.tolist() @@ -1238,4 +1237,4 @@ def duplicated(self, keep='first'): # abstracts def _update_inplace(self, result, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) diff --git a/pandas/core/computation/align.py b/pandas/core/computation/align.py index 2e912b0075bfd..22c8b641cf974 100644 --- a/pandas/core/computation/align.py +++ b/pandas/core/computation/align.py @@ -10,7 +10,7 @@ import pandas as pd from pandas import compat from pandas.errors import PerformanceWarning -from pandas.core.common import flatten +import pandas.core.common as com from pandas.core.computation.common import _result_type_many @@ -117,7 +117,7 @@ def _align(terms): """Align a set of terms""" try: # flatten the parse 
tree (a nested list, really) - terms = list(flatten(terms)) + terms = list(com.flatten(terms)) except TypeError: # can't iterate so it must just be a constant or single variable if isinstance(terms.value, pd.core.generic.NDFrame): diff --git a/pandas/core/computation/expressions.py b/pandas/core/computation/expressions.py index 1dc19d33f3365..781101f5804e6 100644 --- a/pandas/core/computation/expressions.py +++ b/pandas/core/computation/expressions.py @@ -8,7 +8,8 @@ import warnings import numpy as np -from pandas.core.common import _values_from_object + +import pandas.core.common as com from pandas.core.computation.check import _NUMEXPR_INSTALLED from pandas.core.config import get_option @@ -122,8 +123,8 @@ def _evaluate_numexpr(op, op_str, a, b, truediv=True, def _where_standard(cond, a, b): - return np.where(_values_from_object(cond), _values_from_object(a), - _values_from_object(b)) + return np.where(com._values_from_object(cond), com._values_from_object(a), + com._values_from_object(b)) def _where_numexpr(cond, a, b): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f0919871218f5..847779b1747cf 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -62,12 +62,6 @@ from pandas.core.dtypes.missing import isna, notna -from pandas.core.common import (_try_sort, - _default_index, - _values_from_object, - _maybe_box_datetimelike, - _dict_compat, - standardize_mapping) from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, _ensure_index_from_sequences) @@ -387,9 +381,9 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, if isinstance(data[0], Series): index = _get_names_from_index(data) elif isinstance(data[0], Categorical): - index = _default_index(len(data[0])) + index = com._default_index(len(data[0])) else: - index = _default_index(len(data)) + index = com._default_index(len(data)) mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) @@ -466,7 
+460,7 @@ def _init_dict(self, data, index, columns, dtype=None): else: keys = list(data.keys()) if not isinstance(data, OrderedDict): - keys = _try_sort(keys) + keys = com._try_sort(keys) columns = data_names = Index(keys) arrays = [data[k] for k in keys] @@ -493,12 +487,12 @@ def _get_axes(N, K, index=index, columns=columns): # return axes or defaults if index is None: - index = _default_index(N) + index = com._default_index(N) else: index = _ensure_index(index) if columns is None: - columns = _default_index(K) + columns = com._default_index(K) else: columns = _ensure_index(columns) return index, columns @@ -990,7 +984,7 @@ def to_dict(self, orient='dict', into=dict): "columns will be omitted.", UserWarning, stacklevel=2) # GH16122 - into_c = standardize_mapping(into) + into_c = com.standardize_mapping(into) if orient.lower().startswith('d'): return into_c( (k, v.to_dict(into)) for k, v in compat.iteritems(self)) @@ -1000,13 +994,13 @@ def to_dict(self, orient='dict', into=dict): return into_c((('index', self.index.tolist()), ('columns', self.columns.tolist()), ('data', lib.map_infer(self.values.ravel(), - _maybe_box_datetimelike) + com._maybe_box_datetimelike) .reshape(self.values.shape).tolist()))) elif orient.lower().startswith('s'): - return into_c((k, _maybe_box_datetimelike(v)) + return into_c((k, com._maybe_box_datetimelike(v)) for k, v in compat.iteritems(self)) elif orient.lower().startswith('r'): - return [into_c((k, _maybe_box_datetimelike(v)) + return [into_c((k, com._maybe_box_datetimelike(v)) for k, v in zip(self.columns, np.atleast_1d(row))) for row in self.values] elif orient.lower().startswith('i'): @@ -1947,30 +1941,28 @@ def transpose(self, *args, **kwargs): # legacy pickle formats def _unpickle_frame_compat(self, state): # pragma: no cover - from pandas.core.common import _unpickle_array if len(state) == 2: # pragma: no cover series, idx = state columns = sorted(series) else: series, cols, idx = state - columns = _unpickle_array(cols) + 
columns = com._unpickle_array(cols) - index = _unpickle_array(idx) + index = com._unpickle_array(idx) self._data = self._init_dict(series, index, columns, None) def _unpickle_matrix_compat(self, state): # pragma: no cover - from pandas.core.common import _unpickle_array # old unpickling (vals, idx, cols), object_state = state - index = _unpickle_array(idx) - dm = DataFrame(vals, index=index, columns=_unpickle_array(cols), + index = com._unpickle_array(idx) + dm = DataFrame(vals, index=index, columns=com._unpickle_array(cols), copy=False) if object_state is not None: ovals, _, ocols = object_state objects = DataFrame(ovals, index=index, - columns=_unpickle_array(ocols), copy=False) + columns=com._unpickle_array(ocols), copy=False) dm = dm.join(objects) @@ -2006,7 +1998,7 @@ def _get_value(self, index, col, takeable=False): if takeable: series = self._iget_item_cache(col) - return _maybe_box_datetimelike(series._values[index]) + return com._maybe_box_datetimelike(series._values[index]) series = self._get_item_cache(col) engine = self.index._engine @@ -3371,7 +3363,7 @@ def _maybe_casted_values(index, labels=None): values, mask, np.nan) return values - new_index = _default_index(len(new_obj)) + new_index = com._default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] @@ -6084,7 +6076,7 @@ def extract_index(data): (lengths[0], len(index))) raise ValueError(msg) else: - index = _default_index(lengths[0]) + index = com._default_index(lengths[0]) return _ensure_index(index) @@ -6155,7 +6147,7 @@ def _to_arrays(data, columns, coerce_float=False, dtype=None): dtype=dtype) elif isinstance(data[0], Categorical): if columns is None: - columns = _default_index(len(data)) + columns = com._default_index(len(data)) return data, columns elif (isinstance(data, (np.ndarray, Series, Index)) and data.dtype.names is not None): @@ -6179,7 +6171,7 @@ def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): if index is None: index = 
_get_names_from_index(fdata) if index is None: - index = _default_index(len(data)) + index = com._default_index(len(data)) index = _ensure_index(index) if columns is not None: @@ -6239,14 +6231,14 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): for s in data: index = getattr(s, 'index', None) if index is None: - index = _default_index(len(s)) + index = com._default_index(len(s)) if id(index) in indexer_cache: indexer = indexer_cache[id(index)] else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) - values = _values_from_object(s) + values = com._values_from_object(s) aligned_values.append(algorithms.take_1d(values, indexer)) values = np.vstack(aligned_values) @@ -6276,7 +6268,7 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): def _convert_object_array(content, columns, coerce_float=False, dtype=None): if columns is None: - columns = _default_index(len(content)) + columns = com._default_index(len(content)) else: if len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... 
@@ -6298,7 +6290,7 @@ def convert(arr): def _get_names_from_index(data): has_some_name = any(getattr(s, 'name', None) is not None for s in data) if not has_some_name: - return _default_index(len(data)) + return com._default_index(len(data)) index = lrange(len(data)) count = 0 @@ -6333,7 +6325,7 @@ def _homogenize(data, index, dtype=None): oindex = index.astype('O') if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - v = _dict_compat(v) + v = com._dict_compat(v) else: v = dict(v) v = lib.fast_multiget(v, oindex.values, default=np.nan) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7ffef9c8a86d7..6e777281b11e1 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -30,10 +30,6 @@ from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame -from pandas.core.common import (_count_not_none, - _maybe_box_datetimelike, _values_from_object, - AbstractMethodError, SettingWithCopyError, - SettingWithCopyWarning) from pandas.core.base import PandasObject, SelectionMixin from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -198,7 +194,7 @@ def _constructor(self): """Used when a manipulation result has the same dimensions as the original. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def __unicode__(self): # unicode representation based upon iterating over self @@ -220,7 +216,7 @@ def _constructor_sliced(self): """Used when a manipulation result has one lower dimension(s) as the original, such as DataFrame single columns slicing. 
""" - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) @property def _constructor_expanddim(self): @@ -1026,7 +1022,7 @@ def _indexed_same(self, other): for a in self._AXIS_ORDERS) def __neg__(self): - values = _values_from_object(self) + values = com._values_from_object(self) if values.dtype == np.bool_: arr = operator.inv(values) else: @@ -1035,7 +1031,7 @@ def __neg__(self): def __invert__(self): try: - arr = operator.inv(_values_from_object(self)) + arr = operator.inv(com._values_from_object(self)) return self.__array_wrap__(arr) except Exception: @@ -1490,7 +1486,7 @@ def __round__(self, decimals=0): # Array Interface def __array__(self, dtype=None): - return _values_from_object(self) + return com._values_from_object(self) def __array_wrap__(self, result, context=None): d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False) @@ -2204,7 +2200,7 @@ def _iget_item_cache(self, item): return lower def _box_item_values(self, key, values): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _maybe_cache_changed(self, item, value): """The object has called back to us saying maybe it has changed. @@ -2397,9 +2393,10 @@ def _check_setitem_copy(self, stacklevel=4, t='setting', force=False): ) if value == 'raise': - raise SettingWithCopyError(t) + raise com.SettingWithCopyError(t) elif value == 'warn': - warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel) + warnings.warn(t, com.SettingWithCopyWarning, + stacklevel=stacklevel) def __delitem__(self, key): """ @@ -2696,7 +2693,7 @@ def xs(self, key, axis=0, level=None, drop_level=True): # that means that their are list/ndarrays inside the Series! 
# so just return them (GH 6394) if not is_list_like(new_values) or self.ndim == 1: - return _maybe_box_datetimelike(new_values) + return com._maybe_box_datetimelike(new_values) result = self._constructor_sliced( new_values, index=self.columns, @@ -3557,7 +3554,7 @@ def filter(self, items=None, like=None, regex=None, axis=None): """ import re - nkw = _count_not_none(items, like, regex) + nkw = com._count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` ' 'are mutually exclusive') @@ -6357,7 +6354,8 @@ def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None, if try_quick: try: - new_other = _values_from_object(self).copy() + new_other = com._values_from_object(self) + new_other = new_other.copy() new_other[icond] = other other = new_other except Exception: @@ -7318,7 +7316,7 @@ def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None, rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1) if freq is None: - mask = isna(_values_from_object(self)) + mask = isna(com._values_from_object(self)) np.putmask(rs.values, mask, np.nan) return rs @@ -7778,7 +7776,7 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs): else: axis = self._get_axis_number(axis) - y = _values_from_object(self).copy() + y = com._values_from_object(self).copy() if (skipna and issubclass(y.dtype.type, (np.datetime64, np.timedelta64))): diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 25e44589488ee..64ce78c78dc53 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -39,10 +39,6 @@ from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.missing import isna, notna, _maybe_fill -from pandas.core.common import (_values_from_object, AbstractMethodError, - _default_index, _not_none, _get_callable_name, - _asarray_tuplesafe, _pipe) - from pandas.core.base import (PandasObject, SelectionMixin, GroupByError, DataError, 
SpecificationError) from pandas.core.index import (Index, MultiIndex, @@ -61,6 +57,7 @@ from pandas.io.formats.printing import pprint_thing from pandas.util._validators import validate_kwargs +import pandas.core.common as com import pandas.core.algorithms as algorithms from pandas.core.config import option_context @@ -751,7 +748,7 @@ def __getattr__(self, attr): b 2""") @Appender(_pipe_template) def pipe(self, func, *args, **kwargs): - return _pipe(self, func, *args, **kwargs) + return com._pipe(self, func, *args, **kwargs) plot = property(GroupByPlot) @@ -895,7 +892,7 @@ def _iterate_slices(self): yield self._selection_name, self._selected_obj def transform(self, func, *args, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _cumcount_array(self, ascending=True): """ @@ -1037,7 +1034,7 @@ def _python_agg_general(self, func, *args, **kwargs): return self._wrap_aggregated_output(output) def _wrap_applied_output(self, *args, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _concat_objects(self, keys, values, not_indexed_same=False): from pandas.core.reshape.concat import concat @@ -1045,7 +1042,7 @@ def _concat_objects(self, keys, values, not_indexed_same=False): def reset_identity(values): # reset the identities of the components # of the values to prevent aliasing - for v in _not_none(*values): + for v in com._not_none(*values): ax = v._get_axis(self.axis) ax._reset_identity() return values @@ -1975,7 +1972,7 @@ def apply(self, f, data, axis=0): group_keys = self._get_group_keys() # oh boy - f_name = _get_callable_name(f) + f_name = com._get_callable_name(f) if (f_name not in _plotting_methods and hasattr(splitter, 'fast_apply') and axis == 0): try: @@ -2009,7 +2006,7 @@ def indices(self): return self.groupings[0].indices else: label_list = [ping.labels for ping in self.groupings] - keys = [_values_from_object(ping.group_index) + keys = [com._values_from_object(ping.group_index) for ping in 
self.groupings] return get_indexer_dict(label_list, keys) @@ -2707,7 +2704,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None, self.grouper = self.obj[self.name] elif isinstance(self.grouper, (list, tuple)): - self.grouper = _asarray_tuplesafe(self.grouper) + self.grouper = com._asarray_tuplesafe(self.grouper) # a passed Categorical elif is_categorical_dtype(self.grouper): @@ -2934,7 +2931,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True, if not any_callable and not all_in_columns_index and \ not any_arraylike and not any_groupers and \ match_axis_length and level is None: - keys = [_asarray_tuplesafe(keys)] + keys = [com._asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: @@ -3229,7 +3226,7 @@ def _aggregate_multiple_funcs(self, arg, _level): columns.append(f) else: # protect against callables without names - columns.append(_get_callable_name(f)) + columns.append(com._get_callable_name(f)) arg = lzip(columns, arg) results = {} @@ -3829,7 +3826,7 @@ def _aggregate_generic(self, func, *args, **kwargs): return self._wrap_generic_output(result, obj) def _wrap_aggregated_output(self, output, names=None): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _aggregate_item_by_item(self, func, *args, **kwargs): # only for axis==0 @@ -3891,7 +3888,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False): # GH12824. 
def first_not_none(values): try: - return next(_not_none(*values)) + return next(com._not_none(*values)) except StopIteration: return None @@ -4585,7 +4582,7 @@ def groupby_series(obj, col=None): results = concat(results, axis=1) if not self.as_index: - results.index = _default_index(len(results)) + results.index = com._default_index(len(results)) return results boxplot = boxplot_frame_groupby @@ -4675,7 +4672,7 @@ def _aggregate_item_by_item(self, func, *args, **kwargs): raise ValueError("axis value must be greater than 0") def _wrap_aggregated_output(self, output, names=None): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class NDArrayGroupBy(GroupBy): @@ -4731,7 +4728,7 @@ def _chop(self, sdata, slice_obj): return sdata.iloc[slice_obj] def apply(self, f): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class ArraySplitter(DataSplitter): diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 6d0a415f5b420..34578d7a717b1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -41,11 +41,9 @@ needs_i8_conversion, is_iterator, is_list_like, is_scalar) -from pandas.core.common import (is_bool_indexer, _values_from_object, - _asarray_tuplesafe, _not_none, - _index_labels_to_array) from pandas.core.base import PandasObject, IndexOpsMixin +import pandas.core.common as com import pandas.core.base as base from pandas.util._decorators import ( Appender, Substitution, cache_readonly, deprecate_kwarg) @@ -292,7 +290,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data): subarr = data.astype('object') else: - subarr = _asarray_tuplesafe(data, dtype=object) + subarr = com._asarray_tuplesafe(data, dtype=object) # _asarray_tuplesafe does not always copy underlying data, # so need to make sure that this happens @@ -361,7 +359,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, return 
MultiIndex.from_tuples( data, names=name or kwargs.get('names')) # other iterable of some kind - subarr = _asarray_tuplesafe(data, dtype=object) + subarr = com._asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs) """ @@ -1498,7 +1496,7 @@ def _convert_listlike_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = _asarray_tuplesafe(keyarr) + keyarr = com._asarray_tuplesafe(keyarr) return keyarr _index_shared_docs['_convert_index_indexer'] = """ @@ -1736,10 +1734,10 @@ def __getitem__(self, key): # pessimization of basic indexing. return promote(getitem(key)) - if is_bool_indexer(key): + if com.is_bool_indexer(key): key = np.asarray(key) - key = _values_from_object(key) + key = com._values_from_object(key) result = getitem(key) if not is_scalar(result): return promote(result) @@ -2022,8 +2020,8 @@ def equals(self, other): return other.equals(self) try: - return array_equivalent(_values_from_object(self), - _values_from_object(other)) + return array_equivalent(com._values_from_object(self), + com._values_from_object(other)) except Exception: return False @@ -2539,8 +2537,8 @@ def get_value(self, series, key): # invalid type as an indexer pass - s = _values_from_object(series) - k = _values_from_object(key) + s = com._values_from_object(series) + k = com._values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') try: @@ -2573,8 +2571,8 @@ def set_value(self, arr, key, value): Fast lookup of value from 1-dimensional ndarray. 
Only use this if you know what you're doing """ - self._engine.set_value(_values_from_object(arr), - _values_from_object(key), value) + self._engine.set_value(com._values_from_object(arr), + com._values_from_object(key), value) def _get_level_values(self, level): """ @@ -3193,8 +3191,8 @@ def _join_multi(self, other, how, return_indexers=True): other_is_mi = isinstance(other, MultiIndex) # figure out join names - self_names = _not_none(*self.names) - other_names = _not_none(*other.names) + self_names = com._not_none(*self.names) + other_names = com._not_none(*other.names) overlap = list(set(self_names) & set(other_names)) # need at least 1 in common, but not more than 1 @@ -3766,7 +3764,7 @@ def drop(self, labels, errors='raise'): If none of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None - labels = _index_labels_to_array(labels, dtype=arr_dtype) + labels = com._index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer(labels) mask = indexer == -1 if mask.any(): @@ -4001,7 +3999,7 @@ def _validate_for_numeric_binop(self, other, op, opstr): if len(self) != len(other): raise ValueError("cannot evaluate a numeric op with " "unequal lengths") - other = _values_from_object(other) + other = com._values_from_object(other) if other.dtype.kind not in ['f', 'i', 'u']: raise TypeError("cannot evaluate a numeric op " "with a non-numeric dtype") diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 9a6210db1aacb..2d4655d84dca8 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -11,8 +11,6 @@ is_list_like, is_interval_dtype, is_scalar) -from pandas.core.common import (_asarray_tuplesafe, - _values_from_object) from pandas.core.dtypes.missing import array_equivalent, isna from pandas.core.algorithms import take_1d @@ -21,6 +19,7 @@ from pandas.core.config import get_option from pandas.core.indexes.base import Index, _index_shared_docs from 
pandas.core import accessor +import pandas.core.common as com import pandas.core.base as base import pandas.core.missing as missing import pandas.core.indexes.base as ibase @@ -442,7 +441,7 @@ def get_value(self, series, key): know what you're doing """ try: - k = _values_from_object(key) + k = com._values_from_object(key) k = self._convert_scalar_indexer(k, kind='getitem') indexer = self.get_loc(k) return series.iloc[indexer] @@ -620,7 +619,7 @@ def _convert_list_indexer(self, keyarr, kind=None): @Appender(_index_shared_docs['_convert_arr_indexer']) def _convert_arr_indexer(self, keyarr): - keyarr = _asarray_tuplesafe(keyarr) + keyarr = com._asarray_tuplesafe(keyarr) if self.categories._defer_to_indexing: return keyarr diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 7bb6708e03421..f43c6dc567f69 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -31,9 +31,7 @@ from pandas.core.dtypes.missing import isna from pandas.core import common as com, algorithms from pandas.core.algorithms import checked_add_with_arr -from pandas.core.common import AbstractMethodError from pandas.errors import NullFrequencyError - import pandas.io.formats.printing as printing from pandas._libs import lib, iNaT, NaT from pandas._libs.tslibs.period import Period @@ -245,7 +243,7 @@ def _box_func(self): """ box function to get object from internal representation """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _box_values(self, values): """ @@ -589,7 +587,7 @@ def argmax(self, axis=None, *args, **kwargs): @property def _formatter_func(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _format_attrs(self): """ @@ -647,7 +645,7 @@ def _add_datelike(self, other): type(other).__name__)) def _sub_datelike(self, other): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _sub_period(self, other): return NotImplemented diff --git 
a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 0349e5c0a448f..afc86a51c02b4 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -34,7 +34,6 @@ import pandas.core.dtypes.concat as _concat from pandas.errors import PerformanceWarning -from pandas.core.common import _values_from_object, _maybe_box from pandas.core.algorithms import checked_add_with_arr from pandas.core.indexes.base import Index, _index_shared_docs @@ -126,7 +125,7 @@ def wrapper(self, other): self._assert_tzawareness_compat(other) result = func(np.asarray(other)) - result = _values_from_object(result) + result = com._values_from_object(result) if isinstance(other, Index): o_mask = other.values.view('i8') == libts.iNaT @@ -1488,8 +1487,8 @@ def get_value(self, series, key): return series.take(locs) try: - return _maybe_box(self, Index.get_value(self, series, key), - series, key) + return com._maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -1508,9 +1507,9 @@ def get_value_maybe_box(self, series, key): key = Timestamp(key, tz=self.tz) elif not isinstance(key, Timestamp): key = Timestamp(key) - values = self._engine.get_value(_values_from_object(series), + values = self._engine.get_value(com._values_from_object(series), key, tz=self.tz) - return _maybe_box(self, values, series, key) + return com._maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 58b1bdb3f55ea..0e087c40cfef3 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -35,9 +35,7 @@ from pandas.core.indexes.timedeltas import timedelta_range from pandas.core.indexes.multi import MultiIndex from pandas.compat.numpy import function as nv -from pandas.core.common import ( - _all_not_none, _any_none, _asarray_tuplesafe, _count_not_none, - 
is_bool_indexer, _maybe_box_datetimelike, _not_none) +import pandas.core.common as com from pandas.util._decorators import cache_readonly, Appender from pandas.core.config import get_option from pandas.tseries.frequencies import to_offset @@ -237,7 +235,8 @@ def __new__(cls, data, closed=None, data = maybe_convert_platform_interval(data) left, right, infer_closed = intervals_to_interval_bounds(data) - if _all_not_none(closed, infer_closed) and closed != infer_closed: + if (com._all_not_none(closed, infer_closed) and + closed != infer_closed): # GH 18421 msg = ("conflicting values for closed: constructor got " "'{closed}', inferred from data '{infer_closed}'" @@ -602,7 +601,7 @@ def to_tuples(self, na_tuple=True): >>> idx.to_tuples(na_tuple=False) Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object') """ - tuples = _asarray_tuplesafe(zip(self.left, self.right)) + tuples = com._asarray_tuplesafe(zip(self.left, self.right)) if not na_tuple: # GH 18756 tuples = np.where(~self._isnan, tuples, np.nan) @@ -975,7 +974,7 @@ def get_loc(self, key, method=None): return self._engine.get_loc(key) def get_value(self, series, key): - if is_bool_indexer(key): + if com.is_bool_indexer(key): loc = key elif is_list_like(key): loc = self.get_indexer(key) @@ -1347,7 +1346,7 @@ def _is_type_compatible(a, b): return ((is_number(a) and is_number(b)) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) or - _any_none(a, b)) + com._any_none(a, b)) def interval_range(start=None, end=None, periods=None, freq=None, @@ -1426,13 +1425,13 @@ def interval_range(start=None, end=None, periods=None, freq=None, -------- IntervalIndex : an Index of intervals that are all closed on the same side. 
""" - if _count_not_none(start, end, periods) != 2: + if com._count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, ' 'exactly two must be specified') - start = _maybe_box_datetimelike(start) - end = _maybe_box_datetimelike(end) - endpoint = next(_not_none(start, end)) + start = com._maybe_box_datetimelike(start) + end = com._maybe_box_datetimelike(end) + endpoint = next(com._not_none(start, end)) if not _is_valid_endpoint(start): msg = 'start must be numeric or datetime-like, got {start}' diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index e50e87f8bd571..797774832aaa5 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -22,11 +22,6 @@ is_scalar) from pandas.core.dtypes.missing import isna, array_equivalent from pandas.errors import PerformanceWarning, UnsortedIndexError -from pandas.core.common import (_any_not_none, - _values_from_object, - is_bool_indexer, - is_null_slice, - is_true_slices) import pandas.core.base as base from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg @@ -539,7 +534,7 @@ def _format_attrs(self): max_seq_items=False)), ('labels', ibase.default_pprint(self._labels, max_seq_items=False))] - if _any_not_none(*self.names): + if com._any_not_none(*self.names): attrs.append(('names', ibase.default_pprint(self.names))) if self.sortorder is not None: attrs.append(('sortorder', ibase.default_pprint(self.sortorder))) @@ -863,8 +858,8 @@ def get_value(self, series, key): from pandas.core.indexing import maybe_droplevels # Label-based - s = _values_from_object(series) - k = _values_from_object(key) + s = com._values_from_object(series) + k = com._values_from_object(key) def _try_mi(k): # TODO: what if a level contains tuples?? 
@@ -1474,7 +1469,7 @@ def __getitem__(self, key): return tuple(retval) else: - if is_bool_indexer(key): + if com.is_bool_indexer(key): key = np.asarray(key) sortorder = self.sortorder else: @@ -1612,7 +1607,7 @@ def drop(self, labels, level=None, errors='raise'): inds.append(loc) elif isinstance(loc, slice): inds.extend(lrange(loc.start, loc.stop)) - elif is_bool_indexer(loc): + elif com.is_bool_indexer(loc): if self.lexsort_depth == 0: warnings.warn('dropping on a non-lexsorted multi-index' ' without a level parameter may impact ' @@ -2145,7 +2140,7 @@ def _maybe_str_to_time_stamp(key, lev): pass return key - key = _values_from_object(key) + key = com._values_from_object(key) key = tuple(map(_maybe_str_to_time_stamp, key, self.levels)) return self._engine.get_loc(key) @@ -2303,7 +2298,7 @@ def partial_selection(key, indexer=None): key = tuple(self[indexer].tolist()[0]) return (self._engine.get_loc( - _values_from_object(key)), None) + com._values_from_object(key)), None) else: return partial_selection(key) @@ -2463,7 +2458,7 @@ def get_locs(self, seq): """ # must be lexsorted to at least as many levels - true_slices = [i for (i, s) in enumerate(is_true_slices(seq)) if s] + true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s] if true_slices and true_slices[-1] >= self.lexsort_depth: raise UnsortedIndexError('MultiIndex slicing requires the index ' 'to be lexsorted: slicing on levels {0}, ' @@ -2480,7 +2475,7 @@ def _convert_to_indexer(r): m = np.zeros(n, dtype=bool) m[r] = True r = m.nonzero()[0] - elif is_bool_indexer(r): + elif com.is_bool_indexer(r): if len(r) != n: raise ValueError("cannot index with a boolean indexer " "that is not the same length as the " @@ -2498,7 +2493,7 @@ def _update_indexer(idxr, indexer=indexer): for i, k in enumerate(seq): - if is_bool_indexer(k): + if com.is_bool_indexer(k): # a boolean indexer, must be the same length! 
k = np.asarray(k) indexer = _update_indexer(_convert_to_indexer(k), @@ -2527,7 +2522,7 @@ def _update_indexer(idxr, indexer=indexer): # no matches we are done return Int64Index([])._values - elif is_null_slice(k): + elif com.is_null_slice(k): # empty slice indexer = _update_indexer(None, indexer=indexer) @@ -2594,8 +2589,8 @@ def equals(self, other): return False if not isinstance(other, MultiIndex): - return array_equivalent(self._values, - _values_from_object(_ensure_index(other))) + other_vals = com._values_from_object(_ensure_index(other)) + return array_equivalent(self._values, other_vals) if self.nlevels != other.nlevels: return False diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 6337c2f73d5ec..5e6ebb7588ab9 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -9,10 +9,10 @@ is_bool, is_bool_dtype, is_scalar) -from pandas.core.common import _asarray_tuplesafe, _values_from_object from pandas import compat from pandas.core import algorithms +import pandas.core.common as com from pandas.core.indexes.base import ( Index, InvalidIndexError, _index_shared_docs) from pandas.util._decorators import Appender, cache_readonly @@ -251,9 +251,9 @@ def _convert_arr_indexer(self, keyarr): # Cast the indexer to uint64 if possible so # that the values returned from indexing are # also uint64. 
- keyarr = _asarray_tuplesafe(keyarr) + keyarr = com._asarray_tuplesafe(keyarr) if is_integer_dtype(keyarr): - return _asarray_tuplesafe(keyarr, dtype=np.uint64) + return com._asarray_tuplesafe(keyarr, dtype=np.uint64) return keyarr @Appender(_index_shared_docs['_convert_index_indexer']) @@ -357,9 +357,9 @@ def get_value(self, series, key): if not is_scalar(key): raise InvalidIndexError - k = _values_from_object(key) + k = com._values_from_object(key) loc = self.get_loc(k) - new_values = _values_from_object(series)[loc] + new_values = com._values_from_object(series)[loc] return new_values diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 10a923c056be2..1a18b86acf57f 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -13,7 +13,8 @@ from pandas import compat from pandas.compat import lrange, range, get_range_parameters from pandas.compat.numpy import function as nv -from pandas.core.common import _all_none + +import pandas.core.common as com from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly import pandas.core.dtypes.concat as _concat @@ -89,7 +90,7 @@ def _ensure_int(value, field): return new_value - if _all_none(start, stop, step): + if com._all_none(start, stop, step): msg = "RangeIndex(...) 
must be called with integers" raise TypeError(msg) elif start is None: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 3e671731be348..b88ee88210cfe 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -17,7 +17,6 @@ _ensure_int64) from pandas.core.dtypes.missing import isna from pandas.core.dtypes.generic import ABCSeries -from pandas.core.common import _maybe_box, _values_from_object from pandas.core.indexes.base import Index from pandas.core.indexes.numeric import Int64Index @@ -77,7 +76,7 @@ def wrapper(self, other): other = TimedeltaIndex(other).values result = func(other) - result = _values_from_object(result) + result = com._values_from_object(result) if isinstance(other, Index): o_mask = other.values.view('i8') == iNaT @@ -710,8 +709,8 @@ def get_value(self, series, key): return self.get_value_maybe_box(series, key) try: - return _maybe_box(self, Index.get_value(self, series, key), - series, key) + return com._maybe_box(self, Index.get_value(self, series, key), + series, key) except KeyError: try: loc = self._get_string_slice(key) @@ -727,8 +726,8 @@ def get_value(self, series, key): def get_value_maybe_box(self, series, key): if not isinstance(key, Timedelta): key = Timedelta(key) - values = self._engine.get_value(_values_from_object(series), key) - return _maybe_box(self, values, series, key) + values = self._engine.get_value(com._values_from_object(series), key) + return com._maybe_box(self, values, series, key) def get_loc(self, key, method=None, tolerance=None): """ diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index e2c4043f0508d..3ca150cda83c7 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -20,9 +20,6 @@ from pandas.core.index import Index, MultiIndex import pandas.core.common as com -from pandas.core.common import (is_bool_indexer, _asarray_tuplesafe, - is_null_slice, is_full_slice, - _values_from_object) from pandas._libs.indexing 
import _NDFrameIndexerBase @@ -314,7 +311,7 @@ def _setitem_with_indexer(self, indexer, value): # (not null slices) then we must take the split path, xref # GH 10360 if (isinstance(ax, MultiIndex) and - not (is_integer(i) or is_null_slice(i))): + not (is_integer(i) or com.is_null_slice(i))): take_split_path = True break @@ -519,8 +516,8 @@ def setter(item, v): # multi-dim object # GH6149 (null slice), GH10408 (full bounds) if (isinstance(pi, tuple) and - all(is_null_slice(idx) or - is_full_slice(idx, len(self.obj)) + all(com.is_null_slice(idx) or + com.is_full_slice(idx, len(self.obj)) for idx in pi)): s = v else: @@ -613,8 +610,10 @@ def can_do_equal_len(): # logic here if (len(indexer) > info_axis and is_integer(indexer[info_axis]) and - all(is_null_slice(idx) for i, idx in enumerate(indexer) - if i != info_axis) and item_labels.is_unique): + all(com.is_null_slice(idx) + for i, idx in enumerate(indexer) + if i != info_axis) and + item_labels.is_unique): self.obj[item_labels[indexer[info_axis]]] = value return @@ -667,7 +666,7 @@ def _align_series(self, indexer, ser, multiindex_indexer=False): ravel = lambda i: i.ravel() if isinstance(i, np.ndarray) else i indexer = tuple(map(ravel, indexer)) - aligners = [not is_null_slice(idx) for idx in indexer] + aligners = [not com.is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) single_aligner = sum_aligners == 1 is_frame = self.obj.ndim == 2 @@ -706,7 +705,7 @@ def _align_series(self, indexer, ser, multiindex_indexer=False): # multiple aligners (or null slices) if is_sequence(idx) or isinstance(idx, slice): - if single_aligner and is_null_slice(idx): + if single_aligner and com.is_null_slice(idx): continue new_ix = ax[idx] if not is_list_like_indexer(new_ix): @@ -767,7 +766,7 @@ def _align_frame(self, indexer, df): if isinstance(indexer, tuple): - aligners = [not is_null_slice(idx) for idx in indexer] + aligners = [not com.is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) # TODO: 
single_aligner is not used single_aligner = sum_aligners == 1 # noqa @@ -869,7 +868,7 @@ def _getitem_tuple(self, tup): if i >= self.obj.ndim: raise IndexingError('Too many indexers') - if is_null_slice(key): + if com.is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) @@ -890,7 +889,7 @@ def _multi_take_opportunity(self, tup): for indexer, ax in zip(tup, self.obj._data.axes): if isinstance(ax, MultiIndex): return False - elif is_bool_indexer(indexer): + elif com.is_bool_indexer(indexer): return False elif not ax.is_unique: return False @@ -915,7 +914,7 @@ def _convert_for_reindex(self, key, axis=None): axis = self.axis or 0 labels = self.obj._get_axis(axis) - if is_bool_indexer(key): + if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) return labels[key] else: @@ -923,7 +922,7 @@ def _convert_for_reindex(self, key, axis=None): keyarr = labels._convert_index_indexer(key) else: # asarray can be unsafe, NumPy strings are weird - keyarr = _asarray_tuplesafe(key) + keyarr = com._asarray_tuplesafe(key) if is_integer_dtype(keyarr): # Cast the indexer to uint64 if possible so @@ -1011,7 +1010,7 @@ def _getitem_lowerdim(self, tup): # Slices should return views, but calling iloc/loc with a null # slice returns a new object. 
- if is_null_slice(new_key): + if com.is_null_slice(new_key): return section # This is an elided recursive call to iloc/loc/etc' return getattr(section, self.name)[new_key] @@ -1040,7 +1039,7 @@ def _getitem_nested_tuple(self, tup): axis = 0 for i, key in enumerate(tup): - if is_null_slice(key): + if com.is_null_slice(key): axis += 1 continue @@ -1113,7 +1112,7 @@ def _getitem_iterable(self, key, axis=None): labels = self.obj._get_axis(axis) - if is_bool_indexer(key): + if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) inds, = key.nonzero() return self.obj._take(inds, axis=axis, convert=False) @@ -1235,7 +1234,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): elif is_list_like_indexer(obj): - if is_bool_indexer(obj): + if com.is_bool_indexer(obj): obj = check_bool_indexer(labels, obj) inds, = obj.nonzero() return inds @@ -1265,7 +1264,7 @@ def _convert_to_indexer(self, obj, axis=None, is_setter=False): raise KeyError('{mask} not in index' .format(mask=objarr[mask])) - return _values_from_object(indexer) + return com._values_from_object(indexer) else: try: @@ -1336,7 +1335,7 @@ def _has_valid_type(self, key, axis): if isinstance(key, slice): return True - elif is_bool_indexer(key): + elif com.is_bool_indexer(key): return True elif is_list_like_indexer(key): @@ -1448,7 +1447,7 @@ def _has_valid_type(self, key, axis): if isinstance(key, slice): return True - elif is_bool_indexer(key): + elif com.is_bool_indexer(key): return True elif is_list_like_indexer(key): @@ -1576,7 +1575,7 @@ def _getitem_axis(self, key, axis=None): if isinstance(key, slice): self._has_valid_type(key, axis) return self._get_slice_axis(key, axis=axis) - elif is_bool_indexer(key): + elif com.is_bool_indexer(key): return self._getbool_axis(key, axis=axis) elif is_list_like_indexer(key): @@ -1653,7 +1652,7 @@ class _iLocIndexer(_LocationIndexer): _exception = IndexError def _has_valid_type(self, key, axis): - if is_bool_indexer(key): + if com.is_bool_indexer(key): 
if hasattr(key, 'index') and isinstance(key.index, Index): if key.index.inferred_type == 'integer': raise NotImplementedError("iLocation based boolean " @@ -1743,7 +1742,7 @@ def _getitem_tuple(self, tup): if i >= self.obj.ndim: raise IndexingError('Too many indexers') - if is_null_slice(key): + if com.is_null_slice(key): axis += 1 continue @@ -1807,7 +1806,7 @@ def _getitem_axis(self, key, axis=None): except TypeError: # pragma: no cover pass - if is_bool_indexer(key): + if com.is_bool_indexer(key): self._has_valid_type(key, axis) return self._getbool_axis(key, axis=axis) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index d95062c54b4c6..516b58a26510c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -56,7 +56,7 @@ import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex -from pandas.core.common import is_null_slice, _any_not_none +import pandas.core.common as com import pandas.core.algorithms as algos from pandas.core.index import Index, MultiIndex, _ensure_index @@ -591,7 +591,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, categories = kwargs.get('categories', None) ordered = kwargs.get('ordered', None) - if _any_not_none(categories, ordered): + if com._any_not_none(categories, ordered): dtype = CategoricalDtype(categories, ordered) if is_categorical_dtype(self.values): @@ -1733,7 +1733,7 @@ def iget(self, col): if self.ndim == 2 and isinstance(col, tuple): col, loc = col - if not is_null_slice(col) and col != 0: + if not com.is_null_slice(col) and col != 0: raise IndexError("{0} only contains one item".format(self)) return self.values[loc] else: @@ -2645,7 +2645,7 @@ def _slice(self, slicer): """ return a slice of my values """ if isinstance(slicer, tuple): col, loc = slicer - if not is_null_slice(col) and col != 0: + if not com.is_null_slice(col) and col != 0: raise IndexError("{0} only contains one item".format(self)) return self.values[loc] 
return self.values[slicer] diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index d1a355021f388..63989304bb5f9 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -20,7 +20,7 @@ from pandas.core.dtypes.cast import _int64_max, maybe_upcast_putmask from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype from pandas.core.config import get_option -from pandas.core.common import _values_from_object +import pandas.core.common as com _BOTTLENECK_INSTALLED = False _MIN_BOTTLENECK_VERSION = '1.0.0' @@ -205,7 +205,7 @@ def _get_values(values, skipna, fill_value=None, fill_value_typ=None, if necessary copy and mask using the specified fill_value copy = True will force the copy """ - values = _values_from_object(values) + values = com._values_from_object(values) if isfinite: mask = _isfinite(values) else: @@ -376,7 +376,7 @@ def get_median(x): mask = notna(x) if not skipna and not mask.all(): return np.nan - return algos.median(_values_from_object(x[mask])) + return algos.median(com._values_from_object(x[mask])) if not is_float_dtype(values): values = values.astype('f8') @@ -437,7 +437,7 @@ def nanstd(values, axis=None, skipna=True, ddof=1): @bottleneck_switch(ddof=1) def nanvar(values, axis=None, skipna=True, ddof=1): - values = _values_from_object(values) + values = com._values_from_object(values) dtype = values.dtype mask = isna(values) if is_any_int_dtype(values): @@ -546,7 +546,7 @@ def nanskew(values, axis=None, skipna=True): """ - values = _values_from_object(values) + values = com._values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') @@ -604,7 +604,7 @@ def nankurt(values, axis=None, skipna=True): central moment. 
""" - values = _values_from_object(values) + values = com._values_from_object(values) mask = isna(values) if not is_float_dtype(values.dtype): values = values.astype('f8') diff --git a/pandas/core/ops.py b/pandas/core/ops.py index fc04d9d291bf9..343b62940173e 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -18,9 +18,9 @@ from pandas.compat import bind_method import pandas.core.missing as missing +import pandas.core.common as com from pandas.errors import NullFrequencyError -from pandas.core.common import _values_from_object, _maybe_match_name from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.common import ( needs_i8_conversion, @@ -352,7 +352,7 @@ def na_op(x, y): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) mask = notna(x) & notna(y) - result[mask] = op(x[mask], _values_from_object(y[mask])) + result[mask] = op(x[mask], com._values_from_object(y[mask])) elif isinstance(x, np.ndarray): result = np.empty(len(x), dtype=x.dtype) mask = notna(x) @@ -453,7 +453,7 @@ def dispatch_to_index_op(op, left, right, index_class): def _get_series_op_result_name(left, right): # `left` is always a pd.Series if isinstance(right, (ABCSeries, pd.Index)): - name = _maybe_match_name(left, right) + name = com._maybe_match_name(left, right) else: name = left.name return name @@ -516,7 +516,7 @@ def na_op(x, y): if is_scalar(y): mask = isna(x) - y = libindex.convert_scalar(x, _values_from_object(y)) + y = libindex.convert_scalar(x, com._values_from_object(y)) else: mask = isna(x) | isna(y) y = y.view('i8') @@ -541,7 +541,7 @@ def wrapper(self, other, axis=None): self._get_axis_number(axis) if isinstance(other, ABCSeries): - name = _maybe_match_name(self, other) + name = com._maybe_match_name(self, other) if not self._indexed_same(other): msg = 'Can only compare identically-labeled Series objects' raise ValueError(msg) @@ -593,7 +593,7 @@ def wrapper(self, other, axis=None): .format(typ=type(other))) # always return 
a full value series here - res = _values_from_object(res) + res = com._values_from_object(res) res = pd.Series(res, index=self.index, name=self.name, dtype='bool') return res @@ -645,7 +645,7 @@ def wrapper(self, other): self, other = _align_method_SERIES(self, other, align_asobject=True) if isinstance(other, ABCSeries): - name = _maybe_match_name(self, other) + name = com._maybe_match_name(self, other) is_other_int_dtype = is_integer_dtype(other.dtype) other = fill_int(other) if is_other_int_dtype else fill_bool(other) diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1df69576e6ff2..ae86074ce2d05 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -17,11 +17,10 @@ import pandas.core.ops as ops import pandas.core.missing as missing +import pandas.core.common as com from pandas import compat from pandas.compat import (map, zip, range, u, OrderedDict) from pandas.compat.numpy import function as nv -from pandas.core.common import (_try_sort, _default_index, _all_not_none, - _any_not_none, _apply_if_callable) from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, _ensure_index, @@ -31,7 +30,6 @@ from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) -from pandas.core.ops import _op_descriptions from pandas.core.series import Series from pandas.core.reshape.util import cartesian_product from pandas.util._decorators import Appender @@ -174,7 +172,7 @@ def _init_data(self, data, copy, dtype, **kwargs): axes = None if isinstance(data, BlockManager): - if _any_not_none(*passed_axes): + if com._any_not_none(*passed_axes): axes = [x if x is not None else y for x, y in zip(passed_axes, data.axes)] mgr = data @@ -186,7 +184,7 @@ def _init_data(self, data, copy, dtype, **kwargs): mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy) copy = False dtype = None - elif is_scalar(data) and 
_all_not_none(*passed_axes): + elif is_scalar(data) and com._all_not_none(*passed_axes): values = cast_scalar_to_array([len(x) for x in passed_axes], data, dtype=dtype) mgr = self._init_matrix(values, passed_axes, dtype=values.dtype, @@ -209,7 +207,7 @@ def _init_dict(self, data, axes, dtype=None): else: ks = list(data.keys()) if not isinstance(data, OrderedDict): - ks = _try_sort(ks) + ks = com._try_sort(ks) haxis = Index(ks) for k, v in compat.iteritems(data): @@ -287,7 +285,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None): return cls(**d) def __getitem__(self, key): - key = _apply_if_callable(key, self) + key = com._apply_if_callable(key, self) if isinstance(self._info_axis, MultiIndex): return self._getitem_multilevel(key) @@ -325,7 +323,7 @@ def _init_matrix(self, data, axes, dtype=None, copy=False): fixed_axes = [] for i, ax in enumerate(axes): if ax is None: - ax = _default_index(shape[i]) + ax = com._default_index(shape[i]) else: ax = _ensure_index(ax) fixed_axes.append(ax) @@ -601,7 +599,7 @@ def _box_item_values(self, key, values): return self._constructor_sliced(values, **d) def __setitem__(self, key, value): - key = _apply_if_callable(key, self) + key = com._apply_if_callable(key, self) shape = tuple(self.shape) if isinstance(value, self._constructor_sliced): value = value.reindex( @@ -1545,9 +1543,9 @@ def na_op(x, y): result = missing.fill_zeros(result, x, y, name, fill_zeros) return result - if name in _op_descriptions: + if name in ops._op_descriptions: op_name = name.replace('__', '') - op_desc = _op_descriptions[op_name] + op_desc = ops._op_descriptions[op_name] if op_desc['reversed']: equiv = 'other ' + op_desc['op'] + ' panel' else: diff --git a/pandas/core/resample.py b/pandas/core/resample.py index 5447ce7470b9d..c215d9d5cffcc 100644 --- a/pandas/core/resample.py +++ b/pandas/core/resample.py @@ -5,7 +5,7 @@ from textwrap import dedent import pandas as pd -from pandas.core.base import AbstractMethodError, GroupByMixin 
+from pandas.core.base import GroupByMixin from pandas.core.groupby import (BinGrouper, Grouper, _GroupBy, GroupBy, SeriesGroupBy, groupby, PanelGroupBy, @@ -233,7 +233,7 @@ def _convert_obj(self, obj): return obj def _get_binner_for_time(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _set_binner(self): """ @@ -372,10 +372,10 @@ def transform(self, arg, *args, **kwargs): arg, *args, **kwargs) def _downsample(self, f): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _upsample(self, f, limit=None, fill_value=None): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _gotitem(self, key, ndim, subset=None): """ diff --git a/pandas/core/series.py b/pandas/core/series.py index be40f65186d2d..470dd23f26316 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -39,15 +39,6 @@ construct_1d_arraylike_from_scalar) from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike -from pandas.core.common import (is_bool_indexer, - _default_index, - _asarray_tuplesafe, - _values_from_object, - _maybe_match_name, - SettingWithCopyError, - _maybe_box_datetimelike, - standardize_mapping, - _any_none) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, Float64Index, _ensure_index) from pandas.core.indexing import check_bool_indexer, maybe_convert_indices @@ -230,7 +221,7 @@ def __init__(self, data=None, index=None, dtype=None, name=None, if index is None: if not is_list_like(data): data = [data] - index = _default_index(len(data)) + index = com._default_index(len(data)) # create/copy the manager if isinstance(data, SingleBlockManager): @@ -688,7 +679,7 @@ def __getitem__(self, key): pass elif key is Ellipsis: return self - elif is_bool_indexer(key): + elif com.is_bool_indexer(key): pass else: @@ -762,7 +753,7 @@ def _get_with(self, key): def _get_values_tuple(self, key): # mpl hackaround - if _any_none(*key): + if com._any_none(*key): return 
self._get_values(key) if not isinstance(self.index, MultiIndex): @@ -787,7 +778,7 @@ def setitem(key, value): try: self._set_with_engine(key, value) return - except (SettingWithCopyError): + except com.SettingWithCopyError: raise except (KeyError, ValueError): values = self._values @@ -887,7 +878,7 @@ def _set_labels(self, key, value): if isinstance(key, Index): key = key.values else: - key = _asarray_tuplesafe(key) + key = com._asarray_tuplesafe(key) indexer = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): @@ -939,7 +930,7 @@ def get_value(self, label, takeable=False): def _get_value(self, label, takeable=False): if takeable is True: - return _maybe_box_datetimelike(self._values[label]) + return com._maybe_box_datetimelike(self._values[label]) return self.index.get_value(self._values, label) _get_value.__doc__ = get_value.__doc__ @@ -1039,7 +1030,7 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): """ inplace = validate_bool_kwarg(inplace, 'inplace') if drop: - new_index = _default_index(len(self)) + new_index = com._default_index(len(self)) if level is not None and isinstance(self.index, MultiIndex): if not isinstance(level, (tuple, list)): level = [level] @@ -1182,7 +1173,7 @@ def to_dict(self, into=dict): defaultdict(<type 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) """ # GH16122 - into_c = standardize_mapping(into) + into_c = com.standardize_mapping(into) return into_c(compat.iteritems(self)) def to_frame(self, name=None): @@ -1260,7 +1251,7 @@ def count(self, level=None): from pandas.core.index import _get_na_value if level is None: - return notna(_values_from_object(self)).sum() + return notna(com._values_from_object(self)).sum() if isinstance(level, compat.string_types): level = self.index._get_level_number(level) @@ -1342,7 +1333,7 @@ def idxmin(self, axis=None, skipna=True, *args, **kwargs): numpy.ndarray.argmin """ skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs) - i = 
nanops.nanargmin(_values_from_object(self), skipna=skipna) + i = nanops.nanargmin(com._values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1378,7 +1369,7 @@ def idxmax(self, axis=None, skipna=True, *args, **kwargs): numpy.ndarray.argmax """ skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) - i = nanops.nanargmax(_values_from_object(self), skipna=skipna) + i = nanops.nanargmax(com._values_from_object(self), skipna=skipna) if i == -1: return np.nan return self.index[i] @@ -1419,7 +1410,7 @@ def round(self, decimals=0, *args, **kwargs): """ nv.validate_round(args, kwargs) - result = _values_from_object(self).round(decimals) + result = com._values_from_object(self).round(decimals) result = self._constructor(result, index=self.index).__finalize__(self) return result @@ -1536,7 +1527,7 @@ def diff(self, periods=1): ------- diffed : Series """ - result = algorithms.diff(_values_from_object(self), periods) + result = algorithms.diff(com._values_from_object(self), periods) return self._constructor(result, index=self.index).__finalize__(self) def autocorr(self, lag=1): @@ -1737,7 +1728,7 @@ def _binop(self, other, func, level=None, fill_value=None): with np.errstate(all='ignore'): result = func(this_vals, other_vals) - name = _maybe_match_name(self, other) + name = com._maybe_match_name(self, other) result = self._constructor(result, index=new_index, name=name) result = result.__finalize__(self) if name is None: @@ -1778,7 +1769,7 @@ def combine(self, other, func, fill_value=np.nan): """ if isinstance(other, Series): new_index = self.index.union(other.index) - new_name = _maybe_match_name(self, other) + new_name = com._maybe_match_name(self, other) new_values = np.empty(len(new_index), dtype=self.dtype) for i, idx in enumerate(new_index): lv = self.get(idx, fill_value) @@ -1823,7 +1814,7 @@ def combine_first(self, other): this = self.reindex(new_index, copy=False) other = other.reindex(new_index, copy=False) # TODO: do we 
need name? - name = _maybe_match_name(self, other) # noqa + name = com._maybe_match_name(self, other) # noqa rs_vals = com._where_compat(isna(this), other._values, this._values) return self._constructor(rs_vals, index=new_index).__finalize__(self) @@ -1911,7 +1902,7 @@ def _try_kind_sort(arr): bad = isna(arr) good = ~bad - idx = _default_index(len(self)) + idx = com._default_index(len(self)) argsorted = _try_kind_sort(arr[good]) @@ -2784,7 +2775,7 @@ def isin(self, values): dtype: bool """ - result = algorithms.isin(_values_from_object(self), values) + result = algorithms.isin(com._values_from_object(self), values) return self._constructor(result, index=self.index).__finalize__(self) def between(self, left, right, inclusive=True): @@ -3253,7 +3244,7 @@ def _try_cast(arr, take_fast_path): if isinstance(data, np.ndarray): raise Exception('Data must be 1-dimensional') else: - subarr = _asarray_tuplesafe(data, dtype=dtype) + subarr = com._asarray_tuplesafe(data, dtype=dtype) # This is to prevent mixed-type Series getting all casted to # NumPy string type, e.g. NaN --> '-1#IND'. 
diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index 49a0b8d86ad31..c7f5b0ba67c19 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -14,12 +14,10 @@ from pandas.core.dtypes.cast import maybe_upcast, find_common_type from pandas.core.dtypes.common import _ensure_platform_int, is_scipy_sparse -from pandas.core.common import _try_sort from pandas.compat.numpy import function as nv from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.series import Series -from pandas.core.frame import (DataFrame, extract_index, _prep_ndarray, - _default_index) +from pandas.core.frame import DataFrame, extract_index, _prep_ndarray import pandas.core.algorithms as algos from pandas.core.internals import (BlockManager, create_block_manager_from_arrays) @@ -28,7 +26,7 @@ from pandas._libs.sparse import BlockIndex, get_blocks from pandas.util._decorators import Appender import pandas.core.ops as ops - +import pandas.core.common as com _shared_doc_kwargs = dict(klass='SparseDataFrame') @@ -133,7 +131,7 @@ def _init_dict(self, data, index, columns, dtype=None): columns = _ensure_index(columns) data = {k: v for k, v in compat.iteritems(data) if k in columns} else: - columns = Index(_try_sort(list(data.keys()))) + columns = Index(com._try_sort(list(data.keys()))) if index is None: index = extract_index(list(data.values())) @@ -208,9 +206,9 @@ def _init_spmatrix(self, data, index, columns, dtype=None, def _prep_index(self, data, index, columns): N, K = data.shape if index is None: - index = _default_index(N) + index = com._default_index(N) if columns is None: - columns = _default_index(K) + columns = com._default_index(K) if len(columns) != K: raise ValueError('Column length mismatch: {columns} vs. 
{K}' diff --git a/pandas/core/sparse/series.py b/pandas/core/sparse/series.py index b5d2c0b607444..4b649927f8f72 100644 --- a/pandas/core/sparse/series.py +++ b/pandas/core/sparse/series.py @@ -10,7 +10,6 @@ from pandas.core.dtypes.missing import isna, notna from pandas.core.dtypes.common import is_scalar -from pandas.core.common import _values_from_object, _maybe_match_name from pandas.compat.numpy import function as nv from pandas.core.index import Index, _ensure_index, InvalidIndexError @@ -80,7 +79,7 @@ def wrapper(self, other): def _sparse_series_op(left, right, op, name): left, right = left.align(right, join='outer', copy=False) new_index = left.index - new_name = _maybe_match_name(left, right) + new_name = com._maybe_match_name(left, right) result = _sparse_array_op(left.values, right.values, op, name, series=True) @@ -423,7 +422,7 @@ def __getitem__(self, key): # Could not hash item, must be array-like? pass - key = _values_from_object(key) + key = com._values_from_object(key) if self.index.nlevels > 1 and isinstance(key, tuple): # to handle MultiIndex labels key = self.index.get_loc(key) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 278b220753196..5c31b9a5668ff 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -12,8 +12,8 @@ is_scalar, is_integer, is_re) -from pandas.core.common import _values_from_object +import pandas.core.common as com from pandas.core.algorithms import take_1d import pandas.compat as compat from pandas.core.base import NoNewAttributesMixin @@ -37,7 +37,7 @@ def _get_array_list(arr, others): from pandas.core.series import Series - if len(others) and isinstance(_values_from_object(others)[0], + if len(others) and isinstance(com._values_from_object(others)[0], (list, np.ndarray, Series)): arrays = [arr] + list(others) else: @@ -461,7 +461,7 @@ def rep(x, r): return compat.text_type.__mul__(x, r) repeats = np.asarray(repeats, dtype=object) - result = lib.vec_binop(_values_from_object(arr), repeats, 
rep) + result = lib.vec_binop(com._values_from_object(arr), repeats, rep) return result @@ -1235,7 +1235,6 @@ def str_translate(arr, table, deletechars=None): if deletechars is None: f = lambda x: x.translate(table) else: - from pandas import compat if compat.PY3: raise ValueError("deletechars is not a valid argument for " "str.translate in python 3. You should simply " diff --git a/pandas/core/window.py b/pandas/core/window.py index 5d2fa16876c11..4d6a1de60f59b 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -32,7 +32,7 @@ from pandas.core.base import (PandasObject, SelectionMixin, GroupByMixin) -from pandas.core.common import _asarray_tuplesafe, _count_not_none +import pandas.core.common as com import pandas._libs.window as _window from pandas import compat @@ -508,7 +508,7 @@ def _prep_window(self, **kwargs): window = self._get_window() if isinstance(window, (list, tuple, np.ndarray)): - return _asarray_tuplesafe(window).astype(float) + return com._asarray_tuplesafe(window).astype(float) elif is_integer(window): import scipy.signal as sig @@ -1908,33 +1908,33 @@ def dataframe_from_int_dict(data, frame_template): return _flex_binary_moment(arg2, arg1, f) -def _get_center_of_mass(com, span, halflife, alpha): - valid_count = _count_not_none(com, span, halflife, alpha) +def _get_center_of_mass(comass, span, halflife, alpha): + valid_count = com._count_not_none(comass, span, halflife, alpha) if valid_count > 1: - raise ValueError("com, span, halflife, and alpha " + raise ValueError("comass, span, halflife, and alpha " "are mutually exclusive") # Convert to center of mass; domain checks ensure 0 < alpha <= 1 - if com is not None: - if com < 0: - raise ValueError("com must satisfy: com >= 0") + if comass is not None: + if comass < 0: + raise ValueError("comass must satisfy: comass >= 0") elif span is not None: if span < 1: raise ValueError("span must satisfy: span >= 1") - com = (span - 1) / 2. + comass = (span - 1) / 2. 
elif halflife is not None: if halflife <= 0: raise ValueError("halflife must satisfy: halflife > 0") decay = 1 - np.exp(np.log(0.5) / halflife) - com = 1 / decay - 1 + comass = 1 / decay - 1 elif alpha is not None: if alpha <= 0 or alpha > 1: raise ValueError("alpha must satisfy: 0 < alpha <= 1") - com = (1.0 - alpha) / alpha + comass = (1.0 - alpha) / alpha else: - raise ValueError("Must pass one of com, span, halflife, or alpha") + raise ValueError("Must pass one of comass, span, halflife, or alpha") - return float(com) + return float(comass) def _offset(window, center): diff --git a/pandas/io/common.py b/pandas/io/common.py index c2d1da5a1035d..4ba969f0abac4 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -9,7 +9,7 @@ from pandas.compat import StringIO, BytesIO, string_types, text_type from pandas import compat from pandas.io.formats.printing import pprint_thing -from pandas.core.common import AbstractMethodError +import pandas.core.common as com from pandas.core.dtypes.common import is_number, is_file_like # compat @@ -66,7 +66,7 @@ def __iter__(self): return self def __next__(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) if not compat.PY3: diff --git a/pandas/io/formats/excel.py b/pandas/io/formats/excel.py index aff3e35861434..2fc648d2952c4 100644 --- a/pandas/io/formats/excel.py +++ b/pandas/io/formats/excel.py @@ -10,7 +10,7 @@ from pandas.compat import reduce from pandas.io.formats.css import CSSResolver, CSSWarning from pandas.io.formats.printing import pprint_thing -from pandas.core.common import _any_not_none +import pandas.core.common as com from pandas.core.dtypes.common import is_float, is_scalar from pandas.core.dtypes import missing from pandas import Index, MultiIndex, PeriodIndex @@ -549,7 +549,7 @@ def _format_hierarchical_rows(self): self.rowcounter += 1 # if index labels are not empty go ahead and dump - if _any_not_none(*index_labels) and self.header is not False: + if 
com._any_not_none(*index_labels) and self.header is not False: for cidx, name in enumerate(index_labels): yield ExcelCell(self.rowcounter - 1, cidx, name, diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index 886a887568d69..2293032ebb8a1 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -27,7 +27,7 @@ is_list_like) from pandas.core.dtypes.generic import ABCSparseArray from pandas.core.base import PandasObject -from pandas.core.common import _any_not_none, sentinel_factory +import pandas.core.common as com from pandas.core.index import Index, MultiIndex, _ensure_index from pandas import compat from pandas.compat import (StringIO, lzip, range, map, zip, u, @@ -1277,7 +1277,7 @@ def _column_header(): if self.fmt.sparsify: # GH3547 - sentinel = sentinel_factory() + sentinel = com.sentinel_factory() else: sentinel = None levels = self.columns.format(sparsify=sentinel, adjoin=False, @@ -1446,7 +1446,7 @@ def _write_hierarchical_rows(self, fmt_values, indent): if self.fmt.sparsify: # GH3547 - sentinel = sentinel_factory() + sentinel = com.sentinel_factory() levels = frame.index.format(sparsify=sentinel, adjoin=False, names=False) @@ -2372,7 +2372,7 @@ def single_row_table(row): # pragma: no cover def _has_names(index): if isinstance(index, MultiIndex): - return _any_not_none(*index.names) + return com._any_not_none(*index.names) else: return index.name is not None diff --git a/pandas/io/formats/style.py b/pandas/io/formats/style.py index 2c3d92cea0ad8..58796aa30f0bf 100644 --- a/pandas/io/formats/style.py +++ b/pandas/io/formats/style.py @@ -27,7 +27,7 @@ from pandas.compat import range from pandas.core.config import get_option from pandas.core.generic import _shared_docs -from pandas.core.common import _any_not_none, sentinel_factory +import pandas.core.common as com from pandas.core.indexing import _maybe_numeric_slice, _non_reducing_slice from pandas.util._decorators import Appender try: @@ -257,7 +257,8 @@ def 
format_attr(pair): row_es.append(es) head.append(row_es) - if (self.data.index.names and _any_not_none(*self.data.index.names) and + if (self.data.index.names and + com._any_not_none(*self.data.index.names) and not hidden_index): index_header_row = [] @@ -1207,7 +1208,7 @@ def _get_level_lengths(index, hidden_elements=None): Result is a dictionary of (level, inital_position): span """ - sentinel = sentinel_factory() + sentinel = com.sentinel_factory() levels = index.format(sparsify=sentinel, adjoin=False, names=False) if hidden_elements is None: diff --git a/pandas/io/html.py b/pandas/io/html.py index e7794864ccb3e..be4854bc19cc6 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -20,7 +20,7 @@ from pandas.compat import (lrange, lmap, u, string_types, iteritems, raise_with_traceback, binary_type) from pandas import Series -from pandas.core.common import AbstractMethodError +import pandas.core.common as com from pandas.io.formats.printing import pprint_thing _IMPORTS = False @@ -234,7 +234,7 @@ def _text_getter(self, obj): text : str or unicode The text from an individual DOM node. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_td(self, obj): """Return the td elements from a row element. @@ -248,7 +248,7 @@ def _parse_td(self, obj): columns : list of node-like These are the elements of each row, i.e., the columns. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_tables(self, doc, match, attrs): """Return all tables from the parsed DOM. @@ -275,7 +275,7 @@ def _parse_tables(self, doc, match, attrs): tables : list of node-like A list of <table> elements to be parsed into raw data. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_tr(self, table): """Return the list of row elements from the parsed table element. @@ -290,7 +290,7 @@ def _parse_tr(self, table): rows : list of node-like A list row elements of a table, usually <tr> or <th> elements. 
""" - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_thead(self, table): """Return the header of a table. @@ -305,7 +305,7 @@ def _parse_thead(self, table): thead : node-like A <thead>...</thead> element. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_tbody(self, table): """Return the body of the table. @@ -320,7 +320,7 @@ def _parse_tbody(self, table): tbody : node-like A <tbody>...</tbody> element. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _parse_tfoot(self, table): """Return the footer of the table if any. @@ -335,7 +335,7 @@ def _parse_tfoot(self, table): tfoot : node-like A <tfoot>...</tfoot> element. """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _build_doc(self): """Return a tree-like object that can be used to iterate over the DOM. @@ -344,7 +344,7 @@ def _build_doc(self): ------- obj : tree-like """ - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _build_table(self, table): header = self._parse_raw_thead(table) diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 6d35fc5769331..e3a1321336fb3 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -12,7 +12,7 @@ _infer_compression, _stringify_path, BaseIterator) from pandas.io.parsers import _validate_integer -from pandas.core.common import AbstractMethodError +import pandas.core.common as com from pandas.core.reshape.concat import concat from pandas.io.formats.printing import pprint_thing from .normalize import _convert_to_line_delimits @@ -93,7 +93,7 @@ def __init__(self, obj, orient, date_format, double_precision, self._format_axes() def _format_axes(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def write(self): return self._write(self.obj, self.orient, self.double_precision, @@ -648,7 +648,7 @@ def _convert_axes(self): setattr(self.obj, axis, new_axis) def 
_try_convert_types(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _try_convert_data(self, name, data, use_dtypes=True, convert_dates=True): @@ -761,7 +761,7 @@ def _try_convert_to_date(self, data): return data, False def _try_convert_dates(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class SeriesParser(Parser): diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 89b7a1de8acfc..01f7db7d68664 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -8,7 +8,7 @@ import pandas._libs.json as json from pandas import DataFrame from pandas.api.types import CategoricalDtype -from pandas.core.common import _all_not_none +import pandas.core.common as com from pandas.core.dtypes.common import ( is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype, is_bool_dtype, is_datetime64_dtype, is_datetime64tz_dtype, @@ -69,7 +69,7 @@ def as_json_table_type(x): def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" - if _all_not_none(*data.index.names): + if com._all_not_none(*data.index.names): nms = data.index.names if len(nms) == 1 and data.index.name == 'index': warnings.warn("Index name of 'index' is not round-trippable") diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 1d3fd8552eeb7..4508d5c1e1781 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -4,7 +4,7 @@ from distutils.version import LooseVersion from pandas import DataFrame, RangeIndex, Int64Index, get_option from pandas.compat import string_types -from pandas.core.common import AbstractMethodError +import pandas.core.common as com from pandas.io.common import get_filepath_or_buffer, is_s3_url @@ -64,10 +64,10 @@ def validate_dataframe(df): raise ValueError("Index level names must be strings") def write(self, df, path, compression, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def 
read(self, path, columns=None, **kwargs): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class PyArrowImpl(BaseImpl): diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 0d2c4a3e9f629..5135bb01fb378 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -30,7 +30,7 @@ from pandas.core.frame import DataFrame from pandas.core.arrays import Categorical from pandas.core import algorithms -from pandas.core.common import AbstractMethodError +import pandas.core.common as com from pandas.io.date_converters import generic_parser from pandas.errors import ParserWarning, ParserError, EmptyDataError from pandas.io.common import (get_filepath_or_buffer, is_file_like, @@ -1010,7 +1010,7 @@ def _make_engine(self, engine='c'): self._engine = klass(self.f, **self.options) def _failover_to_python(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def read(self, nrows=None): nrows = _validate_integer('nrows', nrows) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index c8490167022e5..106823199ee93 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -34,7 +34,7 @@ from pandas.core.base import StringMixin from pandas.io.formats.printing import adjoin, pprint_thing from pandas.errors import PerformanceWarning -from pandas.core.common import _asarray_tuplesafe, _all_none +import pandas.core.common as com from pandas.core.algorithms import match, unique from pandas.core.arrays.categorical import (Categorical, _factorize_from_iterables) @@ -903,7 +903,7 @@ def remove(self, key, where=None, start=None, stop=None): raise KeyError('No object named %s in the file' % key) # remove the node - if _all_none(where, start, stop): + if com._all_none(where, start, stop): s.group._f_remove(recursive=True) # delete from the table @@ -2368,7 +2368,7 @@ def delete(self, where=None, start=None, stop=None, **kwargs): support fully deleting the node in its entirety (only) - where specification must be None """ - 
if _all_none(where, start, stop): + if com._all_none(where, start, stop): self._handle.remove_node(self.group, recursive=True) return None @@ -3844,7 +3844,7 @@ def read(self, where=None, columns=None, **kwargs): tuple_index = long_index.values unique_tuples = lib.fast_unique(tuple_index) - unique_tuples = _asarray_tuplesafe(unique_tuples) + unique_tuples = com._asarray_tuplesafe(unique_tuples) indexer = match(unique_tuples, tuple_index) indexer = _ensure_platform_int(indexer) diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 3094d7d0ab1c6..8b03d6ddde4ec 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -10,6 +10,7 @@ import numpy as np from pandas.util._decorators import cache_readonly +import pandas.core.common as com from pandas.core.base import PandasObject from pandas.core.config import get_option from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike @@ -21,7 +22,6 @@ is_iterator) from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame -from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none from pandas.core.generic import _shared_docs, _shared_doc_kwargs from pandas.core.index import Index, MultiIndex @@ -225,7 +225,7 @@ def _iter_data(self, data=None, keep_index=False, fillna=None): # TODO: unused? 
# if self.sort_columns: - # columns = _try_sort(data.columns) + # columns = com._try_sort(data.columns) # else: # columns = data.columns @@ -367,7 +367,7 @@ def _compute_plot_data(self): self.data = numeric_data def _make_plot(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _add_table(self): if self.table is False: @@ -609,7 +609,7 @@ def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds): def _get_index_name(self): if isinstance(self.data.index, MultiIndex): name = self.data.index.names - if _any_not_none(*name): + if com._any_not_none(*name): name = ','.join(pprint_thing(x) for x in name) else: name = None @@ -957,7 +957,7 @@ def _make_plot(self): it = self._iter_data() stacking_id = self._get_stacking_id() - is_errorbar = _any_not_none(*self.errors.values()) + is_errorbar = com._any_not_none(*self.errors.values()) colors = self._get_colors() for i, (label, y) in enumerate(it): @@ -2182,7 +2182,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, layout=layout) _axes = _flatten(axes) - for i, col in enumerate(_try_sort(data.columns)): + for i, col in enumerate(com._try_sort(data.columns)): ax = _axes[i] ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) diff --git a/pandas/plotting/_style.py b/pandas/plotting/_style.py index 887202e22b4e0..426b29a8840f4 100644 --- a/pandas/plotting/_style.py +++ b/pandas/plotting/_style.py @@ -44,12 +44,12 @@ def _get_standard_colors(num_colors=None, colormap=None, color_type='default', if isinstance(colors, compat.string_types): colors = list(colors) elif color_type == 'random': - from pandas.core.common import _random_state + import pandas.core.common as com def random_color(column): """ Returns a random color represented as a list of length 3""" # GH17525 use common._random_state to avoid resetting the seed - rs = _random_state(column) + rs = com._random_state(column) return rs.rand(3).tolist() colors = lmap(random_color, 
lrange(num_colors)) diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 0ca25735fc03f..da881e6f29bc9 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -9,7 +9,7 @@ import numpy as np from pandas.compat import (lmap, range, lrange, StringIO, u) -from pandas.core.common import _all_none +import pandas.core.common as com from pandas.errors import ParserError from pandas import (DataFrame, Index, Series, MultiIndex, Timestamp, date_range, read_csv, compat, to_datetime) @@ -572,7 +572,7 @@ def _make_frame(names=None): df = _make_frame(True) df.to_csv(path, index=False) result = read_csv(path, header=[0, 1]) - assert _all_none(*result.columns.names) + assert com._all_none(*result.columns.names) result.columns.names = df.columns.names assert_frame_equal(df, result) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 9895ee06a22c0..e8a7bc50d8e3c 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -6,7 +6,7 @@ Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp, Timedelta, date_range, timedelta_range, Categorical) from pandas.compat import lzip -from pandas.core.common import _asarray_tuplesafe +import pandas.core.common as com from pandas.tests.indexes.common import Base import pandas.util.testing as tm import pandas as pd @@ -1177,7 +1177,7 @@ def test_to_tuples(self, tuples): # GH 18756 idx = IntervalIndex.from_tuples(tuples) result = idx.to_tuples() - expected = Index(_asarray_tuplesafe(tuples)) + expected = Index(com._asarray_tuplesafe(tuples)) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('tuples', [ @@ -1193,7 +1193,7 @@ def test_to_tuples_na(self, tuples, na_tuple): result = idx.to_tuples(na_tuple=na_tuple) # check the non-NA portion - expected_notna = Index(_asarray_tuplesafe(tuples[:-1])) + expected_notna = 
Index(com._asarray_tuplesafe(tuples[:-1])) result_notna = result[:-1] tm.assert_index_equal(result_notna, expected_notna) diff --git a/pandas/tests/io/parser/test_parsers.py b/pandas/tests/io/parser/test_parsers.py index 0ea4757b10e94..ec240531925e3 100644 --- a/pandas/tests/io/parser/test_parsers.py +++ b/pandas/tests/io/parser/test_parsers.py @@ -4,7 +4,7 @@ import pandas.util.testing as tm from pandas import read_csv, read_table, DataFrame -from pandas.core.common import AbstractMethodError +import pandas.core.common as com from pandas._libs.lib import Timestamp from pandas.compat import StringIO @@ -43,7 +43,7 @@ def read_table(self, *args, **kwargs): raise NotImplementedError def float_precision_choices(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def setup_method(self, method): self.dirpath = tm.get_data_path() diff --git a/pandas/tests/scalar/test_interval.py b/pandas/tests/scalar/test_interval.py index 23dad9736dac5..c9e6e84d226a8 100644 --- a/pandas/tests/scalar/test_interval.py +++ b/pandas/tests/scalar/test_interval.py @@ -2,7 +2,7 @@ import numpy as np from pandas import Interval, Timestamp, Timedelta -from pandas.core.common import _any_none +import pandas.core.common as com import pytest import pandas.util.testing as tm @@ -197,6 +197,6 @@ def test_constructor_errors_tz(self, tz_left, tz_right): # GH 18538 left = Timestamp('2017-01-01', tz=tz_left) right = Timestamp('2017-01-02', tz=tz_right) - error = TypeError if _any_none(tz_left, tz_right) else ValueError + error = TypeError if com._any_none(tz_left, tz_right) else ValueError with pytest.raises(error): Interval(left, right) diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py index 6b3b519d49f7f..b1e3177547ac6 100644 --- a/pandas/tests/test_algos.py +++ b/pandas/tests/test_algos.py @@ -17,7 +17,7 @@ from pandas._libs.hashtable import unique_label_indices from pandas.compat import lrange, range import pandas.core.algorithms as algos -from 
pandas.core.common import _asarray_tuplesafe +import pandas.core.common as com import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas.core.dtypes.dtypes import CategoricalDtype as CDT @@ -217,7 +217,8 @@ def test_factorize_tuple_list(self, data, expected_label, expected_level): tm.assert_numpy_array_equal(result[0], np.array(expected_label, dtype=np.intp)) - expected_level_array = _asarray_tuplesafe(expected_level, dtype=object) + expected_level_array = com._asarray_tuplesafe(expected_level, + dtype=object) tm.assert_numpy_array_equal(result[1], expected_level_array) def test_complex_sorting(self): diff --git a/pandas/tests/test_resample.py b/pandas/tests/test_resample.py index e9a517605020a..515850c14ecd6 100644 --- a/pandas/tests/test_resample.py +++ b/pandas/tests/test_resample.py @@ -20,9 +20,10 @@ from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame from pandas.compat import range, lrange, zip, product, OrderedDict -from pandas.core.base import SpecificationError, AbstractMethodError +from pandas.core.base import SpecificationError from pandas.errors import UnsupportedFunctionCall from pandas.core.groupby import DataError +import pandas.core.common as com from pandas.tseries.frequencies import to_offset from pandas.core.indexes.datetimes import date_range @@ -726,7 +727,7 @@ def index(self, _index_start, _index_end, _index_freq): @pytest.fixture def _series_name(self): - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) @pytest.fixture def _static_values(self, index): diff --git a/pandas/tests/util/test_util.py b/pandas/tests/util/test_util.py index 8da2b401fc848..3b0a428218771 100644 --- a/pandas/tests/util/test_util.py +++ b/pandas/tests/util/test_util.py @@ -8,7 +8,7 @@ import pytest from pandas.compat import intern -from pandas.core.common import _all_none +import pandas.core.common as com from pandas.util._move import move_into_mutable_buffer, BadMove, stolenbuf from pandas.util._decorators 
import deprecate_kwarg, make_signature from pandas.util._validators import (validate_args, validate_kwargs, @@ -438,7 +438,7 @@ def test_set_locale(self): pytest.skip("Only a single locale found, no point in " "trying to test setting another locale") - if _all_none(*self.current_locale): + if com._all_none(*self.current_locale): # Not sure why, but on some travis runs with pytest, # getlocale() returned (None, None). pytest.skip("Current locale is not set.") diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index e6b9f66c094c1..ec206e0997d0b 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -9,7 +9,7 @@ from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex, ABCPeriod from pandas.core.tools.datetimes import to_datetime -from pandas.core.common import AbstractMethodError +import pandas.core.common as com # import after tools, dateutil check from dateutil.easter import easter @@ -1148,7 +1148,7 @@ def apply(self, other): def _apply(self, n, other): """Handle specific apply logic for child classes""" - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) @apply_index_wraps def apply_index(self, i): @@ -1182,11 +1182,11 @@ def _get_roll(self, i, before_day_of_month, after_day_of_month): The roll array is based on the fact that i gets rolled back to the first day of the month. 
""" - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) def _apply_index_days(self, i, roll): """Apply the correct day for each date in i""" - raise AbstractMethodError(self) + raise com.AbstractMethodError(self) class SemiMonthEnd(SemiMonthOffset): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 1bea25a16ca1e..30915f7891c8c 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -32,7 +32,7 @@ is_list_like) from pandas.io.formats.printing import pprint_thing from pandas.core.algorithms import take_1d -from pandas.core.common import _all_not_none +import pandas.core.common as com import pandas.compat as compat from pandas.compat import ( @@ -484,7 +484,7 @@ def set_locale(new_locale, lc_var=locale.LC_ALL): except ValueError: yield new_locale else: - if _all_not_none(*normalized_locale): + if com._all_not_none(*normalized_locale): yield '.'.join(normalized_locale) else: yield new_locale
There are a bunch of modules that will do both `import pandas.core.common as com` and `from pandas.core.common import _whatever`, then sprinkled throughout we'll see both `_whatever` and `com._whatever`. This PR tracks down a bunch of those and standardizes on `com._whatever`. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19292
2018-01-18T04:26:04Z
2018-01-21T16:43:13Z
2018-01-21T16:43:13Z
2022-11-15T19:24:38Z
Fix (Series|DataFrame).interpolate for datetime dtypes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 86fc47dee09fc..80768945544f4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -430,6 +430,7 @@ Conversion - +- Bug in :meth:`Series.interpolate` and :class:`DataFrame.interpolate` where ``dtype='datetime64[ns]'`` series and columns were ignored. (:issue:`19199`) - Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) - Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`) - Bug in comparison of timezone-aware :class:`DatetimeIndex` against ``NaT`` incorrectly raising ``TypeError`` (:issue:`19276`) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 7ffef9c8a86d7..98fcde81e9e03 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5151,8 +5151,11 @@ def interpolate(self, method='linear', axis=0, limit=None, inplace=False, raise ValueError("Only `method=linear` interpolation is supported " "on MultiIndexes.") - if _maybe_transposed_self._data.get_dtype_counts().get( - 'object') == len(_maybe_transposed_self.T): + dtype_counts = _maybe_transposed_self._data.get_dtype_counts() + if ('object' in dtype_counts and + dtype_counts.get('object') == len(_maybe_transposed_self.T)): + # Checking for 'object' lets us avoid sometimes-fragile tranpose + # call GH#19198 raise TypeError("Cannot interpolate with all NaNs.") # create/use the index diff --git a/pandas/core/internals.py b/pandas/core/internals.py index d95062c54b4c6..4392ce31bdf3c 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1108,7 +1108,7 @@ def check_int_bool(self, inplace): # a fill na type method try: m = missing.clean_fill_method(method) - except: + except ValueError: m = None if m is not None: @@ -1123,7 +1123,7 @@ def check_int_bool(self, inplace): # try an interp method try: m = missing.clean_interp_method(method, **kwargs) 
- except: + except ValueError: m = None if m is not None: @@ -1182,24 +1182,9 @@ def _interpolate(self, method=None, index=None, values=None, if fill_value is None: fill_value = self.fill_value - if method in ('krogh', 'piecewise_polynomial', 'pchip'): - if not index.is_monotonic: - raise ValueError("{0} interpolation requires that the " - "index be monotonic.".format(method)) - # process 1-d slices in the axis direction - - def func(x): - - # process a 1-d slice, returning it - # should the axis argument be handled below in apply_along_axis? - # i.e. not an arg to missing.interpolate_1d - return missing.interpolate_1d(index, x, method=method, limit=limit, - limit_direction=limit_direction, - fill_value=fill_value, - bounds_error=False, **kwargs) - - # interp each column independently - interp_values = np.apply_along_axis(func, axis, data) + interp_values = _interpolate_values(method, data, index, axis, + limit, limit_direction, + fill_value, **kwargs) blocks = [self.make_block(interp_values, klass=self.__class__, fastpath=True)] @@ -2594,6 +2579,32 @@ def set(self, locs, values, check=False): self.values[locs] = values + def _interpolate(self, method=None, index=None, values=None, + fill_value=None, axis=0, limit=None, + limit_direction='forward', inplace=False, downcast=None, + mgr=None, **kwargs): + """ interpolate using scipy wrappers, adapted to datetime64 values""" + + inplace = validate_bool_kwarg(inplace, 'inplace') + data = self.values if inplace else self.values.copy() + + # only deal with floats + mask = isna(self.values) + data = data.astype(np.float64) + data[mask] = np.nan + + if fill_value is None: + fill_value = self.fill_value + + interp_values = _interpolate_values(method, data, index, axis, + limit, limit_direction, + fill_value, **kwargs) + interp_values = interp_values.astype(self.dtype) + + blocks = [self.make_block(interp_values, klass=self.__class__, + fastpath=True)] + return self._maybe_downcast(blocks, downcast) + class 
DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): """ implement a datetime64 block with a tz attribute """ @@ -2750,6 +2761,43 @@ def concat_same_type(self, to_concat, placement=None): return make_block( values, placement=placement or slice(0, len(values), 1)) + def _interpolate(self, method=None, index=None, values=None, + fill_value=None, axis=0, limit=None, + limit_direction='forward', inplace=False, downcast=None, + mgr=None, **kwargs): + """ interpolate using scipy wrappers, adapted to datetime64 values""" + + inplace = validate_bool_kwarg(inplace, 'inplace') + data = self.values if inplace else self.values.copy() + + # only deal with floats + mask = isna(self.values) + + # Convert to UTC for interpolation + data = data.tz_convert('UTC').values + + # data is 1D because it comes from a DatetimeIndex, but we need ndim + # to match self.ndim + data = data.reshape(self.shape) + mask = mask.reshape(self.shape) + data = data.astype(np.float64) + data[mask] = np.nan + + if fill_value is None: + fill_value = self.fill_value + + interp_values = _interpolate_values(method, data, index, axis, + limit, limit_direction, + fill_value, **kwargs) + + interp_values = interp_values.squeeze() + utc_values = self._holder(interp_values, tz='UTC') + interp_values = utc_values.tz_convert(self.values.tz) + + blocks = [self.make_block(interp_values, klass=self.__class__, + fastpath=True)] + return self._maybe_downcast(blocks, downcast) + class SparseBlock(NonConsolidatableMixIn, Block): """ implement as a list of sparse arrays of the same dtype """ @@ -5671,3 +5719,26 @@ def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): if not allow_fill: indexer = maybe_convert_indices(indexer, length) return 'fancy', indexer, len(indexer) + + +def _interpolate_values(method, data, index, axis, limit, limit_direction, + fill_value, **kwargs): + """interpolate using scipy wrappers""" + if method in ('krogh', 'piecewise_polynomial', 'pchip'): + if not index.is_monotonic: + 
raise ValueError("{0} interpolation requires that the " + "index be monotonic.".format(method)) + # process 1-d slices in the axis direction + + def func(x): + # process a 1-d slice, returning it + # should the axis argument be handled below in apply_along_axis? + # i.e. not an arg to missing.interpolate_1d + return missing.interpolate_1d(index, x, method=method, limit=limit, + limit_direction=limit_direction, + fill_value=fill_value, + bounds_error=False, **kwargs) + + # interp each column independently + interp_values = np.apply_along_axis(func, axis, data) + return interp_values diff --git a/pandas/tests/frame/test_missing.py b/pandas/tests/frame/test_missing.py index 2e4e8b9582cf6..f9db07ba2c67b 100644 --- a/pandas/tests/frame/test_missing.py +++ b/pandas/tests/frame/test_missing.py @@ -816,3 +816,16 @@ def test_interp_ignore_all_good(self): # all good result = df[['B', 'D']].interpolate(downcast=None) assert_frame_equal(result, df[['B', 'D']]) + + @pytest.mark.parametrize('tz', [None, 'US/Central']) + def test_interpolate_dt64_values(self, tz): + index = pd.Index([23, 26, 30]) + dti = pd.DatetimeIndex(['2015-09-23', '2015-09-26', '2015-09-30'], + tz=tz) + df = DataFrame(dti, index=index).reindex(range(23, 31)) + + dti_ex = pd.date_range('2015-09-23', '2015-09-30', tz=tz) + expected = DataFrame(dti_ex, index=df.index) + + result = df.interpolate() + assert_frame_equal(expected, result) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index 0dc5e23184af7..a4bdcaa549b8b 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -1278,3 +1278,16 @@ def test_series_interpolate_intraday(self): result = ts.reindex(new_index).interpolate(method='time') tm.assert_numpy_array_equal(result.values, exp.values) + + @pytest.mark.parametrize('tz', [None, 'US/Central']) + def test_interpolate_dt64_values(self, tz): + index = pd.Index([23, 26, 30]) + dti = pd.DatetimeIndex(['2015-09-23', '2015-09-26', 
'2015-09-30'], + tz=tz) + ser = pd.Series(dti, index=index).reindex(range(23, 31)) + + dti_ex = pd.date_range('2015-09-23', '2015-09-30', tz=tz) + expected = pd.Series(dti_ex, index=ser.index) + + result = ser.interpolate() + tm.assert_series_equal(expected, result)
- [x] closes #19199 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19291
2018-01-18T01:53:08Z
2018-01-23T18:52:11Z
null
2020-04-05T17:39:53Z
WIP: Dispatch Series comparison methods to Index implementations
diff --git a/pandas/core/ops.py b/pandas/core/ops.py index fc3ea106252db..cfd75201fcd53 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -11,7 +11,7 @@ import pandas as pd import datetime -from pandas._libs import (lib, index as libindex, +from pandas._libs import (lib, tslib as libts, algos as libalgos, iNaT) from pandas import compat @@ -39,8 +39,7 @@ from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, - ABCIndex, ABCDatetimeIndex, - ABCPeriodIndex) + ABCIndex, ABCDatetimeIndex, ABCIndexClass) # ----------------------------------------------------------------------------- # Functions that add arithmetic methods to objects, given arithmetic factory @@ -748,7 +747,7 @@ def _get_series_op_result_name(left, right): def _comp_method_OBJECT_ARRAY(op, x, y): if isinstance(y, list): y = construct_1d_object_array_from_listlike(y) - if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): + if isinstance(y, (np.ndarray, ABCSeries, ABCIndexClass)): if not is_object_dtype(y.dtype): y = y.astype(np.object_) @@ -775,8 +774,9 @@ def na_op(x, y): return op(x, y) elif is_categorical_dtype(y) and not is_scalar(y): return op(y, x) + # TODO: Does this make sense or should op be reversed? 
- if is_object_dtype(x.dtype): + elif is_object_dtype(x.dtype): result = _comp_method_OBJECT_ARRAY(op, x, y) else: @@ -797,15 +797,9 @@ def na_op(x, y): # we have a datetime/timedelta and may need to convert mask = None - if (needs_i8_conversion(x) or - (not is_scalar(y) and needs_i8_conversion(y))): - - if is_scalar(y): - mask = isna(x) - y = libindex.convert_scalar(x, _values_from_object(y)) - else: - mask = isna(x) | isna(y) - y = y.view('i8') + if not is_scalar(y) and needs_i8_conversion(y): + mask = isna(x) | isna(y) + y = y.view('i8') x = x.view('i8') try: @@ -821,54 +815,72 @@ def na_op(x, y): return result - def wrapper(self, other, axis=None): - # Validate the axis parameter - if axis is not None: - self._get_axis_number(axis) - - if isinstance(other, ABCSeries): - name = _maybe_match_name(self, other) - if not self._indexed_same(other): - msg = 'Can only compare identically-labeled Series objects' - raise ValueError(msg) - return self._constructor(na_op(self.values, other.values), - index=self.index, name=name) - elif isinstance(other, ABCDataFrame): # pragma: no cover + def wrapper(self, other): + if isinstance(other, ABCDataFrame): # pragma: no cover return NotImplemented - elif isinstance(other, (np.ndarray, pd.Index)): - # do not check length of zerodim array - # as it will broadcast - if (not is_scalar(lib.item_from_zerodim(other)) and - len(self) != len(other)): - raise ValueError('Lengths must match to compare') - if isinstance(other, ABCPeriodIndex): - # temp workaround until fixing GH 13637 - # tested in test_nat_comparisons - # (pandas.tests.series.test_operators.TestSeriesOperators) - return self._constructor(na_op(self.values, - other.astype(object).values), - index=self.index) + elif isinstance(other, ABCSeries) and not self._indexed_same(other): + msg = 'Can only compare identically-labeled Series objects' + raise ValueError(msg) - return self._constructor(na_op(self.values, np.asarray(other)), - index=self.index).__finalize__(self) + 
res_name = _get_series_op_result_name(self, other) - elif isinstance(other, pd.Categorical): - if not is_categorical_dtype(self): - msg = ("Cannot compare a Categorical for op {op} with Series " - "of dtype {typ}.\nIf you want to compare values, use " - "'series <op> np.asarray(other)'.") - raise TypeError(msg.format(op=op, typ=self.dtype)) + if is_timedelta64_dtype(self): + res = op(pd.TimedeltaIndex(self), other) + return self._constructor(res, index=self.index, name=res_name) + + elif is_datetime64_dtype(self) or is_datetime64tz_dtype(self): + # kludge; DatetimeIndex refuses to compare against None or + # datetime.date; until the "right" behavior is resolved, we cast + # these types here to types that DatetimeIndex understand. + if type(other) is datetime.date: + other = datetime.datetime(other.year, other.month, other.day) + elif other is None: + other = pd.NaT + res = op(pd.DatetimeIndex(self), other) + return self._constructor(res, index=self.index, name=res_name) - if is_categorical_dtype(self): + elif isinstance(other, ABCSeries): + # Note: Ordering matters; this needs to go before + # is_categorical_dtype(self) branch. + res = na_op(self.values, other.values) + return self._constructor(res, + index=self.index, name=res_name) + + elif is_categorical_dtype(self): # cats are a special case as get_values() would return an ndarray, # which would then not take categories ordering into account # we can go directly to op, as the na_op would just test again and # dispatch to it. 
with np.errstate(all='ignore'): res = op(self.values, other) + # Note: self.values is a pd.Categorical object + return self._constructor(res, index=self.index, + name=res_name, dtype='bool') + + elif isinstance(other, pd.Categorical): + # Ordering of conditions here matters; we know at this point + # that not is_categorical_dtype(self) + msg = ("Cannot compare a Categorical for op {op} with Series " + "of dtype {typ}.\nIf you want to compare values, use " + "'series <op> np.asarray(other)'.") + raise TypeError(msg.format(op=op, typ=self.dtype)) + + elif isinstance(other, (np.ndarray, pd.Index)): + # do not check length of zerodim array + # as it will broadcast + if (not is_scalar(lib.item_from_zerodim(other)) and + len(self) != len(other)): + raise ValueError('Lengths must match to compare') + + res = na_op(self.values, np.asarray(other)) + return self._constructor(res, + index=self.index).__finalize__(self) + # TODO: Why __finalize__ here but not elsewhere? + else: values = self.get_values() + # TODO: why get_values() here and just values elsewhere? if isinstance(other, (list, np.ndarray)): other = np.asarray(other) @@ -881,9 +893,10 @@ def wrapper(self, other, axis=None): # always return a full value series here res = _values_from_object(res) - res = pd.Series(res, index=self.index, name=self.name, dtype='bool') - return res - + res = pd.Series(res, index=self.index, name=res_name, dtype='bool') + # TODO: Why not use self._constructor here? + # TODO: pass dtype='bool' in other locations? 
+ return res return wrapper diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index 8948c5f79900d..bfba134541164 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -760,11 +760,12 @@ def test_equals_op(self): if isinstance(index_a, PeriodIndex): return + len_msg = "Lengths must match|different lengths" n = len(index_a) index_b = index_a[0:-1] index_c = index_a[0:-1].append(index_a[-2:-1]) index_d = index_a[0:1] - with tm.assert_raises_regex(ValueError, "Lengths must match"): + with tm.assert_raises_regex(ValueError, len_msg): index_a == index_b expected1 = np.array([True] * n) expected2 = np.array([True] * (n - 1) + [False]) @@ -776,7 +777,7 @@ def test_equals_op(self): array_b = np.array(index_a[0:-1]) array_c = np.array(index_a[0:-1].append(index_a[-2:-1])) array_d = np.array(index_a[0:1]) - with tm.assert_raises_regex(ValueError, "Lengths must match"): + with tm.assert_raises_regex(ValueError, len_msg): index_a == array_b tm.assert_numpy_array_equal(index_a == array_a, expected1) tm.assert_numpy_array_equal(index_a == array_c, expected2) @@ -786,22 +787,22 @@ def test_equals_op(self): series_b = Series(array_b) series_c = Series(array_c) series_d = Series(array_d) - with tm.assert_raises_regex(ValueError, "Lengths must match"): + with tm.assert_raises_regex(ValueError, len_msg): index_a == series_b tm.assert_numpy_array_equal(index_a == series_a, expected1) tm.assert_numpy_array_equal(index_a == series_c, expected2) # cases where length is 1 for one of them - with tm.assert_raises_regex(ValueError, "Lengths must match"): + with tm.assert_raises_regex(ValueError, len_msg): index_a == index_d - with tm.assert_raises_regex(ValueError, "Lengths must match"): + with tm.assert_raises_regex(ValueError, len_msg): index_a == series_d - with tm.assert_raises_regex(ValueError, "Lengths must match"): + with tm.assert_raises_regex(ValueError, len_msg): index_a == array_d msg = "Can only compare identically-labeled 
Series objects" with tm.assert_raises_regex(ValueError, msg): series_a == series_d - with tm.assert_raises_regex(ValueError, "Lengths must match"): + with tm.assert_raises_regex(ValueError, len_msg): series_a == array_d # comparing with a scalar should broadcast; note that we are excluding
Same thing we've been doing for arithmetic operations, now moving on to comparison methods. Based on some trial-and-error, it seems like there are some inconsistencies to be worked out for the categorical dtype case. This PR: - Dispatches timedelta and datetime `Series` comparisons to their respective `Index` subclass implementations - Patches `DatetimeIndex._assert_tz_awareness_compat`, which has already been broken out into #19276. - Kludges `datetime.date` and `None` case that `Series` treats differently from `DatetimeIndex`; decisions should be made about those so these kludges can be removed. - Removes a `# temp workaround until fixing GH 13637` for comparison with `PeriodIndex` that appears to no longer be needed. - Fixes names of the results for comparisons between `Series` and `Index` classes. (probably needs tests) - Notes some small inconsistencies in how `_constructor` is called, also discussed in #19271.
https://api.github.com/repos/pandas-dev/pandas/pulls/19288
2018-01-17T18:32:06Z
2018-01-23T18:51:53Z
null
2018-06-22T03:42:01Z
DOC: add `source activate` for older versions of Anaconda and fix a typo
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index cdbbad6eb75d6..258ab874cafcf 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -171,6 +171,9 @@ We'll now kick off a three-step process: # Create and activate the build environment conda env create -f ci/environment-dev.yaml conda activate pandas-dev + + # or with older versions of Anaconda: + source activate pandas-dev # Build and install pandas python setup.py build_ext --inplace -j 4 @@ -456,7 +459,7 @@ Here are *some* of the more common ``cpplint`` issues: - we restrict line-length to 80 characters to promote readability - every header file must include a header guard to avoid name collisions if re-included -:ref:`Continuous Integration <contributing.ci>`. will run the +:ref:`Continuous Integration <contributing.ci>` will run the `cpplint <https://pypi.python.org/pypi/cpplint>`_ tool and report any stylistic errors in your code. Therefore, it is helpful before submitting code to run the check yourself::
In the contrib docs, `conda activate` should be `source activate`, and there was also an extraneous period after the "continuous integration" heading.
https://api.github.com/repos/pandas-dev/pandas/pulls/19282
2018-01-17T13:57:23Z
2018-01-19T08:57:42Z
2018-01-19T08:57:42Z
2018-01-19T08:57:42Z
BUG: timezone comparisions are inconsistent, manifesting in bugs in .concat
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 853d5cee11cd1..a93e0b1a3b0dd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -427,7 +427,6 @@ Conversion - Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`) -- - - Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) - Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`) @@ -503,6 +502,7 @@ Reshaping - Bug in :func:`Dataframe.pivot_table` which fails when the ``aggfunc`` arg is of type string. The behavior is now consistent with other methods like ``agg`` and ``apply`` (:issue:`18713`) - Bug in :func:`DataFrame.merge` in which merging using ``Index`` objects as vectors raised an Exception (:issue:`19038`) - Bug in :func:`DataFrame.stack`, :func:`DataFrame.unstack`, :func:`Series.unstack` which were not returning subclasses (:issue:`15563`) +- Bug in timezone comparisons, manifesting as a conversion of the index to UTC in ``.concat()`` (:issue:`18523`) - Numeric diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index f1da60057186c..e1ffd450c9a68 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -6,7 +6,7 @@ cimport cython import cython from numpy cimport ndarray from tslib import Timestamp -from tslibs.timezones cimport get_timezone +from tslibs.timezones cimport tz_compare from cpython.object cimport (Py_EQ, Py_NE, Py_GT, Py_LT, Py_GE, Py_LE, PyObject_RichCompare) @@ -131,7 +131,7 @@ cdef class Interval(IntervalMixin): if not left <= right: raise ValueError('left side of interval must be <= right side') if (isinstance(left, Timestamp) and - get_timezone(left.tzinfo) != get_timezone(right.tzinfo)): + not tz_compare(left.tzinfo, right.tzinfo)): # GH 18538 msg = ("left and 
right must have the same time zone, got " "'{left_tz}' and '{right_tz}'") diff --git a/pandas/_libs/src/inference.pyx b/pandas/_libs/src/inference.pyx index b74b3a79fd69a..e15f276b39bf8 100644 --- a/pandas/_libs/src/inference.pyx +++ b/pandas/_libs/src/inference.pyx @@ -5,7 +5,7 @@ cimport cython from tslibs.nattype import NaT from tslibs.conversion cimport convert_to_tsobject from tslibs.timedeltas cimport convert_to_timedelta64 -from tslibs.timezones cimport get_timezone +from tslibs.timezones cimport get_timezone, tz_compare from datetime import datetime, timedelta iNaT = util.get_nat() @@ -907,7 +907,7 @@ cpdef bint is_datetime_with_singletz_array(ndarray values): val = values[j] if val is not NaT: tz = getattr(val, 'tzinfo', None) - if base_tz != tz and base_tz != get_timezone(tz): + if not tz_compare(base_tz, tz): return False break diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 53abdd013ec37..9cfe41172fedc 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -35,7 +35,7 @@ from timedeltas cimport cast_from_unit from timezones cimport (is_utc, is_tzlocal, is_fixed_offset, treat_tz_as_dateutil, treat_tz_as_pytz, get_utcoffset, get_dst_info, - get_timezone, maybe_get_tz) + get_timezone, maybe_get_tz, tz_compare) from parsing import parse_datetime_string from nattype import nat_strings, NaT @@ -169,7 +169,7 @@ def datetime_to_datetime64(ndarray[object] values): elif PyDateTime_Check(val): if val.tzinfo is not None: if inferred_tz is not None: - if get_timezone(val.tzinfo) != inferred_tz: + if not tz_compare(val.tzinfo, inferred_tz): raise ValueError('Array must be all same time zone') else: inferred_tz = get_timezone(val.tzinfo) diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index de31643742d87..1ddb299598fd0 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -33,7 +33,8 @@ from np_datetime cimport 
(reverse_ops, cmp_scalar, check_dts_bounds, is_leapyear) from timedeltas import Timedelta from timedeltas cimport delta_to_nanoseconds -from timezones cimport get_timezone, is_utc, maybe_get_tz, treat_tz_as_pytz +from timezones cimport ( + get_timezone, is_utc, maybe_get_tz, treat_tz_as_pytz, tz_compare) # ---------------------------------------------------------------------- # Constants @@ -266,7 +267,7 @@ cdef class _Timestamp(datetime): other = Timestamp(other) # validate tz's - if get_timezone(self.tzinfo) != get_timezone(other.tzinfo): + if not tz_compare(self.tzinfo, other.tzinfo): raise TypeError("Timestamp subtraction must have the " "same timezones or no timezones") diff --git a/pandas/_libs/tslibs/timezones.pxd b/pandas/_libs/tslibs/timezones.pxd index 95e0474b3a174..67353f3eec614 100644 --- a/pandas/_libs/tslibs/timezones.pxd +++ b/pandas/_libs/tslibs/timezones.pxd @@ -7,6 +7,7 @@ cdef bint is_tzlocal(object tz) cdef bint treat_tz_as_pytz(object tz) cdef bint treat_tz_as_dateutil(object tz) +cpdef bint tz_compare(object start, object end) cpdef object get_timezone(object tz) cpdef object maybe_get_tz(object tz) diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index fdcf40337fab9..242b8262a8721 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -275,7 +275,7 @@ cdef object get_dst_info(object tz): def infer_tzinfo(start, end): if start is not None and end is not None: tz = start.tzinfo - if not (get_timezone(tz) == get_timezone(end.tzinfo)): + if not tz_compare(tz, end.tzinfo): msg = 'Inputs must both have the same timezone, {tz1} != {tz2}' raise AssertionError(msg.format(tz1=tz, tz2=end.tzinfo)) elif start is not None: @@ -285,3 +285,32 @@ def infer_tzinfo(start, end): else: tz = None return tz + + +cpdef bint tz_compare(object start, object end): + """ + Compare string representations of timezones + + The same timezone can be represented as different instances of + timezones. 
For example + `<DstTzInfo 'Europe/Paris' LMT+0:09:00 STD>` and + `<DstTzInfo 'Europe/Paris' CET+1:00:00 STD>` are essentially same + timezones but aren't evaluted such, but the string representation + for both of these is `'Europe/Paris'`. + + This exists only to add a notion of equality to pytz-style zones + that is compatible with the notion of equality expected of tzinfo + subclasses. + + Parameters + ---------- + start : tzinfo + end : tzinfo + + Returns: + ------- + compare : bint + + """ + # GH 18523 + return get_timezone(start) == get_timezone(end) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d83d2d2c93ec8..4ec929947783c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -521,8 +521,7 @@ def _generate(cls, start, end, periods, name, offset, tz = tz.localize(date.replace(tzinfo=None)).tzinfo if tz is not None and inferred_tz is not None: - if not (timezones.get_timezone(inferred_tz) == - timezones.get_timezone(tz)): + if not timezones.tz_compare(inferred_tz, tz): raise AssertionError("Inferred time zone not equal to passed " "time zone") @@ -1192,7 +1191,7 @@ def _maybe_utc_convert(self, other): raise TypeError('Cannot join tz-naive with tz-aware ' 'DatetimeIndex') - if self.tz != other.tz: + if not timezones.tz_compare(self.tz, other.tz): this = self.tz_convert('UTC') other = other.tz_convert('UTC') return this, other @@ -1296,7 +1295,7 @@ def __iter__(self): def _wrap_union_result(self, other, result): name = self.name if self.name == other.name else None - if self.tz != other.tz: + if not timezones.tz_compare(self.tz, other.tz): raise ValueError('Passed item and index have different timezone') return self._simple_new(result, name=name, freq=None, tz=self.tz) diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 150410e404305..7e126dd56775b 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2074,6 
+2074,45 @@ def test_concat_order(self): expected = expected.sort_values() tm.assert_index_equal(result, expected) + def test_concat_datetime_timezone(self): + # GH 18523 + idx1 = pd.date_range('2011-01-01', periods=3, freq='H', + tz='Europe/Paris') + idx2 = pd.date_range(start=idx1[0], end=idx1[-1], freq='H') + df1 = pd.DataFrame({'a': [1, 2, 3]}, index=idx1) + df2 = pd.DataFrame({'b': [1, 2, 3]}, index=idx2) + result = pd.concat([df1, df2], axis=1) + + exp_idx = DatetimeIndex(['2011-01-01 00:00:00+01:00', + '2011-01-01 01:00:00+01:00', + '2011-01-01 02:00:00+01:00'], + freq='H' + ).tz_localize('UTC').tz_convert('Europe/Paris') + + expected = pd.DataFrame([[1, 1], [2, 2], [3, 3]], + index=exp_idx, columns=['a', 'b']) + + tm.assert_frame_equal(result, expected) + + idx3 = pd.date_range('2011-01-01', periods=3, + freq='H', tz='Asia/Tokyo') + df3 = pd.DataFrame({'b': [1, 2, 3]}, index=idx3) + result = pd.concat([df1, df3], axis=1) + + exp_idx = DatetimeIndex(['2010-12-31 15:00:00+00:00', + '2010-12-31 16:00:00+00:00', + '2010-12-31 17:00:00+00:00', + '2010-12-31 23:00:00+00:00', + '2011-01-01 00:00:00+00:00', + '2011-01-01 01:00:00+00:00'] + ).tz_localize('UTC') + + expected = pd.DataFrame([[np.nan, 1], [np.nan, 2], [np.nan, 3], + [1, np.nan], [2, np.nan], [3, np.nan]], + index=exp_idx, columns=['a', 'b']) + + tm.assert_frame_equal(result, expected) + @pytest.mark.parametrize('pdt', [pd.Series, pd.DataFrame, pd.Panel]) @pytest.mark.parametrize('dt', np.sctypes['float'])
closes #18523 superseded #18596
https://api.github.com/repos/pandas-dev/pandas/pulls/19281
2018-01-17T11:39:19Z
2018-01-18T00:14:03Z
2018-01-18T00:14:03Z
2018-01-18T00:14:10Z
DOC: add missing period to DataFrame docstring
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2c05eefa5706e..35cc7a2a34acb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -250,7 +250,7 @@ class DataFrame(NDFrame): """ Two-dimensional size-mutable, potentially heterogeneous tabular data structure with labeled axes (rows and columns). Arithmetic operations align on both row and column labels. Can be thought of as a dict-like - container for Series objects. The primary pandas data structure + container for Series objects. The primary pandas data structure. Parameters ----------
- [x] closes #19141 Went ahead and did this one manually. Working on a regex that will apply this to all docstrings in general and will make a new PR when it's ready.
https://api.github.com/repos/pandas-dev/pandas/pulls/19280
2018-01-17T07:35:25Z
2018-01-17T11:46:45Z
2018-01-17T11:46:45Z
2018-01-17T11:46:47Z
remove unused block attribute
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 43fdd454250a5..bc75a110354c0 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -97,7 +97,6 @@ class Block(PandasObject): is_sparse = False _box_to_block_values = True _can_hold_na = False - _downcast_dtype = None _can_consolidate = True _verify_integrity = True _validate_ndim = True @@ -1841,7 +1840,6 @@ def equals(self, other): class FloatBlock(FloatOrComplexBlock): __slots__ = () is_float = True - _downcast_dtype = 'int64' def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element)
grepping across the code, `_downcast_dtype` doesn't show up anywhere else. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19279
2018-01-17T07:33:09Z
2018-01-18T00:42:04Z
2018-01-18T00:42:04Z
2018-02-11T21:59:04Z
Remove timeop
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 853d5cee11cd1..ca5385ee4f857 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -310,6 +310,7 @@ Other API Changes - :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`) - Addition or subtraction of ``NaT`` from :class:`TimedeltaIndex` will return ``TimedeltaIndex`` instead of ``DatetimeIndex`` (:issue:`19124`) - :func:`DatetimeIndex.shift` and :func:`TimedeltaIndex.shift` will now raise ``NullFrequencyError`` (which subclasses ``ValueError``, which was raised in older versions) when the index object frequency is ``None`` (:issue:`19147`) +- Addition and subtraction of ``NaN`` from a :class:`Series` with ``dtype='timedelta64[ns]'`` will raise a ``TypeError` instead of treating the ``NaN`` as ``NaT`` (:issue:`19274`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 866329b16c830..3e671731be348 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -393,7 +393,7 @@ def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): if opstr in ['__floordiv__']: result = left // right else: - result = op(left, float(right)) + result = op(left, np.float64(right)) result = self._maybe_mask_results(result, convert='float64') return Index(result, name=self.name, copy=False) diff --git a/pandas/core/ops.py b/pandas/core/ops.py index fc3ea106252db..fc04d9d291bf9 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -6,13 +6,12 @@ # necessary to enforce truediv in Python 2.X from __future__ import division import operator -import warnings + import numpy as np import pandas as pd -import datetime from pandas._libs import (lib, index as libindex, - tslib as libts, algos as libalgos, iNaT) + algos as libalgos) from pandas import 
compat from pandas.util._decorators import Appender @@ -20,7 +19,7 @@ from pandas.compat import bind_method import pandas.core.missing as missing -from pandas.errors import PerformanceWarning, NullFrequencyError +from pandas.errors import NullFrequencyError from pandas.core.common import _values_from_object, _maybe_match_name from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.common import ( @@ -28,9 +27,9 @@ is_datetimelike_v_numeric, is_integer_dtype, is_categorical_dtype, is_object_dtype, is_timedelta64_dtype, - is_datetime64_dtype, is_datetime64tz_dtype, is_datetime64_ns_dtype, - is_bool_dtype, is_datetimetz, - is_list_like, is_offsetlike, + is_datetime64_dtype, is_datetime64tz_dtype, + is_bool_dtype, + is_list_like, is_scalar, _ensure_object) from pandas.core.dtypes.cast import ( @@ -39,7 +38,7 @@ from pandas.core.dtypes.generic import ( ABCSeries, ABCDataFrame, - ABCIndex, ABCDatetimeIndex, + ABCIndex, ABCPeriodIndex) # ----------------------------------------------------------------------------- @@ -294,287 +293,6 @@ def add_flex_arithmetic_methods(cls, flex_arith_method, exclude=exclude) -class _Op(object): - - """ - Wrapper around Series arithmetic operations. - Generally, you should use classmethod ``_Op.get_op`` as an entry point. - - This validates and coerces lhs and rhs depending on its dtype and - based on op. See _TimeOp also. - - Parameters - ---------- - left : Series - lhs of op - right : object - rhs of op - name : str - name of op - na_op : callable - a function which wraps op - """ - - fill_value = np.nan - wrap_results = staticmethod(lambda x: x) - dtype = None - - def __init__(self, left, right, name, na_op): - self.left = left - self.right = right - - self.name = name - self.na_op = na_op - - self.lvalues = left - self.rvalues = right - - @classmethod - def get_op(cls, left, right, name, na_op): - """ - Get op dispatcher, returns _Op or _TimeOp. 
- - If ``left`` and ``right`` are appropriate for datetime arithmetic with - operation ``name``, processes them and returns a ``_TimeOp`` object - that stores all the required values. Otherwise, it will generate - either a ``_Op``, indicating that the operation is performed via - normal numpy path. - """ - is_timedelta_lhs = is_timedelta64_dtype(left) - - if not is_timedelta_lhs: - return _Op(left, right, name, na_op) - else: - return _TimeOp(left, right, name, na_op) - - -class _TimeOp(_Op): - """ - Wrapper around Series datetime/time/timedelta arithmetic operations. - Generally, you should use classmethod ``_Op.get_op`` as an entry point. - """ - fill_value = iNaT - - def __init__(self, left, right, name, na_op): - super(_TimeOp, self).__init__(left, right, name, na_op) - - lvalues = self._convert_to_array(left, name=name) - rvalues = self._convert_to_array(right, name=name, other=lvalues) - - # left - self.is_timedelta_lhs = is_timedelta64_dtype(lvalues) - assert self.is_timedelta_lhs - - # right - self.is_offset_rhs = is_offsetlike(right) - self.is_datetime64_rhs = is_datetime64_dtype(rvalues) - self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues) - self.is_datetime_rhs = (self.is_datetime64_rhs or - self.is_datetime64tz_rhs) - self.is_timedelta_rhs = is_timedelta64_dtype(rvalues) - self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u') - self.is_floating_rhs = rvalues.dtype.kind == 'f' - - self._validate(lvalues, rvalues, name) - self.lvalues, self.rvalues = self._convert_for_datetime(lvalues, - rvalues) - - def _validate_timedelta(self, name): - # assumes self.is_timedelta_lhs - - if self.is_integer_rhs or self.is_floating_rhs: - # timedelta and integer mul/div - self._check_timedelta_with_numeric(name) - elif self.is_timedelta_rhs or self.is_offset_rhs: - # 2 timedeltas - if name not in ('__div__', '__rdiv__', '__truediv__', - '__rtruediv__', '__add__', '__radd__', '__sub__', - '__rsub__', '__floordiv__', '__rfloordiv__'): - raise TypeError("can only 
operate on a timedeltas for addition" - ", subtraction, and division, but the operator" - " [{name}] was passed".format(name=name)) - elif self.is_datetime_rhs: - if name not in ('__add__', '__radd__', '__rsub__'): - raise TypeError("can only operate on a timedelta/DateOffset " - "with a rhs of a datetime for addition, " - "but the operator [{name}] was passed" - .format(name=name)) - else: - raise TypeError('cannot operate on a series without a rhs ' - 'of a series/ndarray of type datetime64[ns] ' - 'or a timedelta') - - def _validate(self, lvalues, rvalues, name): - return self._validate_timedelta(name) - - def _check_timedelta_with_numeric(self, name): - if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'): - raise TypeError("can only operate on a timedelta and an " - "integer or a float for division and " - "multiplication, but the operator [{name}] " - "was passed".format(name=name)) - - def _convert_to_array(self, values, name=None, other=None): - """converts values to ndarray""" - from pandas.core.tools.timedeltas import to_timedelta - - ovalues = values - supplied_dtype = None - if not is_list_like(values): - values = np.array([values]) - - # if this is a Series that contains relevant dtype info, then use this - # instead of the inferred type; this avoids coercing Series([NaT], - # dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]') - elif (isinstance(values, (pd.Series, ABCDatetimeIndex)) and - (is_timedelta64_dtype(values) or is_datetime64_dtype(values))): - supplied_dtype = values.dtype - - inferred_type = lib.infer_dtype(values) - if (inferred_type in ('datetime64', 'datetime', 'date', 'time') or - is_datetimetz(inferred_type)): - # if we have a other of timedelta, but use pd.NaT here we - # we are in the wrong path - if (supplied_dtype is None and other is not None and - (other.dtype in ('timedelta64[ns]', 'datetime64[ns]')) and - isna(values).all()): - values = np.empty(values.shape, dtype='timedelta64[ns]') - values[:] = 
iNaT - - elif isinstance(values, ABCDatetimeIndex): - # a datelike - pass - elif isinstance(ovalues, datetime.datetime): - # datetime scalar - values = pd.DatetimeIndex(values) - # datetime array with tz - elif is_datetimetz(values): - if isinstance(values, ABCSeries): - values = values._values - elif not (isinstance(values, (np.ndarray, ABCSeries)) and - is_datetime64_dtype(values)): - values = libts.array_to_datetime(values) - elif (is_datetime64_dtype(values) and - not is_datetime64_ns_dtype(values)): - # GH#7996 e.g. np.datetime64('2013-01-01') is datetime64[D] - values = values.astype('datetime64[ns]') - - elif inferred_type in ('timedelta', 'timedelta64'): - # have a timedelta, convert to to ns here - values = to_timedelta(values, errors='coerce', box=False) - if isinstance(other, ABCDatetimeIndex): - # GH#13905 - # Defer to DatetimeIndex/TimedeltaIndex operations where - # timezones are handled carefully. - values = pd.TimedeltaIndex(values) - elif inferred_type == 'integer': - # py3 compat where dtype is 'm' but is an integer - if values.dtype.kind == 'm': - values = values.astype('timedelta64[ns]') - elif isinstance(values, pd.PeriodIndex): - values = values.to_timestamp().to_series() - elif name not in ('__truediv__', '__div__', '__mul__', '__rmul__'): - raise TypeError("incompatible type for a datetime/timedelta " - "operation [{name}]".format(name=name)) - elif inferred_type == 'floating': - if (isna(values).all() and - name in ('__add__', '__radd__', '__sub__', '__rsub__')): - values = np.empty(values.shape, dtype=other.dtype) - values[:] = iNaT - return values - elif is_offsetlike(values): - return values - else: - raise TypeError("incompatible type [{dtype}] for a " - "datetime/timedelta operation" - .format(dtype=np.array(values).dtype)) - - return values - - def _convert_for_datetime(self, lvalues, rvalues): - from pandas.core.tools.timedeltas import to_timedelta - - mask = isna(lvalues) | isna(rvalues) - - # datetimes require views - if 
self.is_datetime_rhs: - - # datetime subtraction means timedelta - if self.is_datetime64tz_rhs: - self.dtype = rvalues.dtype - else: - self.dtype = 'datetime64[ns]' - - # if adding single offset try vectorized path - # in DatetimeIndex; otherwise elementwise apply - def _offset(lvalues, rvalues): - if len(lvalues) == 1: - rvalues = pd.DatetimeIndex(rvalues) - lvalues = lvalues[0] - else: - warnings.warn("Adding/subtracting array of DateOffsets to " - "Series not vectorized", PerformanceWarning) - rvalues = rvalues.astype('O') - - # pass thru on the na_op - self.na_op = lambda x, y: getattr(x, self.name)(y) - return lvalues, rvalues - - if self.is_offset_rhs: - rvalues, lvalues = _offset(rvalues, lvalues) - else: - - # with tz, convert to UTC - if self.is_datetime64tz_rhs: - rvalues = rvalues.tz_convert('UTC').tz_localize(None) - - lvalues = lvalues.view(np.int64) - rvalues = rvalues.view(np.int64) - - # otherwise it's a timedelta - else: - - self.dtype = 'timedelta64[ns]' - - # convert Tick DateOffset to underlying delta - if self.is_offset_rhs: - rvalues = to_timedelta(rvalues, box=False) - - lvalues = lvalues.astype(np.int64) - if not self.is_floating_rhs: - rvalues = rvalues.astype(np.int64) - - # time delta division -> unit less - # integer gets converted to timedelta in np < 1.6 - if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and - not self.is_integer_rhs and - self.name in ('__div__', '__rdiv__', - '__truediv__', '__rtruediv__', - '__floordiv__', '__rfloordiv__')): - self.dtype = 'float64' - self.fill_value = np.nan - lvalues = lvalues.astype(np.float64) - rvalues = rvalues.astype(np.float64) - - # if we need to mask the results - if mask.any(): - - def f(x): - - # datetime64[ns]/timedelta64[ns] masking - try: - x = np.array(x, dtype=self.dtype) - except TypeError: - x = np.array(x, dtype='datetime64[ns]') - - np.putmask(x, mask, self.fill_value) - return x - - self.wrap_results = f - - return lvalues, rvalues - - def _align_method_SERIES(left, right, 
align_asobject=False): """ align lhs and rhs Series """ @@ -678,26 +396,22 @@ def wrapper(left, right, name=name, na_op=na_op): index=left.index, name=res_name, dtype=result.dtype) - converted = _Op.get_op(left, right, name, na_op) - - lvalues, rvalues = converted.lvalues, converted.rvalues - dtype = converted.dtype - wrap_results = converted.wrap_results - na_op = converted.na_op + elif is_timedelta64_dtype(left): + result = dispatch_to_index_op(op, left, right, pd.TimedeltaIndex) + res_name = _get_series_op_result_name(left, right) + return construct_result(left, result, + index=left.index, name=res_name, + dtype=result.dtype) + lvalues = left.values + rvalues = right if isinstance(rvalues, ABCSeries): - lvalues = getattr(lvalues, 'values', lvalues) rvalues = getattr(rvalues, 'values', rvalues) - # _Op aligns left and right - else: - if (hasattr(lvalues, 'values') and - not isinstance(lvalues, ABCDatetimeIndex)): - lvalues = lvalues.values - result = wrap_results(safe_na_op(lvalues, rvalues)) + result = safe_na_op(lvalues, rvalues) res_name = _get_series_op_result_name(left, right) return construct_result(left, result, - index=left.index, name=res_name, dtype=dtype) + index=left.index, name=res_name, dtype=None) return wrapper diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 962de91ed0581..44f48f3ea9833 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -652,14 +652,14 @@ def test_timedelta_ops_with_missing_values(self): actual = -timedelta_NaT + s1 tm.assert_series_equal(actual, sn) - actual = s1 + NA - tm.assert_series_equal(actual, sn) - actual = NA + s1 - tm.assert_series_equal(actual, sn) - actual = s1 - NA - tm.assert_series_equal(actual, sn) - actual = -NA + s1 - tm.assert_series_equal(actual, sn) + with pytest.raises(TypeError): + s1 + np.nan + with pytest.raises(TypeError): + np.nan + s1 + with 
pytest.raises(TypeError): + s1 - np.nan + with pytest.raises(TypeError): + -np.nan + s1 actual = s1 + pd.NaT tm.assert_series_equal(actual, sn) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index c06435d4b8c42..7505e6b0cec3b 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1108,7 +1108,7 @@ def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): # check that we are getting a TypeError # with 'operate' (from core/ops.py) for the ops that are not # defined - pattern = 'operate|unsupported|cannot' + pattern = 'operate|unsupported|cannot|not supported' with tm.assert_raises_regex(TypeError, pattern): td1 * scalar_td with tm.assert_raises_regex(TypeError, pattern):
We're finally able to remove `ops._Op` and `ops._TimeOp`. This PR makes one final compatibility fix: ATM: ``` tdi = pd.TimedeltaIndex(['1 Day']) ser = pd.Series(tdi) >>> tdi + np.nan TypeError: unsupported operand type(s) for +: 'TimedeltaIndex' and 'float' >>> ser + np.nan 0 NaT dtype: timedelta64[ns] ``` Same deal for subtracting np.nan. See #19274. This makes the TimedeltaIndex canonical and updates the appropriate Series tests. - [x] closes #19274 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19277
2018-01-17T06:39:10Z
2018-01-19T11:32:36Z
2018-01-19T11:32:36Z
2018-01-19T16:06:02Z
Fix tzawareness_compat for DatetimeIndex comparisons with NaT
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 853d5cee11cd1..e8923a4c0bc20 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -431,7 +431,7 @@ Conversion - - Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) - Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`) - +- Bug in comparison of timezone-aware :class:`DatetimeIndex` against ``NaT`` incorrectly raising ``TypeError`` (:issue:`19276`) Indexing ^^^^^^^^ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d83d2d2c93ec8..978674b9d2a8d 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -666,7 +666,10 @@ def _assert_tzawareness_compat(self, other): if is_datetime64tz_dtype(other): # Get tzinfo from Series dtype other_tz = other.dtype.tz - if self.tz is None: + if other is libts.NaT: + # pd.NaT quacks both aware and naive + pass + elif self.tz is None: if other_tz is not None: raise TypeError('Cannot compare tz-naive and tz-aware ' 'datetime-like objects.') diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 41cd654cf22b9..e3ebb8769db02 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -286,6 +286,21 @@ def test_comparison_tzawareness_compat(self, op): with pytest.raises(TypeError): op(dz, ts) + @pytest.mark.parametrize('op', [operator.eq, operator.ne, + operator.gt, operator.ge, + operator.lt, operator.le]) + def test_nat_comparison_tzawareness(self, op): + # GH#19276 + # tzaware DatetimeIndex should not raise when compared to NaT + dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT, + '2014-05-01', '2014-07-01']) + expected = np.array([op == operator.ne] * len(dti)) + result = op(dti, pd.NaT) + 
tm.assert_numpy_array_equal(result, expected) + + result = op(dti.tz_localize('US/Pacific'), pd.NaT) + tm.assert_numpy_array_equal(result, expected) + def test_comparisons_coverage(self): rng = date_range('1/1/2000', periods=10)
ATM `pd.date_range('2016-01-01', periods=1, tz='US/Pacific') < pd.NaT` raises `TypeError` because `NaT` is tz-naive. This fixes that. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19276
2018-01-17T06:30:29Z
2018-01-18T00:32:46Z
2018-01-18T00:32:46Z
2018-02-11T21:59:05Z
TST: Clean up DataFrame.to_csv compression tests
diff --git a/pandas/tests/conftest.py b/pandas/tests/conftest.py new file mode 100644 index 0000000000000..8f5d963927f60 --- /dev/null +++ b/pandas/tests/conftest.py @@ -0,0 +1,11 @@ +import pytest +import pandas.util._test_decorators as td + + +@pytest.fixture(params=[None, 'gzip', 'bz2', + pytest.param('xz', marks=td.skip_if_no_lzma)]) +def compression(request): + """ + Fixture for trying common compression types in compression tests + """ + return request.param diff --git a/pandas/tests/frame/test_to_csv.py b/pandas/tests/frame/test_to_csv.py index 0ca25735fc03f..3fd07869c4159 100644 --- a/pandas/tests/frame/test_to_csv.py +++ b/pandas/tests/frame/test_to_csv.py @@ -21,7 +21,6 @@ ensure_clean, makeCustomDataframe as mkdf) import pandas.util.testing as tm -import pandas.util._test_decorators as td from pandas.tests.frame.common import TestData @@ -920,73 +919,28 @@ def test_to_csv_path_is_none(self): recons = pd.read_csv(StringIO(csv_str), index_col=0) assert_frame_equal(self.frame, recons) - def test_to_csv_compression_gzip(self): - # GH7615 - # use the compression kw in to_csv - df = DataFrame([[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - index=['A', 'B'], columns=['X', 'Y', 'Z']) - - with ensure_clean() as filename: - - df.to_csv(filename, compression="gzip") - - # test the round trip - to_csv -> read_csv - rs = read_csv(filename, compression="gzip", index_col=0) - assert_frame_equal(df, rs) - - # explicitly make sure file is gziped - import gzip - f = gzip.open(filename, 'rb') - text = f.read().decode('utf8') - f.close() - for col in df.columns: - assert col in text + def test_to_csv_compression(self, compression): - def test_to_csv_compression_bz2(self): - # GH7615 - # use the compression kw in to_csv df = DataFrame([[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], index=['A', 'B'], columns=['X', 'Y', 'Z']) with ensure_clean() as filename: - df.to_csv(filename, compression="bz2") + df.to_csv(filename, 
compression=compression) # test the round trip - to_csv -> read_csv - rs = read_csv(filename, compression="bz2", index_col=0) + rs = read_csv(filename, compression=compression, index_col=0) assert_frame_equal(df, rs) - # explicitly make sure file is bz2ed - import bz2 - f = bz2.BZ2File(filename, 'rb') - text = f.read().decode('utf8') - f.close() - for col in df.columns: - assert col in text - - @td.skip_if_no_lzma - def test_to_csv_compression_xz(self): - # GH11852 - # use the compression kw in to_csv - df = DataFrame([[0.123456, 0.234567, 0.567567], - [12.32112, 123123.2, 321321.2]], - index=['A', 'B'], columns=['X', 'Y', 'Z']) - - with ensure_clean() as filename: - - df.to_csv(filename, compression="xz") - - # test the round trip - to_csv -> read_csv - rs = read_csv(filename, compression="xz", index_col=0) - assert_frame_equal(df, rs) + # explicitly make sure file is compressed + with tm.decompress_file(filename, compression) as fh: + text = fh.read().decode('utf8') + for col in df.columns: + assert col in text - # explicitly make sure file is xzipped - lzma = compat.import_lzma() - f = lzma.open(filename, 'rb') - assert_frame_equal(df, read_csv(f, index_col=0)) - f.close() + with tm.decompress_file(filename, compression) as fh: + assert_frame_equal(df, read_csv(fh, index_col=0)) def test_to_csv_compression_value_error(self): # GH7615 diff --git a/pandas/tests/io/parser/test_network.py b/pandas/tests/io/parser/test_network.py index 2d0a23d71a2e6..10f6cef04b593 100644 --- a/pandas/tests/io/parser/test_network.py +++ b/pandas/tests/io/parser/test_network.py @@ -15,15 +15,17 @@ @pytest.mark.network @pytest.mark.parametrize( - "compression,extension", [ + "compress_type, extension", [ ('gzip', '.gz'), ('bz2', '.bz2'), ('zip', '.zip'), pytest.param('xz', '.xz', marks=td.skip_if_no_lzma) ] ) @pytest.mark.parametrize('mode', ['explicit', 'infer']) @pytest.mark.parametrize('engine', ['python', 'c']) -def test_compressed_urls(salaries_table, compression, extension, mode, 
engine): - check_compressed_urls(salaries_table, compression, extension, mode, engine) +def test_compressed_urls(salaries_table, compress_type, extension, mode, + engine): + check_compressed_urls(salaries_table, compress_type, extension, mode, + engine) @tm.network diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index 99dcc9272bf11..ec26716f79446 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -14,7 +14,6 @@ from pandas.util.testing import (assert_series_equal, assert_almost_equal, assert_frame_equal, ensure_clean) import pandas.util.testing as tm -import pandas.util._test_decorators as td from .common import TestData @@ -139,12 +138,6 @@ def test_to_csv_path_is_none(self): csv_str = s.to_csv(path=None) assert isinstance(csv_str, str) - @pytest.mark.parametrize('compression', [ - None, - 'gzip', - 'bz2', - pytest.param('xz', marks=td.skip_if_no_lzma), - ]) def test_to_csv_compression(self, compression): s = Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'], @@ -160,14 +153,13 @@ def test_to_csv_compression(self, compression): assert_series_equal(s, rs) # explicitly ensure file was compressed - f = tm.decompress_file(filename, compression=compression) - text = f.read().decode('utf8') - assert s.name in text - f.close() - - f = tm.decompress_file(filename, compression=compression) - assert_series_equal(s, pd.read_csv(f, index_col=0, squeeze=True)) - f.close() + with tm.decompress_file(filename, compression=compression) as fh: + text = fh.read().decode('utf8') + assert s.name in text + + with tm.decompress_file(filename, compression=compression) as fh: + assert_series_equal(s, pd.read_csv(fh, + index_col=0, squeeze=True)) class TestSeriesIO(TestData): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 1bea25a16ca1e..3567754371da3 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -162,6 +162,7 @@ def round_trip_localpath(writer, reader, path=None): return obj 
+@contextmanager def decompress_file(path, compression): """ Open a compressed file and return a file object @@ -194,7 +195,7 @@ def decompress_file(path, compression): msg = 'Unrecognized compression type: {}'.format(compression) raise ValueError(msg) - return f + yield f def assert_almost_equal(left, right, check_exact=False,
xref #19226 Parametrized some of the compression tests in ``tests/frame/test_to_csv.py`` and used the new ``decompress_file`` function.
https://api.github.com/repos/pandas-dev/pandas/pulls/19273
2018-01-16T19:18:48Z
2018-01-21T15:29:33Z
2018-01-21T15:29:33Z
2018-01-22T13:58:11Z
REF: Move pandas.core.categorical
diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index cf63b5083885e..75cf0a88e37c1 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -55,7 +55,7 @@ from pandas.core.dtypes.common import ( is_bool_dtype, is_object_dtype, is_datetime64_dtype, pandas_dtype) -from pandas.core.categorical import Categorical +from pandas.core.arrays import Categorical from pandas.core.dtypes.concat import union_categoricals import pandas.io.common as com diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py index 07b34961ce25d..f651fbbf56316 100644 --- a/pandas/compat/pickle_compat.py +++ b/pandas/compat/pickle_compat.py @@ -108,7 +108,11 @@ def load_reduce(self): ('pandas.tseries.index', 'DatetimeIndex'): ('pandas.core.indexes.datetimes', 'DatetimeIndex'), ('pandas.tseries.period', 'PeriodIndex'): - ('pandas.core.indexes.period', 'PeriodIndex') + ('pandas.core.indexes.period', 'PeriodIndex'), + + # 19269, arrays moving + ('pandas.core.categorical', 'Categorical'): + ('pandas.core.arrays', 'Categorical'), } diff --git a/pandas/core/api.py b/pandas/core/api.py index b228a97c99074..aa37ddffa1156 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -6,7 +6,7 @@ from pandas.core.algorithms import factorize, unique, value_counts from pandas.core.dtypes.missing import isna, isnull, notna, notnull -from pandas.core.categorical import Categorical +from pandas.core.arrays import Categorical from pandas.core.groupby import Grouper from pandas.io.formats.format import set_eng_float_format from pandas.core.index import (Index, CategoricalIndex, Int64Index, diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py new file mode 100644 index 0000000000000..ee32b12f0e712 --- /dev/null +++ b/pandas/core/arrays/__init__.py @@ -0,0 +1 @@ +from .categorical import Categorical # noqa diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py new file mode 100644 index 0000000000000..708f903cd73cb 
--- /dev/null +++ b/pandas/core/arrays/categorical.py @@ -0,0 +1,2331 @@ +# pylint: disable=E1101,W0232 + +import numpy as np +from warnings import warn +import types + +from pandas import compat +from pandas.compat import u, lzip +from pandas._libs import lib, algos as libalgos + +from pandas.core.dtypes.generic import ( + ABCSeries, ABCIndexClass, ABCCategoricalIndex) +from pandas.core.dtypes.missing import isna, notna +from pandas.core.dtypes.cast import ( + maybe_infer_to_datetimelike, + coerce_indexer_dtype) +from pandas.core.dtypes.dtypes import CategoricalDtype +from pandas.core.dtypes.common import ( + _ensure_int64, + _ensure_object, + _ensure_platform_int, + is_dtype_equal, + is_datetimelike, + is_datetime64_dtype, + is_timedelta64_dtype, + is_categorical, + is_categorical_dtype, + is_list_like, is_sequence, + is_scalar, + is_dict_like) +from pandas.core.common import is_null_slice, _maybe_box_datetimelike + +from pandas.core.algorithms import factorize, take_1d, unique1d +from pandas.core.accessor import PandasDelegate +from pandas.core.base import (PandasObject, + NoNewAttributesMixin, _shared_docs) +import pandas.core.common as com +from pandas.core.missing import interpolate_2d +from pandas.compat.numpy import function as nv +from pandas.util._decorators import ( + Appender, cache_readonly, deprecate_kwarg, Substitution) + +from pandas.io.formats.terminal import get_terminal_size +from pandas.util._validators import validate_bool_kwarg +from pandas.core.config import get_option + + +def _cat_compare_op(op): + def f(self, other): + # On python2, you can usually compare any type to any type, and + # Categoricals can be seen as a custom type, but having different + # results depending whether categories are the same or not is kind of + # insane, so be a bit stricter here and use the python3 idea of + # comparing only things of equal type. 
+ if not self.ordered: + if op in ['__lt__', '__gt__', '__le__', '__ge__']: + raise TypeError("Unordered Categoricals can only compare " + "equality or not") + if isinstance(other, Categorical): + # Two Categoricals can only be be compared if the categories are + # the same (maybe up to ordering, depending on ordered) + + msg = ("Categoricals can only be compared if " + "'categories' are the same.") + if len(self.categories) != len(other.categories): + raise TypeError(msg + " Categories are different lengths") + elif (self.ordered and not (self.categories == + other.categories).all()): + raise TypeError(msg) + elif not set(self.categories) == set(other.categories): + raise TypeError(msg) + + if not (self.ordered == other.ordered): + raise TypeError("Categoricals can only be compared if " + "'ordered' is the same") + if not self.ordered and not self.categories.equals( + other.categories): + # both unordered and different order + other_codes = _get_codes_for_values(other, self.categories) + else: + other_codes = other._codes + + na_mask = (self._codes == -1) | (other_codes == -1) + f = getattr(self._codes, op) + ret = f(other_codes) + if na_mask.any(): + # In other series, the leads to False, so do that here too + ret[na_mask] = False + return ret + + # Numpy-1.9 and earlier may convert a scalar to a zerodim array during + # comparison operation when second arg has higher priority, e.g. + # + # cat[0] < cat + # + # With cat[0], for example, being ``np.int64(1)`` by the time it gets + # into this function would become ``np.array(1)``. 
+ other = lib.item_from_zerodim(other) + if is_scalar(other): + if other in self.categories: + i = self.categories.get_loc(other) + return getattr(self._codes, op)(i) + else: + if op == '__eq__': + return np.repeat(False, len(self)) + elif op == '__ne__': + return np.repeat(True, len(self)) + else: + msg = ("Cannot compare a Categorical for op {op} with a " + "scalar, which is not a category.") + raise TypeError(msg.format(op=op)) + else: + + # allow categorical vs object dtype array comparisons for equality + # these are only positional comparisons + if op in ['__eq__', '__ne__']: + return getattr(np.array(self), op)(np.array(other)) + + msg = ("Cannot compare a Categorical for op {op} with type {typ}." + "\nIf you want to compare values, use 'np.asarray(cat) " + "<op> other'.") + raise TypeError(msg.format(op=op, typ=type(other))) + + f.__name__ = op + + return f + + +def _maybe_to_categorical(array): + """ + Coerce to a categorical if a series is given. + + Internal use ONLY. + """ + if isinstance(array, (ABCSeries, ABCCategoricalIndex)): + return array._values + elif isinstance(array, np.ndarray): + return Categorical(array) + return array + + +_codes_doc = """The category codes of this categorical. + +Level codes are an array if integer which are the positions of the real +values in the categories array. + +There is not setter, use the other categorical methods and the normal item +setter to change values in the categorical. +""" + + +class Categorical(PandasObject): + """ + Represents a categorical variable in classic R / S-plus fashion + + `Categoricals` can only take on only a limited, and usually fixed, number + of possible values (`categories`). In contrast to statistical categorical + variables, a `Categorical` might have an order, but numerical operations + (additions, divisions, ...) are not possible. + + All values of the `Categorical` are either in `categories` or `np.nan`. + Assigning values outside of `categories` will raise a `ValueError`. 
Order + is defined by the order of the `categories`, not lexical order of the + values. + + Parameters + ---------- + values : list-like + The values of the categorical. If categories are given, values not in + categories will be replaced with NaN. + categories : Index-like (unique), optional + The unique categories for this categorical. If not given, the + categories are assumed to be the unique values of values. + ordered : boolean, (default False) + Whether or not this categorical is treated as a ordered categorical. + If not given, the resulting categorical will not be ordered. + dtype : CategoricalDtype + An instance of ``CategoricalDtype`` to use for this categorical + + .. versionadded:: 0.21.0 + + Attributes + ---------- + categories : Index + The categories of this categorical + codes : ndarray + The codes (integer positions, which point to the categories) of this + categorical, read only. + ordered : boolean + Whether or not this Categorical is ordered. + dtype : CategoricalDtype + The instance of ``CategoricalDtype`` storing the ``categories`` + and ``ordered``. + + .. versionadded:: 0.21.0 + + Methods + ------- + from_codes + __array__ + + Raises + ------ + ValueError + If the categories do not validate. + TypeError + If an explicit ``ordered=True`` is given but no `categories` and the + `values` are not sortable. + + Examples + -------- + >>> pd.Categorical([1, 2, 3, 1, 2, 3]) + [1, 2, 3, 1, 2, 3] + Categories (3, int64): [1, 2, 3] + + >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']) + [a, b, c, a, b, c] + Categories (3, object): [a, b, c] + + Ordered `Categoricals` can be sorted according to the custom order + of the categories and can have a min and max value. + + >>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True, + ... 
categories=['c', 'b', 'a']) + >>> c + [a, b, c, a, b, c] + Categories (3, object): [c < b < a] + >>> c.min() + 'c' + + Notes + ----- + See the `user guide + <http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more. + + See also + -------- + pandas.api.types.CategoricalDtype : Type for categorical data + CategoricalIndex : An Index with an underlying ``Categorical`` + """ + + # For comparisons, so that numpy uses our implementation if the compare + # ops, which raise + __array_priority__ = 1000 + _dtype = CategoricalDtype() + _deprecations = frozenset(['labels']) + _typ = 'categorical' + + def __init__(self, values, categories=None, ordered=None, dtype=None, + fastpath=False): + + # Ways of specifying the dtype (prioritized ordered) + # 1. dtype is a CategoricalDtype + # a.) with known categories, use dtype.categories + # b.) else with Categorical values, use values.dtype + # c.) else, infer from values + # d.) specifying dtype=CategoricalDtype and categories is an error + # 2. dtype is a string 'category' + # a.) use categories, ordered + # b.) use values.dtype + # c.) infer from values + # 3. dtype is None + # a.) use categories, ordered + # b.) use values.dtype + # c.) 
infer from values + + if dtype is not None: + # The dtype argument takes precedence over values.dtype (if any) + if isinstance(dtype, compat.string_types): + if dtype == 'category': + dtype = CategoricalDtype(categories, ordered) + else: + msg = "Unknown `dtype` {dtype}" + raise ValueError(msg.format(dtype=dtype)) + elif categories is not None or ordered is not None: + raise ValueError("Cannot specify both `dtype` and `categories`" + " or `ordered`.") + + categories = dtype.categories + ordered = dtype.ordered + + elif is_categorical(values): + # If no "dtype" was passed, use the one from "values", but honor + # the "ordered" and "categories" arguments + dtype = values.dtype._from_categorical_dtype(values.dtype, + categories, ordered) + else: + # If dtype=None and values is not categorical, create a new dtype + dtype = CategoricalDtype(categories, ordered) + + # At this point, dtype is always a CategoricalDtype + # if dtype.categories is None, we are inferring + + if fastpath: + self._codes = coerce_indexer_dtype(values, categories) + self._dtype = dtype + return + + # null_mask indicates missing values we want to exclude from inference. + # This means: only missing values in list-likes (not arrays/ndframes). 
+ null_mask = np.array(False) + + # sanitize input + if is_categorical_dtype(values): + if dtype.categories is None: + dtype = CategoricalDtype(values.categories, dtype.ordered) + + elif not isinstance(values, (ABCIndexClass, ABCSeries)): + # _sanitize_array coerces np.nan to a string under certain versions + # of numpy + values = maybe_infer_to_datetimelike(values, convert_dates=True) + if not isinstance(values, np.ndarray): + values = _convert_to_list_like(values) + from pandas.core.series import _sanitize_array + # By convention, empty lists result in object dtype: + if len(values) == 0: + sanitize_dtype = 'object' + else: + sanitize_dtype = None + null_mask = isna(values) + if null_mask.any(): + values = [values[idx] for idx in np.where(~null_mask)[0]] + values = _sanitize_array(values, None, dtype=sanitize_dtype) + + if dtype.categories is None: + try: + codes, categories = factorize(values, sort=True) + except TypeError: + codes, categories = factorize(values, sort=False) + if dtype.ordered: + # raise, as we don't have a sortable data structure and so + # the user should give us one by specifying categories + raise TypeError("'values' is not ordered, please " + "explicitly specify the categories order " + "by passing in a categories argument.") + except ValueError: + + # FIXME + raise NotImplementedError("> 1 ndim Categorical are not " + "supported at this time") + + # we're inferring from values + dtype = CategoricalDtype(categories, dtype.ordered) + + elif is_categorical_dtype(values): + old_codes = (values.cat.codes if isinstance(values, ABCSeries) + else values.codes) + codes = _recode_for_categories(old_codes, values.dtype.categories, + dtype.categories) + + else: + codes = _get_codes_for_values(values, dtype.categories) + + if null_mask.any(): + # Reinsert -1 placeholders for previously removed missing values + full_codes = - np.ones(null_mask.shape, dtype=codes.dtype) + full_codes[~null_mask] = codes + codes = full_codes + + self._dtype = dtype + 
self._codes = coerce_indexer_dtype(codes, dtype.categories) + + @property + def categories(self): + """The categories of this categorical. + + Setting assigns new values to each category (effectively a rename of + each individual category). + + The assigned value has to be a list-like object. All items must be + unique and the number of items in the new categories must be the same + as the number of items in the old categories. + + Assigning to `categories` is a inplace operation! + + Raises + ------ + ValueError + If the new categories do not validate as categories or if the + number of new categories is unequal the number of old categories + + See also + -------- + rename_categories + reorder_categories + add_categories + remove_categories + remove_unused_categories + set_categories + """ + return self.dtype.categories + + @categories.setter + def categories(self, categories): + new_dtype = CategoricalDtype(categories, ordered=self.ordered) + if (self.dtype.categories is not None and + len(self.dtype.categories) != len(new_dtype.categories)): + raise ValueError("new categories need to have the same number of " + "items as the old categories!") + self._dtype = new_dtype + + @property + def ordered(self): + """Whether the categories have an ordered relationship""" + return self.dtype.ordered + + @property + def dtype(self): + """The :class:`~pandas.api.types.CategoricalDtype` for this instance""" + return self._dtype + + @property + def _constructor(self): + return Categorical + + def copy(self): + """ Copy constructor. """ + return self._constructor(values=self._codes.copy(), + categories=self.categories, + ordered=self.ordered, + fastpath=True) + + def astype(self, dtype, copy=True): + """ + Coerce this type to another dtype + + Parameters + ---------- + dtype : numpy dtype or pandas type + copy : bool, default True + By default, astype always returns a newly allocated object. + If copy is set to False and dtype is categorical, the original + object is returned. 
+ + .. versionadded:: 0.19.0 + + """ + if is_categorical_dtype(dtype): + # GH 10696/18593 + dtype = self.dtype._update_dtype(dtype) + self = self.copy() if copy else self + if dtype == self.dtype: + return self + return self._set_dtype(dtype) + return np.array(self, dtype=dtype, copy=copy) + + @cache_readonly + def ndim(self): + """Number of dimensions of the Categorical """ + return self._codes.ndim + + @cache_readonly + def size(self): + """ return the len of myself """ + return len(self) + + @cache_readonly + def itemsize(self): + """ return the size of a single category """ + return self.categories.itemsize + + def tolist(self): + """ + Return a list of the values. + + These are each a scalar type, which is a Python scalar + (for str, int, float) or a pandas scalar + (for Timestamp/Timedelta/Interval/Period) + """ + if is_datetimelike(self.categories): + return [_maybe_box_datetimelike(x) for x in self] + return np.array(self).tolist() + + @property + def base(self): + """ compat, we are always our own object """ + return None + + @classmethod + def _from_inferred_categories(cls, inferred_categories, inferred_codes, + dtype): + """Construct a Categorical from inferred values + + For inferred categories (`dtype` is None) the categories are sorted. + For explicit `dtype`, the `inferred_categories` are cast to the + appropriate type. 
+ + Parameters + ---------- + + inferred_categories : Index + inferred_codes : Index + dtype : CategoricalDtype or 'category' + + Returns + ------- + Categorical + """ + from pandas import Index, to_numeric, to_datetime, to_timedelta + + cats = Index(inferred_categories) + + known_categories = (isinstance(dtype, CategoricalDtype) and + dtype.categories is not None) + + if known_categories: + # Convert to a specialzed type with `dtype` if specified + if dtype.categories.is_numeric(): + cats = to_numeric(inferred_categories, errors='coerce') + elif is_datetime64_dtype(dtype.categories): + cats = to_datetime(inferred_categories, errors='coerce') + elif is_timedelta64_dtype(dtype.categories): + cats = to_timedelta(inferred_categories, errors='coerce') + + if known_categories: + # recode from observation oder to dtype.categories order + categories = dtype.categories + codes = _recode_for_categories(inferred_codes, cats, categories) + elif not cats.is_monotonic_increasing: + # sort categories and recode for unknown categories + unsorted = cats.copy() + categories = cats.sort_values() + codes = _recode_for_categories(inferred_codes, unsorted, + categories) + dtype = CategoricalDtype(categories, ordered=False) + else: + dtype = CategoricalDtype(cats, ordered=False) + codes = inferred_codes + + return cls(codes, dtype=dtype, fastpath=True) + + @classmethod + def from_codes(cls, codes, categories, ordered=False): + """ + Make a Categorical type from codes and categories arrays. + + This constructor is useful if you already have codes and categories and + so do not need the (computation intensive) factorization step, which is + usually done on the constructor. + + If your data does not follow this convention, please use the normal + constructor. + + Parameters + ---------- + codes : array-like, integers + An integer array, where each integer points to a category in + categories or -1 for NaN + categories : index-like + The categories for the categorical. 
Items need to be unique. + ordered : boolean, (default False) + Whether or not this categorical is treated as a ordered + categorical. If not given, the resulting categorical will be + unordered. + """ + try: + codes = np.asarray(codes, np.int64) + except (ValueError, TypeError): + raise ValueError( + "codes need to be convertible to an arrays of integers") + + categories = CategoricalDtype._validate_categories(categories) + + if len(codes) and (codes.max() >= len(categories) or codes.min() < -1): + raise ValueError("codes need to be between -1 and " + "len(categories)-1") + + return cls(codes, categories=categories, ordered=ordered, + fastpath=True) + + _codes = None + + def _get_codes(self): + """ Get the codes. + + Returns + ------- + codes : integer array view + A non writable view of the `codes` array. + """ + v = self._codes.view() + v.flags.writeable = False + return v + + def _set_codes(self, codes): + """ + Not settable by the user directly + """ + raise ValueError("cannot set Categorical codes directly") + + codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc) + + def _set_categories(self, categories, fastpath=False): + """ Sets new categories inplace + + Parameters + ---------- + fastpath : boolean (default: False) + Don't perform validation of the categories for uniqueness or nulls + + Examples + -------- + >>> c = Categorical(['a', 'b']) + >>> c + [a, b] + Categories (2, object): [a, b] + + >>> c._set_categories(pd.Index(['a', 'c'])) + >>> c + [a, c] + Categories (2, object): [a, c] + """ + + if fastpath: + new_dtype = CategoricalDtype._from_fastpath(categories, + self.ordered) + else: + new_dtype = CategoricalDtype(categories, ordered=self.ordered) + if (not fastpath and self.dtype.categories is not None and + len(new_dtype.categories) != len(self.dtype.categories)): + raise ValueError("new categories need to have the same number of " + "items than the old categories!") + + self._dtype = new_dtype + + def _codes_for_groupby(self, sort): 
+ """ + If sort=False, return a copy of self, coded with categories as + returned by .unique(), followed by any categories not appearing in + the data. If sort=True, return self. + + This method is needed solely to ensure the categorical index of the + GroupBy result has categories in the order of appearance in the data + (GH-8868). + + Parameters + ---------- + sort : boolean + The value of the sort parameter groupby was called with. + + Returns + ------- + Categorical + If sort=False, the new categories are set to the order of + appearance in codes (unless ordered=True, in which case the + original order is preserved), followed by any unrepresented + categories in the original order. + """ + + # Already sorted according to self.categories; all is fine + if sort: + return self + + # sort=False should order groups in as-encountered order (GH-8868) + cat = self.unique() + + # But for groupby to work, all categories should be present, + # including those missing from the data (GH-13179), which .unique() + # above dropped + cat.add_categories( + self.categories[~self.categories.isin(cat.categories)], + inplace=True) + + return self.reorder_categories(cat.categories) + + def _set_dtype(self, dtype): + """Internal method for directly updating the CategoricalDtype + + Parameters + ---------- + dtype : CategoricalDtype + + Notes + ----- + We don't do any validation here. It's assumed that the dtype is + a (valid) instance of `CategoricalDtype`. 
+ """ + codes = _recode_for_categories(self.codes, self.categories, + dtype.categories) + return type(self)(codes, dtype=dtype, fastpath=True) + + def set_ordered(self, value, inplace=False): + """ + Sets the ordered attribute to the boolean value + + Parameters + ---------- + value : boolean to set whether this categorical is ordered (True) or + not (False) + inplace : boolean (default: False) + Whether or not to set the ordered attribute inplace or return a copy + of this categorical with ordered set to the value + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + new_dtype = CategoricalDtype(self.categories, ordered=value) + cat = self if inplace else self.copy() + cat._dtype = new_dtype + if not inplace: + return cat + + def as_ordered(self, inplace=False): + """ + Sets the Categorical to be ordered + + Parameters + ---------- + inplace : boolean (default: False) + Whether or not to set the ordered attribute inplace or return a copy + of this categorical with ordered set to True + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + return self.set_ordered(True, inplace=inplace) + + def as_unordered(self, inplace=False): + """ + Sets the Categorical to be unordered + + Parameters + ---------- + inplace : boolean (default: False) + Whether or not to set the ordered attribute inplace or return a copy + of this categorical with ordered set to False + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + return self.set_ordered(False, inplace=inplace) + + def set_categories(self, new_categories, ordered=None, rename=False, + inplace=False): + """ Sets the categories to the specified new_categories. + + `new_categories` can include new categories (which will result in + unused categories) or remove old categories (which results in values + set to NaN). If `rename==True`, the categories will simple be renamed + (less or more items than in old categories will result in values set to + NaN or in unused categories respectively). 
+ + This method can be used to perform more than one action of adding, + removing, and reordering simultaneously and is therefore faster than + performing the individual steps via the more specialised methods. + + On the other hand this methods does not do checks (e.g., whether the + old categories are included in the new categories on a reorder), which + can result in surprising changes, for example when using special string + dtypes on python3, which does not considers a S1 string equal to a + single char python string. + + Raises + ------ + ValueError + If new_categories does not validate as categories + + Parameters + ---------- + new_categories : Index-like + The categories in new order. + ordered : boolean, (default: False) + Whether or not the categorical is treated as a ordered categorical. + If not given, do not change the ordered information. + rename : boolean (default: False) + Whether or not the new_categories should be considered as a rename + of the old categories or as reordered categories. + inplace : boolean (default: False) + Whether or not to reorder the categories inplace or return a copy of + this categorical with reordered categories. + + Returns + ------- + cat : Categorical with reordered categories or None if inplace. 
+ + See also + -------- + rename_categories + reorder_categories + add_categories + remove_categories + remove_unused_categories + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + if ordered is None: + ordered = self.dtype.ordered + new_dtype = CategoricalDtype(new_categories, ordered=ordered) + + cat = self if inplace else self.copy() + if rename: + if (cat.dtype.categories is not None and + len(new_dtype.categories) < len(cat.dtype.categories)): + # remove all _codes which are larger and set to -1/NaN + self._codes[self._codes >= len(new_dtype.categories)] = -1 + else: + codes = _recode_for_categories(self.codes, self.categories, + new_dtype.categories) + cat._codes = codes + cat._dtype = new_dtype + + if not inplace: + return cat + + def rename_categories(self, new_categories, inplace=False): + """ Renames categories. + + Raises + ------ + ValueError + If new categories are list-like and do not have the same number of + items than the current categories or do not validate as categories + + Parameters + ---------- + new_categories : list-like, dict-like or callable + + * list-like: all items must be unique and the number of items in + the new categories must match the existing number of categories. + + * dict-like: specifies a mapping from + old categories to new. Categories not contained in the mapping + are passed through and extra categories in the mapping are + ignored. + + .. versionadded:: 0.21.0 + + * callable : a callable that is called on all items in the old + categories and whose return values comprise the new categories. + + .. versionadded:: 0.23.0 + + .. warning:: + + Currently, Series are considered list like. In a future version + of pandas they'll be considered dict-like. + + inplace : boolean (default: False) + Whether or not to rename the categories inplace or return a copy of + this categorical with renamed categories. + + Returns + ------- + cat : Categorical or None + With ``inplace=False``, the new categorical is returned. 
+ With ``inplace=True``, there is no return value. + + See also + -------- + reorder_categories + add_categories + remove_categories + remove_unused_categories + set_categories + + Examples + -------- + >>> c = Categorical(['a', 'a', 'b']) + >>> c.rename_categories([0, 1]) + [0, 0, 1] + Categories (2, int64): [0, 1] + + For dict-like ``new_categories``, extra keys are ignored and + categories not in the dictionary are passed through + + >>> c.rename_categories({'a': 'A', 'c': 'C'}) + [A, A, b] + Categories (2, object): [A, b] + + You may also provide a callable to create the new categories + + >>> c.rename_categories(lambda x: x.upper()) + [A, A, B] + Categories (2, object): [A, B] + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + cat = self if inplace else self.copy() + + if isinstance(new_categories, ABCSeries): + msg = ("Treating Series 'new_categories' as a list-like and using " + "the values. In a future version, 'rename_categories' will " + "treat Series like a dictionary.\n" + "For dict-like, use 'new_categories.to_dict()'\n" + "For list-like, use 'new_categories.values'.") + warn(msg, FutureWarning, stacklevel=2) + new_categories = list(new_categories) + + if is_dict_like(new_categories): + cat.categories = [new_categories.get(item, item) + for item in cat.categories] + elif callable(new_categories): + cat.categories = [new_categories(item) for item in cat.categories] + else: + cat.categories = new_categories + if not inplace: + return cat + + def reorder_categories(self, new_categories, ordered=None, inplace=False): + """ Reorders categories as specified in new_categories. + + `new_categories` need to include all old categories and no new category + items. + + Raises + ------ + ValueError + If the new categories do not contain all old category items or any + new ones + + Parameters + ---------- + new_categories : Index-like + The categories in new order. 
+ ordered : boolean, optional
+ Whether or not the categorical is treated as an ordered categorical.
+ If not given, do not change the ordered information.
+ inplace : boolean (default: False)
+ Whether or not to reorder the categories inplace or return a copy of
+ this categorical with reordered categories.
+
+ Returns
+ -------
+ cat : Categorical with reordered categories or None if inplace.
+
+ See also
+ --------
+ rename_categories
+ add_categories
+ remove_categories
+ remove_unused_categories
+ set_categories
+ """
+ inplace = validate_bool_kwarg(inplace, 'inplace')
+ if set(self.dtype.categories) != set(new_categories):
+ raise ValueError("items in new_categories are not the same as in "
+ "old categories")
+ return self.set_categories(new_categories, ordered=ordered,
+ inplace=inplace)
+
+ def add_categories(self, new_categories, inplace=False):
+ """ Add new categories.
+
+ `new_categories` will be included at the last/highest place in the
+ categories and will be unused directly after this call.
+
+ Raises
+ ------
+ ValueError
+ If the new categories include old categories or do not validate as
+ categories
+
+ Parameters
+ ----------
+ new_categories : category or list-like of category
+ The new categories to be included.
+ inplace : boolean (default: False)
+ Whether or not to add the categories inplace or return a copy of
+ this categorical with added categories.
+
+ Returns
+ -------
+ cat : Categorical with new categories added or None if inplace. 
+ + See also + -------- + rename_categories + reorder_categories + remove_categories + remove_unused_categories + set_categories + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + if not is_list_like(new_categories): + new_categories = [new_categories] + already_included = set(new_categories) & set(self.dtype.categories) + if len(already_included) != 0: + msg = ("new categories must not include old categories: " + "{already_included!s}") + raise ValueError(msg.format(already_included=already_included)) + new_categories = list(self.dtype.categories) + list(new_categories) + new_dtype = CategoricalDtype(new_categories, self.ordered) + + cat = self if inplace else self.copy() + cat._dtype = new_dtype + cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories) + if not inplace: + return cat + + def remove_categories(self, removals, inplace=False): + """ Removes the specified categories. + + `removals` must be included in the old categories. Values which were in + the removed categories will be set to NaN + + Raises + ------ + ValueError + If the removals are not contained in the categories + + Parameters + ---------- + removals : category or list of categories + The categories which should be removed. + inplace : boolean (default: False) + Whether or not to remove the categories inplace or return a copy of + this categorical with removed categories. + + Returns + ------- + cat : Categorical with removed categories or None if inplace. 
+ + See also + -------- + rename_categories + reorder_categories + add_categories + remove_unused_categories + set_categories + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + if not is_list_like(removals): + removals = [removals] + + removal_set = set(list(removals)) + not_included = removal_set - set(self.dtype.categories) + new_categories = [c for c in self.dtype.categories + if c not in removal_set] + + # GH 10156 + if any(isna(removals)): + not_included = [x for x in not_included if notna(x)] + new_categories = [x for x in new_categories if notna(x)] + + if len(not_included) != 0: + msg = "removals must all be in old categories: {not_included!s}" + raise ValueError(msg.format(not_included=not_included)) + + return self.set_categories(new_categories, ordered=self.ordered, + rename=False, inplace=inplace) + + def remove_unused_categories(self, inplace=False): + """ Removes categories which are not used. + + Parameters + ---------- + inplace : boolean (default: False) + Whether or not to drop unused categories inplace or return a copy of + this categorical with unused categories dropped. + + Returns + ------- + cat : Categorical with unused categories dropped or None if inplace. + + See also + -------- + rename_categories + reorder_categories + add_categories + remove_categories + set_categories + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + cat = self if inplace else self.copy() + idx, inv = np.unique(cat._codes, return_inverse=True) + + if idx.size != 0 and idx[0] == -1: # na sentinel + idx, inv = idx[1:], inv - 1 + + new_categories = cat.dtype.categories.take(idx) + new_dtype = CategoricalDtype._from_fastpath(new_categories, + ordered=self.ordered) + cat._dtype = new_dtype + cat._codes = coerce_indexer_dtype(inv, new_dtype.categories) + + if not inplace: + return cat + + def map(self, mapper): + """Apply mapper function to its categories (not codes). + + Parameters + ---------- + mapper : callable + Function to be applied. 
When all categories are mapped + to different categories, the result will be Categorical which has + the same order property as the original. Otherwise, the result will + be np.ndarray. + + Returns + ------- + applied : Categorical or Index. + + """ + new_categories = self.categories.map(mapper) + try: + return self.from_codes(self._codes.copy(), + categories=new_categories, + ordered=self.ordered) + except ValueError: + return np.take(new_categories, self._codes) + + __eq__ = _cat_compare_op('__eq__') + __ne__ = _cat_compare_op('__ne__') + __lt__ = _cat_compare_op('__lt__') + __gt__ = _cat_compare_op('__gt__') + __le__ = _cat_compare_op('__le__') + __ge__ = _cat_compare_op('__ge__') + + # for Series/ndarray like compat + @property + def shape(self): + """ Shape of the Categorical. + + For internal compatibility with numpy arrays. + + Returns + ------- + shape : tuple + """ + + return tuple([len(self._codes)]) + + def shift(self, periods): + """ + Shift Categorical by desired number of periods. + + Parameters + ---------- + periods : int + Number of periods to move, can be positive or negative + + Returns + ------- + shifted : Categorical + """ + # since categoricals always have ndim == 1, an axis parameter + # doesn't make any sense here. + codes = self.codes + if codes.ndim > 1: + raise NotImplementedError("Categorical with ndim > 1.") + if np.prod(codes.shape) and (periods != 0): + codes = np.roll(codes, _ensure_platform_int(periods), axis=0) + if periods > 0: + codes[:periods] = -1 + else: + codes[periods:] = -1 + + return self.from_codes(codes, categories=self.categories, + ordered=self.ordered) + + def __array__(self, dtype=None): + """ + The numpy array interface. 
+ + Returns + ------- + values : numpy array + A numpy array of either the specified dtype or, + if dtype==None (default), the same dtype as + categorical.categories.dtype + """ + ret = take_1d(self.categories.values, self._codes) + if dtype and not is_dtype_equal(dtype, self.categories.dtype): + return np.asarray(ret, dtype) + return ret + + def __setstate__(self, state): + """Necessary for making this object picklable""" + if not isinstance(state, dict): + raise Exception('invalid pickle state') + + # Provide compatibility with pre-0.15.0 Categoricals. + if '_categories' not in state and '_levels' in state: + state['_categories'] = self.dtype._validate_categories(state.pop( + '_levels')) + if '_codes' not in state and 'labels' in state: + state['_codes'] = coerce_indexer_dtype( + state.pop('labels'), state['_categories']) + + # 0.16.0 ordered change + if '_ordered' not in state: + + # >=15.0 < 0.16.0 + if 'ordered' in state: + state['_ordered'] = state.pop('ordered') + else: + state['_ordered'] = False + + # 0.21.0 CategoricalDtype change + if '_dtype' not in state: + state['_dtype'] = CategoricalDtype(state['_categories'], + state['_ordered']) + + for k, v in compat.iteritems(state): + setattr(self, k, v) + + @property + def T(self): + return self + + @property + def nbytes(self): + return self._codes.nbytes + self.dtype.categories.values.nbytes + + def memory_usage(self, deep=False): + """ + Memory usage of my values + + Parameters + ---------- + deep : bool + Introspect the data deeply, interrogate + `object` dtypes for system-level memory consumption + + Returns + ------- + bytes used + + Notes + ----- + Memory usage does not include memory consumed by elements that + are not components of the array if deep=False + + See Also + -------- + numpy.ndarray.nbytes + """ + return self._codes.nbytes + self.dtype.categories.memory_usage( + deep=deep) + + @Substitution(klass='Categorical') + @Appender(_shared_docs['searchsorted']) + @deprecate_kwarg(old_arg_name='v', 
new_arg_name='value') + def searchsorted(self, value, side='left', sorter=None): + if not self.ordered: + raise ValueError("Categorical not ordered\nyou can use " + ".as_ordered() to change the Categorical to an " + "ordered one") + + from pandas.core.series import Series + + values_as_codes = _get_codes_for_values(Series(value).values, + self.categories) + + if -1 in values_as_codes: + raise ValueError("Value(s) to be inserted must be in categories.") + + return self.codes.searchsorted(values_as_codes, side=side, + sorter=sorter) + + def isna(self): + """ + Detect missing values + + Both missing values (-1 in .codes) and NA as a category are detected. + + Returns + ------- + a boolean array of whether my values are null + + See also + -------- + isna : top-level isna + isnull : alias of isna + Categorical.notna : boolean inverse of Categorical.isna + + """ + + ret = self._codes == -1 + + # String/object and float categories can hold np.nan + if self.categories.dtype.kind in ['S', 'O', 'f']: + if np.nan in self.categories: + nan_pos = np.where(isna(self.categories))[0] + # we only have one NA in categories + ret = np.logical_or(ret, self._codes == nan_pos) + return ret + isnull = isna + + def notna(self): + """ + Inverse of isna + + Both missing values (-1 in .codes) and NA as a category are detected as + null. + + Returns + ------- + a boolean array of whether my values are not null + + See also + -------- + notna : top-level notna + notnull : alias of notna + Categorical.isna : boolean inverse of Categorical.notna + + """ + return ~self.isna() + notnull = notna + + def put(self, *args, **kwargs): + """ + Replace specific elements in the Categorical with given values. + """ + raise NotImplementedError(("'put' is not yet implemented " + "for Categorical")) + + def dropna(self): + """ + Return the Categorical without null values. + + Both missing values (-1 in .codes) and NA as a category are detected. + NA is removed from the categories if present. 
+ + Returns + ------- + valid : Categorical + """ + result = self[self.notna()] + if isna(result.categories).any(): + result = result.remove_categories([np.nan]) + return result + + def value_counts(self, dropna=True): + """ + Returns a Series containing counts of each category. + + Every category will have an entry, even those with a count of 0. + + Parameters + ---------- + dropna : boolean, default True + Don't include counts of NaN, even if NaN is a category. + + Returns + ------- + counts : Series + + See Also + -------- + Series.value_counts + + """ + from numpy import bincount + from pandas import isna, Series, CategoricalIndex + + obj = (self.remove_categories([np.nan]) if dropna and + isna(self.categories).any() else self) + code, cat = obj._codes, obj.categories + ncat, mask = len(cat), 0 <= code + ix, clean = np.arange(ncat), mask.all() + + if dropna or clean: + obs = code if clean else code[mask] + count = bincount(obs, minlength=ncat or None) + else: + count = bincount(np.where(mask, code, ncat)) + ix = np.append(ix, -1) + + ix = self._constructor(ix, dtype=self.dtype, + fastpath=True) + + return Series(count, index=CategoricalIndex(ix), dtype='int64') + + def get_values(self): + """ Return the values. + + For internal compatibility with pandas formatting. 
+ + Returns + ------- + values : numpy array + A numpy array of the same dtype as categorical.categories.dtype or + Index if datetime / periods + """ + # if we are a datetime and period index, return Index to keep metadata + if is_datetimelike(self.categories): + return self.categories.take(self._codes, fill_value=np.nan) + return np.array(self) + + def check_for_ordered(self, op): + """ assert that we are ordered """ + if not self.ordered: + raise TypeError("Categorical is not ordered for operation {op}\n" + "you can use .as_ordered() to change the " + "Categorical to an ordered one\n".format(op=op)) + + def argsort(self, ascending=True, kind='quicksort', *args, **kwargs): + """ + Returns the indices that would sort the Categorical instance if + 'sort_values' was called. This function is implemented to provide + compatibility with numpy ndarray objects. + + While an ordering is applied to the category values, arg-sorting + in this context refers more to organizing and grouping together + based on matching category values. Thus, this function can be + called on an unordered Categorical instance unlike the functions + 'Categorical.min' and 'Categorical.max'. + + Returns + ------- + argsorted : numpy array + + See also + -------- + numpy.ndarray.argsort + """ + ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs) + result = np.argsort(self._codes.copy(), kind=kind, **kwargs) + if not ascending: + result = result[::-1] + return result + + def sort_values(self, inplace=False, ascending=True, na_position='last'): + """ Sorts the Categorical by category value returning a new + Categorical by default. + + While an ordering is applied to the category values, sorting in this + context refers more to organizing and grouping together based on + matching category values. Thus, this function can be called on an + unordered Categorical instance unlike the functions 'Categorical.min' + and 'Categorical.max'. 
+ + Parameters + ---------- + inplace : boolean, default False + Do operation in place. + ascending : boolean, default True + Order ascending. Passing False orders descending. The + ordering parameter provides the method by which the + category values are organized. + na_position : {'first', 'last'} (optional, default='last') + 'first' puts NaNs at the beginning + 'last' puts NaNs at the end + + Returns + ------- + y : Categorical or None + + See Also + -------- + Categorical.sort + Series.sort_values + + Examples + -------- + >>> c = pd.Categorical([1, 2, 2, 1, 5]) + >>> c + [1, 2, 2, 1, 5] + Categories (3, int64): [1, 2, 5] + >>> c.sort_values() + [1, 1, 2, 2, 5] + Categories (3, int64): [1, 2, 5] + >>> c.sort_values(ascending=False) + [5, 2, 2, 1, 1] + Categories (3, int64): [1, 2, 5] + + Inplace sorting can be done as well: + + >>> c.sort_values(inplace=True) + >>> c + [1, 1, 2, 2, 5] + Categories (3, int64): [1, 2, 5] + >>> + >>> c = pd.Categorical([1, 2, 2, 1, 5]) + + 'sort_values' behaviour with NaNs. 
Note that 'na_position' + is independent of the 'ascending' parameter: + + >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) + >>> c + [NaN, 2.0, 2.0, NaN, 5.0] + Categories (2, int64): [2, 5] + >>> c.sort_values() + [2.0, 2.0, 5.0, NaN, NaN] + Categories (2, int64): [2, 5] + >>> c.sort_values(ascending=False) + [5.0, 2.0, 2.0, NaN, NaN] + Categories (2, int64): [2, 5] + >>> c.sort_values(na_position='first') + [NaN, NaN, 2.0, 2.0, 5.0] + Categories (2, int64): [2, 5] + >>> c.sort_values(ascending=False, na_position='first') + [NaN, NaN, 5.0, 2.0, 2.0] + Categories (2, int64): [2, 5] + """ + inplace = validate_bool_kwarg(inplace, 'inplace') + if na_position not in ['last', 'first']: + msg = 'invalid na_position: {na_position!r}' + raise ValueError(msg.format(na_position=na_position)) + + codes = np.sort(self._codes) + if not ascending: + codes = codes[::-1] + + # NaN handling + na_mask = (codes == -1) + if na_mask.any(): + n_nans = len(codes[na_mask]) + if na_position == "first": + # in this case sort to the front + new_codes = codes.copy() + new_codes[0:n_nans] = -1 + new_codes[n_nans:] = codes[~na_mask] + codes = new_codes + elif na_position == "last": + # ... and to the end + new_codes = codes.copy() + pos = len(codes) - n_nans + new_codes[0:pos] = codes[~na_mask] + new_codes[pos:] = -1 + codes = new_codes + if inplace: + self._codes = codes + return + else: + return self._constructor(values=codes, categories=self.categories, + ordered=self.ordered, fastpath=True) + + def _values_for_rank(self): + """ + For correctly ranking ordered categorical data. See GH#15420 + + Ordered categorical data should be ranked on the basis of + codes with -1 translated to NaN. 
+ + Returns + ------- + numpy array + + """ + from pandas import Series + if self.ordered: + values = self.codes + mask = values == -1 + if mask.any(): + values = values.astype('float64') + values[mask] = np.nan + elif self.categories.is_numeric(): + values = np.array(self) + else: + # reorder the categories (so rank can use the float codes) + # instead of passing an object array to rank + values = np.array( + self.rename_categories(Series(self.categories).rank().values) + ) + return values + + def ravel(self, order='C'): + """ Return a flattened (numpy) array. + + For internal compatibility with numpy arrays. + + Returns + ------- + raveled : numpy array + """ + return np.array(self) + + def view(self): + """Return a view of myself. + + For internal compatibility with numpy arrays. + + Returns + ------- + view : Categorical + Returns `self`! + """ + return self + + def to_dense(self): + """Return my 'dense' representation + + For internal compatibility with numpy arrays. + + Returns + ------- + dense : array + """ + return np.asarray(self) + + @deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value') + def fillna(self, value=None, method=None, limit=None): + """ Fill NA/NaN values using the specified method. + + Parameters + ---------- + method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None + Method to use for filling holes in reindexed Series + pad / ffill: propagate last valid observation forward to next valid + backfill / bfill: use NEXT valid observation to fill gap + value : scalar, dict, Series + If a scalar value is passed it is used to fill all missing values. + Alternatively, a Series or dict can be used to fill in different + values for each index. The value should not be a list. The + value(s) passed should either be in the categories or should be + NaN. + limit : int, default None + (Not implemented yet for Categorical!) + If method is specified, this is the maximum number of consecutive + NaN values to forward/backward fill. 
In other words, if there is + a gap with more than this number of consecutive NaNs, it will only + be partially filled. If method is not specified, this is the + maximum number of entries along the entire axis where NaNs will be + filled. + + Returns + ------- + filled : Categorical with NA/NaN filled + """ + + if value is None: + value = np.nan + if limit is not None: + raise NotImplementedError("specifying a limit for fillna has not " + "been implemented yet") + + values = self._codes + + # Make sure that we also get NA in categories + if self.categories.dtype.kind in ['S', 'O', 'f']: + if np.nan in self.categories: + values = values.copy() + nan_pos = np.where(isna(self.categories))[0] + # we only have one NA in categories + values[values == nan_pos] = -1 + + # pad / bfill + if method is not None: + + values = self.to_dense().reshape(-1, len(self)) + values = interpolate_2d(values, method, 0, None, + value).astype(self.categories.dtype)[0] + values = _get_codes_for_values(values, self.categories) + + else: + + # If value is a dict or a Series (a dict value has already + # been converted to a Series) + if isinstance(value, ABCSeries): + if not value[~value.isin(self.categories)].isna().all(): + raise ValueError("fill value must be in categories") + + values_codes = _get_codes_for_values(value, self.categories) + indexer = np.where(values_codes != -1) + values[indexer] = values_codes[values_codes != -1] + + # If value is not a dict or Series it should be a scalar + elif is_scalar(value): + if not isna(value) and value not in self.categories: + raise ValueError("fill value must be in categories") + + mask = values == -1 + if mask.any(): + values = values.copy() + if isna(value): + values[mask] = -1 + else: + values[mask] = self.categories.get_loc(value) + + else: + raise TypeError('"value" parameter must be a scalar, dict ' + 'or Series, but you passed a ' + '"{0}"'.format(type(value).__name__)) + + return self._constructor(values, categories=self.categories, + 
ordered=self.ordered, fastpath=True) + + def take_nd(self, indexer, allow_fill=True, fill_value=None): + """ Take the codes by the indexer, fill with the fill_value. + + For internal compatibility with numpy arrays. + """ + + # filling must always be None/nan here + # but is passed thru internally + assert isna(fill_value) + + codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1) + result = self._constructor(codes, categories=self.categories, + ordered=self.ordered, fastpath=True) + return result + + take = take_nd + + def _slice(self, slicer): + """ Return a slice of myself. + + For internal compatibility with numpy arrays. + """ + + # only allow 1 dimensional slicing, but can + # in a 2-d case be passd (slice(None),....) + if isinstance(slicer, tuple) and len(slicer) == 2: + if not is_null_slice(slicer[0]): + raise AssertionError("invalid slicing for a 1-ndim " + "categorical") + slicer = slicer[1] + + _codes = self._codes[slicer] + return self._constructor(values=_codes, categories=self.categories, + ordered=self.ordered, fastpath=True) + + def __len__(self): + """The length of this Categorical.""" + return len(self._codes) + + def __iter__(self): + """Returns an Iterator over the values of this Categorical.""" + return iter(self.get_values()) + + def _tidy_repr(self, max_vals=10, footer=True): + """ a short repr displaying only max_vals and an optional (but default + footer) + """ + num = max_vals // 2 + head = self[:num]._get_repr(length=False, footer=False) + tail = self[-(max_vals - num):]._get_repr(length=False, footer=False) + + result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:]) + if footer: + result = u('{result}\n{footer}').format(result=result, + footer=self._repr_footer()) + + return compat.text_type(result) + + def _repr_categories(self): + """ return the base repr for the categories """ + max_categories = (10 if get_option("display.max_categories") == 0 else + get_option("display.max_categories")) + from 
pandas.io.formats import format as fmt + if len(self.categories) > max_categories: + num = max_categories // 2 + head = fmt.format_array(self.categories[:num], None) + tail = fmt.format_array(self.categories[-num:], None) + category_strs = head + ["..."] + tail + else: + category_strs = fmt.format_array(self.categories, None) + + # Strip all leading spaces, which format_array adds for columns... + category_strs = [x.strip() for x in category_strs] + return category_strs + + def _repr_categories_info(self): + """ Returns a string representation of the footer.""" + + category_strs = self._repr_categories() + dtype = getattr(self.categories, 'dtype_str', + str(self.categories.dtype)) + + levheader = "Categories ({length}, {dtype}): ".format( + length=len(self.categories), dtype=dtype) + width, height = get_terminal_size() + max_width = get_option("display.width") or width + if com.in_ipython_frontend(): + # 0 = no breaks + max_width = 0 + levstring = "" + start = True + cur_col_len = len(levheader) # header + sep_len, sep = (3, " < ") if self.ordered else (2, ", ") + linesep = sep.rstrip() + "\n" # remove whitespace + for val in category_strs: + if max_width != 0 and cur_col_len + sep_len + len(val) > max_width: + levstring += linesep + (" " * (len(levheader) + 1)) + cur_col_len = len(levheader) + 1 # header + a whitespace + elif not start: + levstring += sep + cur_col_len += len(val) + levstring += val + start = False + # replace to simple save space by + return levheader + "[" + levstring.replace(" < ... < ", " ... 
") + "]" + + def _repr_footer(self): + + return u('Length: {length}\n{info}').format( + length=len(self), info=self._repr_categories_info()) + + def _get_repr(self, length=True, na_rep='NaN', footer=True): + from pandas.io.formats import format as fmt + formatter = fmt.CategoricalFormatter(self, length=length, + na_rep=na_rep, footer=footer) + result = formatter.to_string() + return compat.text_type(result) + + def __unicode__(self): + """ Unicode representation. """ + _maxlen = 10 + if len(self._codes) > _maxlen: + result = self._tidy_repr(_maxlen) + elif len(self._codes) > 0: + result = self._get_repr(length=len(self) > _maxlen) + else: + msg = self._get_repr(length=False, footer=True).replace("\n", ", ") + result = ('[], {repr_msg}'.format(repr_msg=msg)) + + return result + + def _maybe_coerce_indexer(self, indexer): + """ return an indexer coerced to the codes dtype """ + if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i': + indexer = indexer.astype(self._codes.dtype) + return indexer + + def __getitem__(self, key): + """ Return an item. """ + if isinstance(key, (int, np.integer)): + i = self._codes[key] + if i == -1: + return np.nan + else: + return self.categories[i] + else: + return self._constructor(values=self._codes[key], + categories=self.categories, + ordered=self.ordered, fastpath=True) + + def __setitem__(self, key, value): + """ Item assignment. 
+ + + Raises + ------ + ValueError + If (one or more) Value is not in categories or if a assigned + `Categorical` does not have the same categories + """ + + # require identical categories set + if isinstance(value, Categorical): + if not value.categories.equals(self.categories): + raise ValueError("Cannot set a Categorical with another, " + "without identical categories") + + rvalue = value if is_list_like(value) else [value] + + from pandas import Index + to_add = Index(rvalue).difference(self.categories) + + # no assignments of values not in categories, but it's always ok to set + # something to np.nan + if len(to_add) and not isna(to_add).all(): + raise ValueError("Cannot setitem on a Categorical with a new " + "category, set the categories first") + + # set by position + if isinstance(key, (int, np.integer)): + pass + + # tuple of indexers (dataframe) + elif isinstance(key, tuple): + # only allow 1 dimensional slicing, but can + # in a 2-d case be passd (slice(None),....) + if len(key) == 2: + if not is_null_slice(key[0]): + raise AssertionError("invalid slicing for a 1-ndim " + "categorical") + key = key[1] + elif len(key) == 1: + key = key[0] + else: + raise AssertionError("invalid slicing for a 1-ndim " + "categorical") + + # slicing in Series or Categorical + elif isinstance(key, slice): + pass + + # Array of True/False in Series or Categorical + else: + # There is a bug in numpy, which does not accept a Series as a + # indexer + # https://github.com/pandas-dev/pandas/issues/6168 + # https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9 + # FIXME: remove when numpy 1.9 is the lowest numpy version pandas + # accepts... 
+ key = np.asarray(key) + + lindexer = self.categories.get_indexer(rvalue) + + # FIXME: the following can be removed after GH7820 is fixed: + # https://github.com/pandas-dev/pandas/issues/7820 + # float categories do currently return -1 for np.nan, even if np.nan is + # included in the index -> "repair" this here + if isna(rvalue).any() and isna(self.categories).any(): + nan_pos = np.where(isna(self.categories))[0] + lindexer[lindexer == -1] = nan_pos + + lindexer = self._maybe_coerce_indexer(lindexer) + self._codes[key] = lindexer + + def _reverse_indexer(self): + """ + Compute the inverse of a categorical, returning + a dict of categories -> indexers. + + *This is an internal function* + + Returns + ------- + dict of categories -> indexers + + Example + ------- + In [1]: c = pd.Categorical(list('aabca')) + + In [2]: c + Out[2]: + [a, a, b, c, a] + Categories (3, object): [a, b, c] + + In [3]: c.categories + Out[3]: Index([u'a', u'b', u'c'], dtype='object') + + In [4]: c.codes + Out[4]: array([0, 0, 1, 2, 0], dtype=int8) + + In [5]: c._reverse_indexer() + Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])} + + """ + categories = self.categories + r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'), + categories.size) + counts = counts.cumsum() + result = [r[counts[indexer]:counts[indexer + 1]] + for indexer in range(len(counts) - 1)] + result = dict(zip(categories, result)) + return result + + # reduction ops # + def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, + filter_type=None, **kwds): + """ perform the reduction type operation """ + func = getattr(self, name, None) + if func is None: + msg = 'Categorical cannot perform the operation {op}' + raise TypeError(msg.format(op=name)) + return func(numeric_only=numeric_only, **kwds) + + def min(self, numeric_only=None, **kwargs): + """ The minimum value of the object. + + Only ordered `Categoricals` have a minimum! 
+ + Raises + ------ + TypeError + If the `Categorical` is not `ordered`. + + Returns + ------- + min : the minimum of this `Categorical` + """ + self.check_for_ordered('min') + if numeric_only: + good = self._codes != -1 + pointer = self._codes[good].min(**kwargs) + else: + pointer = self._codes.min(**kwargs) + if pointer == -1: + return np.nan + else: + return self.categories[pointer] + + def max(self, numeric_only=None, **kwargs): + """ The maximum value of the object. + + Only ordered `Categoricals` have a maximum! + + Raises + ------ + TypeError + If the `Categorical` is not `ordered`. + + Returns + ------- + max : the maximum of this `Categorical` + """ + self.check_for_ordered('max') + if numeric_only: + good = self._codes != -1 + pointer = self._codes[good].max(**kwargs) + else: + pointer = self._codes.max(**kwargs) + if pointer == -1: + return np.nan + else: + return self.categories[pointer] + + def mode(self): + """ + Returns the mode(s) of the Categorical. + + Always returns `Categorical` even if only one value. + + Returns + ------- + modes : `Categorical` (sorted) + """ + + import pandas._libs.hashtable as htable + good = self._codes != -1 + values = sorted(htable.mode_int64(_ensure_int64(self._codes[good]))) + result = self._constructor(values=values, categories=self.categories, + ordered=self.ordered, fastpath=True) + return result + + def unique(self): + """ + Return the ``Categorical`` which ``categories`` and ``codes`` are + unique. Unused categories are NOT returned. + + - unordered category: values and categories are sorted by appearance + order. + - ordered category: values are sorted by appearance order, categories + keeps existing order. + + Returns + ------- + unique values : ``Categorical`` + + Examples + -------- + An unordered Categorical will return categories in the + order of appearance. 
+ + >>> pd.Categorical(list('baabc')) + [b, a, c] + Categories (3, object): [b, a, c] + + >>> pd.Categorical(list('baabc'), categories=list('abc')) + [b, a, c] + Categories (3, object): [b, a, c] + + An ordered Categorical preserves the category ordering. + + >>> pd.Categorical(list('baabc'), + ... categories=list('abc'), + ... ordered=True) + [b, a, c] + Categories (3, object): [a < b < c] + + See Also + -------- + unique + CategoricalIndex.unique + Series.unique + + """ + + # unlike np.unique, unique1d does not sort + unique_codes = unique1d(self.codes) + cat = self.copy() + + # keep nan in codes + cat._codes = unique_codes + + # exclude nan from indexer for categories + take_codes = unique_codes[unique_codes != -1] + if self.ordered: + take_codes = sorted(take_codes) + return cat.set_categories(cat.categories.take(take_codes)) + + def equals(self, other): + """ + Returns True if categorical arrays are equal. + + Parameters + ---------- + other : `Categorical` + + Returns + ------- + are_equal : boolean + """ + if self.is_dtype_equal(other): + if self.categories.equals(other.categories): + # fastpath to avoid re-coding + other_codes = other._codes + else: + other_codes = _recode_for_categories(other.codes, + other.categories, + self.categories) + return np.array_equal(self._codes, other_codes) + return False + + def is_dtype_equal(self, other): + """ + Returns True if categoricals are the same dtype + same categories, and same ordered + + Parameters + ---------- + other : Categorical + + Returns + ------- + are_equal : boolean + """ + + try: + return hash(self.dtype) == hash(other.dtype) + except (AttributeError, TypeError): + return False + + def describe(self): + """ Describes this Categorical + + Returns + ------- + description: `DataFrame` + A dataframe with frequency and counts by category. 
+ """ + counts = self.value_counts(dropna=False) + freqs = counts / float(counts.sum()) + + from pandas.core.reshape.concat import concat + result = concat([counts, freqs], axis=1) + result.columns = ['counts', 'freqs'] + result.index.name = 'categories' + + return result + + def repeat(self, repeats, *args, **kwargs): + """ + Repeat elements of a Categorical. + + See also + -------- + numpy.ndarray.repeat + + """ + nv.validate_repeat(args, kwargs) + codes = self._codes.repeat(repeats) + return self._constructor(values=codes, categories=self.categories, + ordered=self.ordered, fastpath=True) + +# The Series.cat accessor + + +class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): + """ + Accessor object for categorical properties of the Series values. + + Be aware that assigning to `categories` is a inplace operation, while all + methods return new categorical data per default (but can be called with + `inplace=True`). + + Parameters + ---------- + data : Series or CategoricalIndex + + Examples + -------- + >>> s.cat.categories + >>> s.cat.categories = list('abc') + >>> s.cat.rename_categories(list('cab')) + >>> s.cat.reorder_categories(list('cab')) + >>> s.cat.add_categories(['d','e']) + >>> s.cat.remove_categories(['d']) + >>> s.cat.remove_unused_categories() + >>> s.cat.set_categories(list('abcde')) + >>> s.cat.as_ordered() + >>> s.cat.as_unordered() + + """ + + def __init__(self, data): + self._validate(data) + self.categorical = data.values + self.index = data.index + self.name = data.name + self._freeze() + + @staticmethod + def _validate(data): + if not is_categorical_dtype(data.dtype): + raise AttributeError("Can only use .cat accessor with a " + "'category' dtype") + + def _delegate_property_get(self, name): + return getattr(self.categorical, name) + + def _delegate_property_set(self, name, new_values): + return setattr(self.categorical, name, new_values) + + @property + def codes(self): + from pandas import Series + return 
Series(self.categorical.codes, index=self.index) + + def _delegate_method(self, name, *args, **kwargs): + from pandas import Series + method = getattr(self.categorical, name) + res = method(*args, **kwargs) + if res is not None: + return Series(res, index=self.index, name=self.name) + + +CategoricalAccessor._add_delegate_accessors(delegate=Categorical, + accessors=["categories", + "ordered"], + typ='property') +CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[ + "rename_categories", "reorder_categories", "add_categories", + "remove_categories", "remove_unused_categories", "set_categories", + "as_ordered", "as_unordered"], typ='method') + +# utility routines + + +def _get_codes_for_values(values, categories): + """ + utility routine to turn values into codes given the specified categories + """ + + from pandas.core.algorithms import _get_data_algo, _hashtables + if not is_dtype_equal(values.dtype, categories.dtype): + values = _ensure_object(values) + categories = _ensure_object(categories) + + (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables) + (_, _), cats = _get_data_algo(categories, _hashtables) + t = hash_klass(len(cats)) + t.map_locations(cats) + return coerce_indexer_dtype(t.lookup(vals), cats) + + +def _recode_for_categories(codes, old_categories, new_categories): + """ + Convert a set of codes for to a new set of categories + + Parameters + ---------- + codes : array + old_categories, new_categories : Index + + Returns + ------- + new_codes : array + + Examples + -------- + >>> old_cat = pd.Index(['b', 'a', 'c']) + >>> new_cat = pd.Index(['a', 'b']) + >>> codes = np.array([0, 1, 1, 2]) + >>> _recode_for_categories(codes, old_cat, new_cat) + array([ 1, 0, 0, -1]) + """ + from pandas.core.algorithms import take_1d + + if len(old_categories) == 0: + # All null anyway, so just retain the nulls + return codes.copy() + indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), + new_categories) + 
new_codes = take_1d(indexer, codes.copy(), fill_value=-1) + return new_codes + + +def _convert_to_list_like(list_like): + if hasattr(list_like, "dtype"): + return list_like + if isinstance(list_like, list): + return list_like + if (is_sequence(list_like) or isinstance(list_like, tuple) or + isinstance(list_like, types.GeneratorType)): + return list(list_like) + elif is_scalar(list_like): + return [list_like] + else: + # is this reached? + return [list_like] + + +def _factorize_from_iterable(values): + """ + Factorize an input `values` into `categories` and `codes`. Preserves + categorical dtype in `categories`. + + *This is an internal function* + + Parameters + ---------- + values : list-like + + Returns + ------- + codes : ndarray + categories : Index + If `values` has a categorical dtype, then `categories` is + a CategoricalIndex keeping the categories and order of `values`. + """ + from pandas.core.indexes.category import CategoricalIndex + + if not is_list_like(values): + raise TypeError("Input must be list-like") + + if is_categorical(values): + if isinstance(values, (ABCCategoricalIndex, ABCSeries)): + values = values._values + categories = CategoricalIndex(values.categories, + categories=values.categories, + ordered=values.ordered) + codes = values.codes + else: + cat = Categorical(values, ordered=True) + categories = cat.categories + codes = cat.codes + return codes, categories + + +def _factorize_from_iterables(iterables): + """ + A higher-level wrapper over `_factorize_from_iterable`. + + *This is an internal function* + + Parameters + ---------- + iterables : list-like of list-likes + + Returns + ------- + codes_list : list of ndarrays + categories_list : list of Indexes + + Notes + ----- + See `_factorize_from_iterable` for more info. + """ + if len(iterables) == 0: + # For consistency, it should return a list of 2 lists. 
+ return [[], []] + return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables])) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 7b11e37a14b51..17435dfc48bde 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -1,2331 +1,7 @@ -# pylint: disable=E1101,W0232 +import warnings -import numpy as np -from warnings import warn -import types +# TODO: Remove after 0.23.x +warnings.warn("'pandas.core' is private. Use 'pandas.Categorical'", + FutureWarning, stacklevel=2) -from pandas import compat -from pandas.compat import u, lzip -from pandas._libs import lib, algos as libalgos - -from pandas.core.dtypes.generic import ( - ABCSeries, ABCIndexClass, ABCCategoricalIndex) -from pandas.core.dtypes.missing import isna, notna -from pandas.core.dtypes.cast import ( - maybe_infer_to_datetimelike, - coerce_indexer_dtype) -from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.dtypes.common import ( - _ensure_int64, - _ensure_object, - _ensure_platform_int, - is_dtype_equal, - is_datetimelike, - is_datetime64_dtype, - is_timedelta64_dtype, - is_categorical, - is_categorical_dtype, - is_list_like, is_sequence, - is_scalar, - is_dict_like) -from pandas.core.common import is_null_slice, _maybe_box_datetimelike - -from pandas.core.algorithms import factorize, take_1d, unique1d -from pandas.core.accessor import PandasDelegate -from pandas.core.base import (PandasObject, - NoNewAttributesMixin, _shared_docs) -import pandas.core.common as com -from pandas.core.missing import interpolate_2d -from pandas.compat.numpy import function as nv -from pandas.util._decorators import ( - Appender, cache_readonly, deprecate_kwarg, Substitution) - -from pandas.io.formats.terminal import get_terminal_size -from pandas.util._validators import validate_bool_kwarg -from pandas.core.config import get_option - - -def _cat_compare_op(op): - def f(self, other): - # On python2, you can usually compare any type to any type, and - # 
Categoricals can be seen as a custom type, but having different - # results depending whether categories are the same or not is kind of - # insane, so be a bit stricter here and use the python3 idea of - # comparing only things of equal type. - if not self.ordered: - if op in ['__lt__', '__gt__', '__le__', '__ge__']: - raise TypeError("Unordered Categoricals can only compare " - "equality or not") - if isinstance(other, Categorical): - # Two Categoricals can only be be compared if the categories are - # the same (maybe up to ordering, depending on ordered) - - msg = ("Categoricals can only be compared if " - "'categories' are the same.") - if len(self.categories) != len(other.categories): - raise TypeError(msg + " Categories are different lengths") - elif (self.ordered and not (self.categories == - other.categories).all()): - raise TypeError(msg) - elif not set(self.categories) == set(other.categories): - raise TypeError(msg) - - if not (self.ordered == other.ordered): - raise TypeError("Categoricals can only be compared if " - "'ordered' is the same") - if not self.ordered and not self.categories.equals( - other.categories): - # both unordered and different order - other_codes = _get_codes_for_values(other, self.categories) - else: - other_codes = other._codes - - na_mask = (self._codes == -1) | (other_codes == -1) - f = getattr(self._codes, op) - ret = f(other_codes) - if na_mask.any(): - # In other series, the leads to False, so do that here too - ret[na_mask] = False - return ret - - # Numpy-1.9 and earlier may convert a scalar to a zerodim array during - # comparison operation when second arg has higher priority, e.g. - # - # cat[0] < cat - # - # With cat[0], for example, being ``np.int64(1)`` by the time it gets - # into this function would become ``np.array(1)``. 
- other = lib.item_from_zerodim(other) - if is_scalar(other): - if other in self.categories: - i = self.categories.get_loc(other) - return getattr(self._codes, op)(i) - else: - if op == '__eq__': - return np.repeat(False, len(self)) - elif op == '__ne__': - return np.repeat(True, len(self)) - else: - msg = ("Cannot compare a Categorical for op {op} with a " - "scalar, which is not a category.") - raise TypeError(msg.format(op=op)) - else: - - # allow categorical vs object dtype array comparisons for equality - # these are only positional comparisons - if op in ['__eq__', '__ne__']: - return getattr(np.array(self), op)(np.array(other)) - - msg = ("Cannot compare a Categorical for op {op} with type {typ}." - "\nIf you want to compare values, use 'np.asarray(cat) " - "<op> other'.") - raise TypeError(msg.format(op=op, typ=type(other))) - - f.__name__ = op - - return f - - -def _maybe_to_categorical(array): - """ - Coerce to a categorical if a series is given. - - Internal use ONLY. - """ - if isinstance(array, (ABCSeries, ABCCategoricalIndex)): - return array._values - elif isinstance(array, np.ndarray): - return Categorical(array) - return array - - -_codes_doc = """The category codes of this categorical. - -Level codes are an array if integer which are the positions of the real -values in the categories array. - -There is not setter, use the other categorical methods and the normal item -setter to change values in the categorical. -""" - - -class Categorical(PandasObject): - """ - Represents a categorical variable in classic R / S-plus fashion - - `Categoricals` can only take on only a limited, and usually fixed, number - of possible values (`categories`). In contrast to statistical categorical - variables, a `Categorical` might have an order, but numerical operations - (additions, divisions, ...) are not possible. - - All values of the `Categorical` are either in `categories` or `np.nan`. - Assigning values outside of `categories` will raise a `ValueError`. 
Order - is defined by the order of the `categories`, not lexical order of the - values. - - Parameters - ---------- - values : list-like - The values of the categorical. If categories are given, values not in - categories will be replaced with NaN. - categories : Index-like (unique), optional - The unique categories for this categorical. If not given, the - categories are assumed to be the unique values of values. - ordered : boolean, (default False) - Whether or not this categorical is treated as a ordered categorical. - If not given, the resulting categorical will not be ordered. - dtype : CategoricalDtype - An instance of ``CategoricalDtype`` to use for this categorical - - .. versionadded:: 0.21.0 - - Attributes - ---------- - categories : Index - The categories of this categorical - codes : ndarray - The codes (integer positions, which point to the categories) of this - categorical, read only. - ordered : boolean - Whether or not this Categorical is ordered. - dtype : CategoricalDtype - The instance of ``CategoricalDtype`` storing the ``categories`` - and ``ordered``. - - .. versionadded:: 0.21.0 - - Methods - ------- - from_codes - __array__ - - Raises - ------ - ValueError - If the categories do not validate. - TypeError - If an explicit ``ordered=True`` is given but no `categories` and the - `values` are not sortable. - - Examples - -------- - >>> pd.Categorical([1, 2, 3, 1, 2, 3]) - [1, 2, 3, 1, 2, 3] - Categories (3, int64): [1, 2, 3] - - >>> pd.Categorical(['a', 'b', 'c', 'a', 'b', 'c']) - [a, b, c, a, b, c] - Categories (3, object): [a, b, c] - - Ordered `Categoricals` can be sorted according to the custom order - of the categories and can have a min and max value. - - >>> c = pd.Categorical(['a','b','c','a','b','c'], ordered=True, - ... 
categories=['c', 'b', 'a']) - >>> c - [a, b, c, a, b, c] - Categories (3, object): [c < b < a] - >>> c.min() - 'c' - - Notes - ----- - See the `user guide - <http://pandas.pydata.org/pandas-docs/stable/categorical.html>`_ for more. - - See also - -------- - pandas.api.types.CategoricalDtype : Type for categorical data - CategoricalIndex : An Index with an underlying ``Categorical`` - """ - - # For comparisons, so that numpy uses our implementation if the compare - # ops, which raise - __array_priority__ = 1000 - _dtype = CategoricalDtype() - _deprecations = frozenset(['labels']) - _typ = 'categorical' - - def __init__(self, values, categories=None, ordered=None, dtype=None, - fastpath=False): - - # Ways of specifying the dtype (prioritized ordered) - # 1. dtype is a CategoricalDtype - # a.) with known categories, use dtype.categories - # b.) else with Categorical values, use values.dtype - # c.) else, infer from values - # d.) specifying dtype=CategoricalDtype and categories is an error - # 2. dtype is a string 'category' - # a.) use categories, ordered - # b.) use values.dtype - # c.) infer from values - # 3. dtype is None - # a.) use categories, ordered - # b.) use values.dtype - # c.) 
infer from values - - if dtype is not None: - # The dtype argument takes precedence over values.dtype (if any) - if isinstance(dtype, compat.string_types): - if dtype == 'category': - dtype = CategoricalDtype(categories, ordered) - else: - msg = "Unknown `dtype` {dtype}" - raise ValueError(msg.format(dtype=dtype)) - elif categories is not None or ordered is not None: - raise ValueError("Cannot specify both `dtype` and `categories`" - " or `ordered`.") - - categories = dtype.categories - ordered = dtype.ordered - - elif is_categorical(values): - # If no "dtype" was passed, use the one from "values", but honor - # the "ordered" and "categories" arguments - dtype = values.dtype._from_categorical_dtype(values.dtype, - categories, ordered) - else: - # If dtype=None and values is not categorical, create a new dtype - dtype = CategoricalDtype(categories, ordered) - - # At this point, dtype is always a CategoricalDtype - # if dtype.categories is None, we are inferring - - if fastpath: - self._codes = coerce_indexer_dtype(values, categories) - self._dtype = dtype - return - - # null_mask indicates missing values we want to exclude from inference. - # This means: only missing values in list-likes (not arrays/ndframes). 
- null_mask = np.array(False) - - # sanitize input - if is_categorical_dtype(values): - if dtype.categories is None: - dtype = CategoricalDtype(values.categories, dtype.ordered) - - elif not isinstance(values, (ABCIndexClass, ABCSeries)): - # _sanitize_array coerces np.nan to a string under certain versions - # of numpy - values = maybe_infer_to_datetimelike(values, convert_dates=True) - if not isinstance(values, np.ndarray): - values = _convert_to_list_like(values) - from pandas.core.series import _sanitize_array - # By convention, empty lists result in object dtype: - if len(values) == 0: - sanitize_dtype = 'object' - else: - sanitize_dtype = None - null_mask = isna(values) - if null_mask.any(): - values = [values[idx] for idx in np.where(~null_mask)[0]] - values = _sanitize_array(values, None, dtype=sanitize_dtype) - - if dtype.categories is None: - try: - codes, categories = factorize(values, sort=True) - except TypeError: - codes, categories = factorize(values, sort=False) - if dtype.ordered: - # raise, as we don't have a sortable data structure and so - # the user should give us one by specifying categories - raise TypeError("'values' is not ordered, please " - "explicitly specify the categories order " - "by passing in a categories argument.") - except ValueError: - - # FIXME - raise NotImplementedError("> 1 ndim Categorical are not " - "supported at this time") - - # we're inferring from values - dtype = CategoricalDtype(categories, dtype.ordered) - - elif is_categorical_dtype(values): - old_codes = (values.cat.codes if isinstance(values, ABCSeries) - else values.codes) - codes = _recode_for_categories(old_codes, values.dtype.categories, - dtype.categories) - - else: - codes = _get_codes_for_values(values, dtype.categories) - - if null_mask.any(): - # Reinsert -1 placeholders for previously removed missing values - full_codes = - np.ones(null_mask.shape, dtype=codes.dtype) - full_codes[~null_mask] = codes - codes = full_codes - - self._dtype = dtype - 
self._codes = coerce_indexer_dtype(codes, dtype.categories) - - @property - def categories(self): - """The categories of this categorical. - - Setting assigns new values to each category (effectively a rename of - each individual category). - - The assigned value has to be a list-like object. All items must be - unique and the number of items in the new categories must be the same - as the number of items in the old categories. - - Assigning to `categories` is a inplace operation! - - Raises - ------ - ValueError - If the new categories do not validate as categories or if the - number of new categories is unequal the number of old categories - - See also - -------- - rename_categories - reorder_categories - add_categories - remove_categories - remove_unused_categories - set_categories - """ - return self.dtype.categories - - @categories.setter - def categories(self, categories): - new_dtype = CategoricalDtype(categories, ordered=self.ordered) - if (self.dtype.categories is not None and - len(self.dtype.categories) != len(new_dtype.categories)): - raise ValueError("new categories need to have the same number of " - "items as the old categories!") - self._dtype = new_dtype - - @property - def ordered(self): - """Whether the categories have an ordered relationship""" - return self.dtype.ordered - - @property - def dtype(self): - """The :class:`~pandas.api.types.CategoricalDtype` for this instance""" - return self._dtype - - @property - def _constructor(self): - return Categorical - - def copy(self): - """ Copy constructor. """ - return self._constructor(values=self._codes.copy(), - categories=self.categories, - ordered=self.ordered, - fastpath=True) - - def astype(self, dtype, copy=True): - """ - Coerce this type to another dtype - - Parameters - ---------- - dtype : numpy dtype or pandas type - copy : bool, default True - By default, astype always returns a newly allocated object. - If copy is set to False and dtype is categorical, the original - object is returned. 
- - .. versionadded:: 0.19.0 - - """ - if is_categorical_dtype(dtype): - # GH 10696/18593 - dtype = self.dtype._update_dtype(dtype) - self = self.copy() if copy else self - if dtype == self.dtype: - return self - return self._set_dtype(dtype) - return np.array(self, dtype=dtype, copy=copy) - - @cache_readonly - def ndim(self): - """Number of dimensions of the Categorical """ - return self._codes.ndim - - @cache_readonly - def size(self): - """ return the len of myself """ - return len(self) - - @cache_readonly - def itemsize(self): - """ return the size of a single category """ - return self.categories.itemsize - - def tolist(self): - """ - Return a list of the values. - - These are each a scalar type, which is a Python scalar - (for str, int, float) or a pandas scalar - (for Timestamp/Timedelta/Interval/Period) - """ - if is_datetimelike(self.categories): - return [_maybe_box_datetimelike(x) for x in self] - return np.array(self).tolist() - - @property - def base(self): - """ compat, we are always our own object """ - return None - - @classmethod - def _from_inferred_categories(cls, inferred_categories, inferred_codes, - dtype): - """Construct a Categorical from inferred values - - For inferred categories (`dtype` is None) the categories are sorted. - For explicit `dtype`, the `inferred_categories` are cast to the - appropriate type. 
- - Parameters - ---------- - - inferred_categories : Index - inferred_codes : Index - dtype : CategoricalDtype or 'category' - - Returns - ------- - Categorical - """ - from pandas import Index, to_numeric, to_datetime, to_timedelta - - cats = Index(inferred_categories) - - known_categories = (isinstance(dtype, CategoricalDtype) and - dtype.categories is not None) - - if known_categories: - # Convert to a specialzed type with `dtype` if specified - if dtype.categories.is_numeric(): - cats = to_numeric(inferred_categories, errors='coerce') - elif is_datetime64_dtype(dtype.categories): - cats = to_datetime(inferred_categories, errors='coerce') - elif is_timedelta64_dtype(dtype.categories): - cats = to_timedelta(inferred_categories, errors='coerce') - - if known_categories: - # recode from observation oder to dtype.categories order - categories = dtype.categories - codes = _recode_for_categories(inferred_codes, cats, categories) - elif not cats.is_monotonic_increasing: - # sort categories and recode for unknown categories - unsorted = cats.copy() - categories = cats.sort_values() - codes = _recode_for_categories(inferred_codes, unsorted, - categories) - dtype = CategoricalDtype(categories, ordered=False) - else: - dtype = CategoricalDtype(cats, ordered=False) - codes = inferred_codes - - return cls(codes, dtype=dtype, fastpath=True) - - @classmethod - def from_codes(cls, codes, categories, ordered=False): - """ - Make a Categorical type from codes and categories arrays. - - This constructor is useful if you already have codes and categories and - so do not need the (computation intensive) factorization step, which is - usually done on the constructor. - - If your data does not follow this convention, please use the normal - constructor. - - Parameters - ---------- - codes : array-like, integers - An integer array, where each integer points to a category in - categories or -1 for NaN - categories : index-like - The categories for the categorical. 
Items need to be unique. - ordered : boolean, (default False) - Whether or not this categorical is treated as a ordered - categorical. If not given, the resulting categorical will be - unordered. - """ - try: - codes = np.asarray(codes, np.int64) - except: - raise ValueError( - "codes need to be convertible to an arrays of integers") - - categories = CategoricalDtype._validate_categories(categories) - - if len(codes) and (codes.max() >= len(categories) or codes.min() < -1): - raise ValueError("codes need to be between -1 and " - "len(categories)-1") - - return cls(codes, categories=categories, ordered=ordered, - fastpath=True) - - _codes = None - - def _get_codes(self): - """ Get the codes. - - Returns - ------- - codes : integer array view - A non writable view of the `codes` array. - """ - v = self._codes.view() - v.flags.writeable = False - return v - - def _set_codes(self, codes): - """ - Not settable by the user directly - """ - raise ValueError("cannot set Categorical codes directly") - - codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc) - - def _set_categories(self, categories, fastpath=False): - """ Sets new categories inplace - - Parameters - ---------- - fastpath : boolean (default: False) - Don't perform validation of the categories for uniqueness or nulls - - Examples - -------- - >>> c = Categorical(['a', 'b']) - >>> c - [a, b] - Categories (2, object): [a, b] - - >>> c._set_categories(pd.Index(['a', 'c'])) - >>> c - [a, c] - Categories (2, object): [a, c] - """ - - if fastpath: - new_dtype = CategoricalDtype._from_fastpath(categories, - self.ordered) - else: - new_dtype = CategoricalDtype(categories, ordered=self.ordered) - if (not fastpath and self.dtype.categories is not None and - len(new_dtype.categories) != len(self.dtype.categories)): - raise ValueError("new categories need to have the same number of " - "items than the old categories!") - - self._dtype = new_dtype - - def _codes_for_groupby(self, sort): - """ - If sort=False, 
return a copy of self, coded with categories as - returned by .unique(), followed by any categories not appearing in - the data. If sort=True, return self. - - This method is needed solely to ensure the categorical index of the - GroupBy result has categories in the order of appearance in the data - (GH-8868). - - Parameters - ---------- - sort : boolean - The value of the sort parameter groupby was called with. - - Returns - ------- - Categorical - If sort=False, the new categories are set to the order of - appearance in codes (unless ordered=True, in which case the - original order is preserved), followed by any unrepresented - categories in the original order. - """ - - # Already sorted according to self.categories; all is fine - if sort: - return self - - # sort=False should order groups in as-encountered order (GH-8868) - cat = self.unique() - - # But for groupby to work, all categories should be present, - # including those missing from the data (GH-13179), which .unique() - # above dropped - cat.add_categories( - self.categories[~self.categories.isin(cat.categories)], - inplace=True) - - return self.reorder_categories(cat.categories) - - def _set_dtype(self, dtype): - """Internal method for directly updating the CategoricalDtype - - Parameters - ---------- - dtype : CategoricalDtype - - Notes - ----- - We don't do any validation here. It's assumed that the dtype is - a (valid) instance of `CategoricalDtype`. 
- """ - codes = _recode_for_categories(self.codes, self.categories, - dtype.categories) - return type(self)(codes, dtype=dtype, fastpath=True) - - def set_ordered(self, value, inplace=False): - """ - Sets the ordered attribute to the boolean value - - Parameters - ---------- - value : boolean to set whether this categorical is ordered (True) or - not (False) - inplace : boolean (default: False) - Whether or not to set the ordered attribute inplace or return a copy - of this categorical with ordered set to the value - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - new_dtype = CategoricalDtype(self.categories, ordered=value) - cat = self if inplace else self.copy() - cat._dtype = new_dtype - if not inplace: - return cat - - def as_ordered(self, inplace=False): - """ - Sets the Categorical to be ordered - - Parameters - ---------- - inplace : boolean (default: False) - Whether or not to set the ordered attribute inplace or return a copy - of this categorical with ordered set to True - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - return self.set_ordered(True, inplace=inplace) - - def as_unordered(self, inplace=False): - """ - Sets the Categorical to be unordered - - Parameters - ---------- - inplace : boolean (default: False) - Whether or not to set the ordered attribute inplace or return a copy - of this categorical with ordered set to False - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - return self.set_ordered(False, inplace=inplace) - - def set_categories(self, new_categories, ordered=None, rename=False, - inplace=False): - """ Sets the categories to the specified new_categories. - - `new_categories` can include new categories (which will result in - unused categories) or remove old categories (which results in values - set to NaN). If `rename==True`, the categories will simple be renamed - (less or more items than in old categories will result in values set to - NaN or in unused categories respectively). 
- - This method can be used to perform more than one action of adding, - removing, and reordering simultaneously and is therefore faster than - performing the individual steps via the more specialised methods. - - On the other hand this methods does not do checks (e.g., whether the - old categories are included in the new categories on a reorder), which - can result in surprising changes, for example when using special string - dtypes on python3, which does not considers a S1 string equal to a - single char python string. - - Raises - ------ - ValueError - If new_categories does not validate as categories - - Parameters - ---------- - new_categories : Index-like - The categories in new order. - ordered : boolean, (default: False) - Whether or not the categorical is treated as a ordered categorical. - If not given, do not change the ordered information. - rename : boolean (default: False) - Whether or not the new_categories should be considered as a rename - of the old categories or as reordered categories. - inplace : boolean (default: False) - Whether or not to reorder the categories inplace or return a copy of - this categorical with reordered categories. - - Returns - ------- - cat : Categorical with reordered categories or None if inplace. 
- - See also - -------- - rename_categories - reorder_categories - add_categories - remove_categories - remove_unused_categories - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - if ordered is None: - ordered = self.dtype.ordered - new_dtype = CategoricalDtype(new_categories, ordered=ordered) - - cat = self if inplace else self.copy() - if rename: - if (cat.dtype.categories is not None and - len(new_dtype.categories) < len(cat.dtype.categories)): - # remove all _codes which are larger and set to -1/NaN - self._codes[self._codes >= len(new_dtype.categories)] = -1 - else: - codes = _recode_for_categories(self.codes, self.categories, - new_dtype.categories) - cat._codes = codes - cat._dtype = new_dtype - - if not inplace: - return cat - - def rename_categories(self, new_categories, inplace=False): - """ Renames categories. - - Raises - ------ - ValueError - If new categories are list-like and do not have the same number of - items than the current categories or do not validate as categories - - Parameters - ---------- - new_categories : list-like, dict-like or callable - - * list-like: all items must be unique and the number of items in - the new categories must match the existing number of categories. - - * dict-like: specifies a mapping from - old categories to new. Categories not contained in the mapping - are passed through and extra categories in the mapping are - ignored. - - .. versionadded:: 0.21.0 - - * callable : a callable that is called on all items in the old - categories and whose return values comprise the new categories. - - .. versionadded:: 0.23.0 - - .. warning:: - - Currently, Series are considered list like. In a future version - of pandas they'll be considered dict-like. - - inplace : boolean (default: False) - Whether or not to rename the categories inplace or return a copy of - this categorical with renamed categories. - - Returns - ------- - cat : Categorical or None - With ``inplace=False``, the new categorical is returned. 
- With ``inplace=True``, there is no return value. - - See also - -------- - reorder_categories - add_categories - remove_categories - remove_unused_categories - set_categories - - Examples - -------- - >>> c = Categorical(['a', 'a', 'b']) - >>> c.rename_categories([0, 1]) - [0, 0, 1] - Categories (2, int64): [0, 1] - - For dict-like ``new_categories``, extra keys are ignored and - categories not in the dictionary are passed through - - >>> c.rename_categories({'a': 'A', 'c': 'C'}) - [A, A, b] - Categories (2, object): [A, b] - - You may also provide a callable to create the new categories - - >>> c.rename_categories(lambda x: x.upper()) - [A, A, B] - Categories (2, object): [A, B] - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - cat = self if inplace else self.copy() - - if isinstance(new_categories, ABCSeries): - msg = ("Treating Series 'new_categories' as a list-like and using " - "the values. In a future version, 'rename_categories' will " - "treat Series like a dictionary.\n" - "For dict-like, use 'new_categories.to_dict()'\n" - "For list-like, use 'new_categories.values'.") - warn(msg, FutureWarning, stacklevel=2) - new_categories = list(new_categories) - - if is_dict_like(new_categories): - cat.categories = [new_categories.get(item, item) - for item in cat.categories] - elif callable(new_categories): - cat.categories = [new_categories(item) for item in cat.categories] - else: - cat.categories = new_categories - if not inplace: - return cat - - def reorder_categories(self, new_categories, ordered=None, inplace=False): - """ Reorders categories as specified in new_categories. - - `new_categories` need to include all old categories and no new category - items. - - Raises - ------ - ValueError - If the new categories do not contain all old category items or any - new ones - - Parameters - ---------- - new_categories : Index-like - The categories in new order. 
- ordered : boolean, optional - Whether or not the categorical is treated as a ordered categorical. - If not given, do not change the ordered information. - inplace : boolean (default: False) - Whether or not to reorder the categories inplace or return a copy of - this categorical with reordered categories. - - Returns - ------- - cat : Categorical with reordered categories or None if inplace. - - See also - -------- - rename_categories - add_categories - remove_categories - remove_unused_categories - set_categories - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - if set(self.dtype.categories) != set(new_categories): - raise ValueError("items in new_categories are not the same as in " - "old categories") - return self.set_categories(new_categories, ordered=ordered, - inplace=inplace) - - def add_categories(self, new_categories, inplace=False): - """ Add new categories. - - `new_categories` will be included at the last/highest place in the - categories and will be unused directly after this call. - - Raises - ------ - ValueError - If the new categories include old categories or do not validate as - categories - - Parameters - ---------- - new_categories : category or list-like of category - The new categories to be included. - inplace : boolean (default: False) - Whether or not to add the categories inplace or return a copy of - this categorical with added categories. - - Returns - ------- - cat : Categorical with new categories added or None if inplace. 
- - See also - -------- - rename_categories - reorder_categories - remove_categories - remove_unused_categories - set_categories - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - if not is_list_like(new_categories): - new_categories = [new_categories] - already_included = set(new_categories) & set(self.dtype.categories) - if len(already_included) != 0: - msg = ("new categories must not include old categories: " - "{already_included!s}") - raise ValueError(msg.format(already_included=already_included)) - new_categories = list(self.dtype.categories) + list(new_categories) - new_dtype = CategoricalDtype(new_categories, self.ordered) - - cat = self if inplace else self.copy() - cat._dtype = new_dtype - cat._codes = coerce_indexer_dtype(cat._codes, new_dtype.categories) - if not inplace: - return cat - - def remove_categories(self, removals, inplace=False): - """ Removes the specified categories. - - `removals` must be included in the old categories. Values which were in - the removed categories will be set to NaN - - Raises - ------ - ValueError - If the removals are not contained in the categories - - Parameters - ---------- - removals : category or list of categories - The categories which should be removed. - inplace : boolean (default: False) - Whether or not to remove the categories inplace or return a copy of - this categorical with removed categories. - - Returns - ------- - cat : Categorical with removed categories or None if inplace. 
- - See also - -------- - rename_categories - reorder_categories - add_categories - remove_unused_categories - set_categories - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - if not is_list_like(removals): - removals = [removals] - - removal_set = set(list(removals)) - not_included = removal_set - set(self.dtype.categories) - new_categories = [c for c in self.dtype.categories - if c not in removal_set] - - # GH 10156 - if any(isna(removals)): - not_included = [x for x in not_included if notna(x)] - new_categories = [x for x in new_categories if notna(x)] - - if len(not_included) != 0: - msg = "removals must all be in old categories: {not_included!s}" - raise ValueError(msg.format(not_included=not_included)) - - return self.set_categories(new_categories, ordered=self.ordered, - rename=False, inplace=inplace) - - def remove_unused_categories(self, inplace=False): - """ Removes categories which are not used. - - Parameters - ---------- - inplace : boolean (default: False) - Whether or not to drop unused categories inplace or return a copy of - this categorical with unused categories dropped. - - Returns - ------- - cat : Categorical with unused categories dropped or None if inplace. - - See also - -------- - rename_categories - reorder_categories - add_categories - remove_categories - set_categories - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - cat = self if inplace else self.copy() - idx, inv = np.unique(cat._codes, return_inverse=True) - - if idx.size != 0 and idx[0] == -1: # na sentinel - idx, inv = idx[1:], inv - 1 - - new_categories = cat.dtype.categories.take(idx) - new_dtype = CategoricalDtype._from_fastpath(new_categories, - ordered=self.ordered) - cat._dtype = new_dtype - cat._codes = coerce_indexer_dtype(inv, new_dtype.categories) - - if not inplace: - return cat - - def map(self, mapper): - """Apply mapper function to its categories (not codes). - - Parameters - ---------- - mapper : callable - Function to be applied. 
When all categories are mapped - to different categories, the result will be Categorical which has - the same order property as the original. Otherwise, the result will - be np.ndarray. - - Returns - ------- - applied : Categorical or Index. - - """ - new_categories = self.categories.map(mapper) - try: - return self.from_codes(self._codes.copy(), - categories=new_categories, - ordered=self.ordered) - except ValueError: - return np.take(new_categories, self._codes) - - __eq__ = _cat_compare_op('__eq__') - __ne__ = _cat_compare_op('__ne__') - __lt__ = _cat_compare_op('__lt__') - __gt__ = _cat_compare_op('__gt__') - __le__ = _cat_compare_op('__le__') - __ge__ = _cat_compare_op('__ge__') - - # for Series/ndarray like compat - @property - def shape(self): - """ Shape of the Categorical. - - For internal compatibility with numpy arrays. - - Returns - ------- - shape : tuple - """ - - return tuple([len(self._codes)]) - - def shift(self, periods): - """ - Shift Categorical by desired number of periods. - - Parameters - ---------- - periods : int - Number of periods to move, can be positive or negative - - Returns - ------- - shifted : Categorical - """ - # since categoricals always have ndim == 1, an axis parameter - # doesn't make any sense here. - codes = self.codes - if codes.ndim > 1: - raise NotImplementedError("Categorical with ndim > 1.") - if np.prod(codes.shape) and (periods != 0): - codes = np.roll(codes, _ensure_platform_int(periods), axis=0) - if periods > 0: - codes[:periods] = -1 - else: - codes[periods:] = -1 - - return self.from_codes(codes, categories=self.categories, - ordered=self.ordered) - - def __array__(self, dtype=None): - """ - The numpy array interface. 
- - Returns - ------- - values : numpy array - A numpy array of either the specified dtype or, - if dtype==None (default), the same dtype as - categorical.categories.dtype - """ - ret = take_1d(self.categories.values, self._codes) - if dtype and not is_dtype_equal(dtype, self.categories.dtype): - return np.asarray(ret, dtype) - return ret - - def __setstate__(self, state): - """Necessary for making this object picklable""" - if not isinstance(state, dict): - raise Exception('invalid pickle state') - - # Provide compatibility with pre-0.15.0 Categoricals. - if '_categories' not in state and '_levels' in state: - state['_categories'] = self.dtype._validate_categories(state.pop( - '_levels')) - if '_codes' not in state and 'labels' in state: - state['_codes'] = coerce_indexer_dtype( - state.pop('labels'), state['_categories']) - - # 0.16.0 ordered change - if '_ordered' not in state: - - # >=15.0 < 0.16.0 - if 'ordered' in state: - state['_ordered'] = state.pop('ordered') - else: - state['_ordered'] = False - - # 0.21.0 CategoricalDtype change - if '_dtype' not in state: - state['_dtype'] = CategoricalDtype(state['_categories'], - state['_ordered']) - - for k, v in compat.iteritems(state): - setattr(self, k, v) - - @property - def T(self): - return self - - @property - def nbytes(self): - return self._codes.nbytes + self.dtype.categories.values.nbytes - - def memory_usage(self, deep=False): - """ - Memory usage of my values - - Parameters - ---------- - deep : bool - Introspect the data deeply, interrogate - `object` dtypes for system-level memory consumption - - Returns - ------- - bytes used - - Notes - ----- - Memory usage does not include memory consumed by elements that - are not components of the array if deep=False - - See Also - -------- - numpy.ndarray.nbytes - """ - return self._codes.nbytes + self.dtype.categories.memory_usage( - deep=deep) - - @Substitution(klass='Categorical') - @Appender(_shared_docs['searchsorted']) - @deprecate_kwarg(old_arg_name='v', 
new_arg_name='value') - def searchsorted(self, value, side='left', sorter=None): - if not self.ordered: - raise ValueError("Categorical not ordered\nyou can use " - ".as_ordered() to change the Categorical to an " - "ordered one") - - from pandas.core.series import Series - - values_as_codes = _get_codes_for_values(Series(value).values, - self.categories) - - if -1 in values_as_codes: - raise ValueError("Value(s) to be inserted must be in categories.") - - return self.codes.searchsorted(values_as_codes, side=side, - sorter=sorter) - - def isna(self): - """ - Detect missing values - - Both missing values (-1 in .codes) and NA as a category are detected. - - Returns - ------- - a boolean array of whether my values are null - - See also - -------- - isna : top-level isna - isnull : alias of isna - Categorical.notna : boolean inverse of Categorical.isna - - """ - - ret = self._codes == -1 - - # String/object and float categories can hold np.nan - if self.categories.dtype.kind in ['S', 'O', 'f']: - if np.nan in self.categories: - nan_pos = np.where(isna(self.categories))[0] - # we only have one NA in categories - ret = np.logical_or(ret, self._codes == nan_pos) - return ret - isnull = isna - - def notna(self): - """ - Inverse of isna - - Both missing values (-1 in .codes) and NA as a category are detected as - null. - - Returns - ------- - a boolean array of whether my values are not null - - See also - -------- - notna : top-level notna - notnull : alias of notna - Categorical.isna : boolean inverse of Categorical.notna - - """ - return ~self.isna() - notnull = notna - - def put(self, *args, **kwargs): - """ - Replace specific elements in the Categorical with given values. - """ - raise NotImplementedError(("'put' is not yet implemented " - "for Categorical")) - - def dropna(self): - """ - Return the Categorical without null values. - - Both missing values (-1 in .codes) and NA as a category are detected. - NA is removed from the categories if present. 
- - Returns - ------- - valid : Categorical - """ - result = self[self.notna()] - if isna(result.categories).any(): - result = result.remove_categories([np.nan]) - return result - - def value_counts(self, dropna=True): - """ - Returns a Series containing counts of each category. - - Every category will have an entry, even those with a count of 0. - - Parameters - ---------- - dropna : boolean, default True - Don't include counts of NaN, even if NaN is a category. - - Returns - ------- - counts : Series - - See Also - -------- - Series.value_counts - - """ - from numpy import bincount - from pandas import isna, Series, CategoricalIndex - - obj = (self.remove_categories([np.nan]) if dropna and - isna(self.categories).any() else self) - code, cat = obj._codes, obj.categories - ncat, mask = len(cat), 0 <= code - ix, clean = np.arange(ncat), mask.all() - - if dropna or clean: - obs = code if clean else code[mask] - count = bincount(obs, minlength=ncat or None) - else: - count = bincount(np.where(mask, code, ncat)) - ix = np.append(ix, -1) - - ix = self._constructor(ix, dtype=self.dtype, - fastpath=True) - - return Series(count, index=CategoricalIndex(ix), dtype='int64') - - def get_values(self): - """ Return the values. - - For internal compatibility with pandas formatting. 
- - Returns - ------- - values : numpy array - A numpy array of the same dtype as categorical.categories.dtype or - Index if datetime / periods - """ - # if we are a datetime and period index, return Index to keep metadata - if is_datetimelike(self.categories): - return self.categories.take(self._codes, fill_value=np.nan) - return np.array(self) - - def check_for_ordered(self, op): - """ assert that we are ordered """ - if not self.ordered: - raise TypeError("Categorical is not ordered for operation {op}\n" - "you can use .as_ordered() to change the " - "Categorical to an ordered one\n".format(op=op)) - - def argsort(self, ascending=True, kind='quicksort', *args, **kwargs): - """ - Returns the indices that would sort the Categorical instance if - 'sort_values' was called. This function is implemented to provide - compatibility with numpy ndarray objects. - - While an ordering is applied to the category values, arg-sorting - in this context refers more to organizing and grouping together - based on matching category values. Thus, this function can be - called on an unordered Categorical instance unlike the functions - 'Categorical.min' and 'Categorical.max'. - - Returns - ------- - argsorted : numpy array - - See also - -------- - numpy.ndarray.argsort - """ - ascending = nv.validate_argsort_with_ascending(ascending, args, kwargs) - result = np.argsort(self._codes.copy(), kind=kind, **kwargs) - if not ascending: - result = result[::-1] - return result - - def sort_values(self, inplace=False, ascending=True, na_position='last'): - """ Sorts the Categorical by category value returning a new - Categorical by default. - - While an ordering is applied to the category values, sorting in this - context refers more to organizing and grouping together based on - matching category values. Thus, this function can be called on an - unordered Categorical instance unlike the functions 'Categorical.min' - and 'Categorical.max'. 
- - Parameters - ---------- - inplace : boolean, default False - Do operation in place. - ascending : boolean, default True - Order ascending. Passing False orders descending. The - ordering parameter provides the method by which the - category values are organized. - na_position : {'first', 'last'} (optional, default='last') - 'first' puts NaNs at the beginning - 'last' puts NaNs at the end - - Returns - ------- - y : Categorical or None - - See Also - -------- - Categorical.sort - Series.sort_values - - Examples - -------- - >>> c = pd.Categorical([1, 2, 2, 1, 5]) - >>> c - [1, 2, 2, 1, 5] - Categories (3, int64): [1, 2, 5] - >>> c.sort_values() - [1, 1, 2, 2, 5] - Categories (3, int64): [1, 2, 5] - >>> c.sort_values(ascending=False) - [5, 2, 2, 1, 1] - Categories (3, int64): [1, 2, 5] - - Inplace sorting can be done as well: - - >>> c.sort_values(inplace=True) - >>> c - [1, 1, 2, 2, 5] - Categories (3, int64): [1, 2, 5] - >>> - >>> c = pd.Categorical([1, 2, 2, 1, 5]) - - 'sort_values' behaviour with NaNs. 
Note that 'na_position' - is independent of the 'ascending' parameter: - - >>> c = pd.Categorical([np.nan, 2, 2, np.nan, 5]) - >>> c - [NaN, 2.0, 2.0, NaN, 5.0] - Categories (2, int64): [2, 5] - >>> c.sort_values() - [2.0, 2.0, 5.0, NaN, NaN] - Categories (2, int64): [2, 5] - >>> c.sort_values(ascending=False) - [5.0, 2.0, 2.0, NaN, NaN] - Categories (2, int64): [2, 5] - >>> c.sort_values(na_position='first') - [NaN, NaN, 2.0, 2.0, 5.0] - Categories (2, int64): [2, 5] - >>> c.sort_values(ascending=False, na_position='first') - [NaN, NaN, 5.0, 2.0, 2.0] - Categories (2, int64): [2, 5] - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - if na_position not in ['last', 'first']: - msg = 'invalid na_position: {na_position!r}' - raise ValueError(msg.format(na_position=na_position)) - - codes = np.sort(self._codes) - if not ascending: - codes = codes[::-1] - - # NaN handling - na_mask = (codes == -1) - if na_mask.any(): - n_nans = len(codes[na_mask]) - if na_position == "first": - # in this case sort to the front - new_codes = codes.copy() - new_codes[0:n_nans] = -1 - new_codes[n_nans:] = codes[~na_mask] - codes = new_codes - elif na_position == "last": - # ... and to the end - new_codes = codes.copy() - pos = len(codes) - n_nans - new_codes[0:pos] = codes[~na_mask] - new_codes[pos:] = -1 - codes = new_codes - if inplace: - self._codes = codes - return - else: - return self._constructor(values=codes, categories=self.categories, - ordered=self.ordered, fastpath=True) - - def _values_for_rank(self): - """ - For correctly ranking ordered categorical data. See GH#15420 - - Ordered categorical data should be ranked on the basis of - codes with -1 translated to NaN. 
- - Returns - ------- - numpy array - - """ - from pandas import Series - if self.ordered: - values = self.codes - mask = values == -1 - if mask.any(): - values = values.astype('float64') - values[mask] = np.nan - elif self.categories.is_numeric(): - values = np.array(self) - else: - # reorder the categories (so rank can use the float codes) - # instead of passing an object array to rank - values = np.array( - self.rename_categories(Series(self.categories).rank().values) - ) - return values - - def ravel(self, order='C'): - """ Return a flattened (numpy) array. - - For internal compatibility with numpy arrays. - - Returns - ------- - raveled : numpy array - """ - return np.array(self) - - def view(self): - """Return a view of myself. - - For internal compatibility with numpy arrays. - - Returns - ------- - view : Categorical - Returns `self`! - """ - return self - - def to_dense(self): - """Return my 'dense' representation - - For internal compatibility with numpy arrays. - - Returns - ------- - dense : array - """ - return np.asarray(self) - - @deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value') - def fillna(self, value=None, method=None, limit=None): - """ Fill NA/NaN values using the specified method. - - Parameters - ---------- - method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None - Method to use for filling holes in reindexed Series - pad / ffill: propagate last valid observation forward to next valid - backfill / bfill: use NEXT valid observation to fill gap - value : scalar, dict, Series - If a scalar value is passed it is used to fill all missing values. - Alternatively, a Series or dict can be used to fill in different - values for each index. The value should not be a list. The - value(s) passed should either be in the categories or should be - NaN. - limit : int, default None - (Not implemented yet for Categorical!) - If method is specified, this is the maximum number of consecutive - NaN values to forward/backward fill. 
In other words, if there is - a gap with more than this number of consecutive NaNs, it will only - be partially filled. If method is not specified, this is the - maximum number of entries along the entire axis where NaNs will be - filled. - - Returns - ------- - filled : Categorical with NA/NaN filled - """ - - if value is None: - value = np.nan - if limit is not None: - raise NotImplementedError("specifying a limit for fillna has not " - "been implemented yet") - - values = self._codes - - # Make sure that we also get NA in categories - if self.categories.dtype.kind in ['S', 'O', 'f']: - if np.nan in self.categories: - values = values.copy() - nan_pos = np.where(isna(self.categories))[0] - # we only have one NA in categories - values[values == nan_pos] = -1 - - # pad / bfill - if method is not None: - - values = self.to_dense().reshape(-1, len(self)) - values = interpolate_2d(values, method, 0, None, - value).astype(self.categories.dtype)[0] - values = _get_codes_for_values(values, self.categories) - - else: - - # If value is a dict or a Series (a dict value has already - # been converted to a Series) - if isinstance(value, ABCSeries): - if not value[~value.isin(self.categories)].isna().all(): - raise ValueError("fill value must be in categories") - - values_codes = _get_codes_for_values(value, self.categories) - indexer = np.where(values_codes != -1) - values[indexer] = values_codes[values_codes != -1] - - # If value is not a dict or Series it should be a scalar - elif is_scalar(value): - if not isna(value) and value not in self.categories: - raise ValueError("fill value must be in categories") - - mask = values == -1 - if mask.any(): - values = values.copy() - if isna(value): - values[mask] = -1 - else: - values[mask] = self.categories.get_loc(value) - - else: - raise TypeError('"value" parameter must be a scalar, dict ' - 'or Series, but you passed a ' - '"{0}"'.format(type(value).__name__)) - - return self._constructor(values, categories=self.categories, - 
ordered=self.ordered, fastpath=True) - - def take_nd(self, indexer, allow_fill=True, fill_value=None): - """ Take the codes by the indexer, fill with the fill_value. - - For internal compatibility with numpy arrays. - """ - - # filling must always be None/nan here - # but is passed thru internally - assert isna(fill_value) - - codes = take_1d(self._codes, indexer, allow_fill=True, fill_value=-1) - result = self._constructor(codes, categories=self.categories, - ordered=self.ordered, fastpath=True) - return result - - take = take_nd - - def _slice(self, slicer): - """ Return a slice of myself. - - For internal compatibility with numpy arrays. - """ - - # only allow 1 dimensional slicing, but can - # in a 2-d case be passd (slice(None),....) - if isinstance(slicer, tuple) and len(slicer) == 2: - if not is_null_slice(slicer[0]): - raise AssertionError("invalid slicing for a 1-ndim " - "categorical") - slicer = slicer[1] - - _codes = self._codes[slicer] - return self._constructor(values=_codes, categories=self.categories, - ordered=self.ordered, fastpath=True) - - def __len__(self): - """The length of this Categorical.""" - return len(self._codes) - - def __iter__(self): - """Returns an Iterator over the values of this Categorical.""" - return iter(self.get_values()) - - def _tidy_repr(self, max_vals=10, footer=True): - """ a short repr displaying only max_vals and an optional (but default - footer) - """ - num = max_vals // 2 - head = self[:num]._get_repr(length=False, footer=False) - tail = self[-(max_vals - num):]._get_repr(length=False, footer=False) - - result = u('{head}, ..., {tail}').format(head=head[:-1], tail=tail[1:]) - if footer: - result = u('{result}\n{footer}').format(result=result, - footer=self._repr_footer()) - - return compat.text_type(result) - - def _repr_categories(self): - """ return the base repr for the categories """ - max_categories = (10 if get_option("display.max_categories") == 0 else - get_option("display.max_categories")) - from 
pandas.io.formats import format as fmt - if len(self.categories) > max_categories: - num = max_categories // 2 - head = fmt.format_array(self.categories[:num], None) - tail = fmt.format_array(self.categories[-num:], None) - category_strs = head + ["..."] + tail - else: - category_strs = fmt.format_array(self.categories, None) - - # Strip all leading spaces, which format_array adds for columns... - category_strs = [x.strip() for x in category_strs] - return category_strs - - def _repr_categories_info(self): - """ Returns a string representation of the footer.""" - - category_strs = self._repr_categories() - dtype = getattr(self.categories, 'dtype_str', - str(self.categories.dtype)) - - levheader = "Categories ({length}, {dtype}): ".format( - length=len(self.categories), dtype=dtype) - width, height = get_terminal_size() - max_width = get_option("display.width") or width - if com.in_ipython_frontend(): - # 0 = no breaks - max_width = 0 - levstring = "" - start = True - cur_col_len = len(levheader) # header - sep_len, sep = (3, " < ") if self.ordered else (2, ", ") - linesep = sep.rstrip() + "\n" # remove whitespace - for val in category_strs: - if max_width != 0 and cur_col_len + sep_len + len(val) > max_width: - levstring += linesep + (" " * (len(levheader) + 1)) - cur_col_len = len(levheader) + 1 # header + a whitespace - elif not start: - levstring += sep - cur_col_len += len(val) - levstring += val - start = False - # replace to simple save space by - return levheader + "[" + levstring.replace(" < ... < ", " ... 
") + "]" - - def _repr_footer(self): - - return u('Length: {length}\n{info}').format( - length=len(self), info=self._repr_categories_info()) - - def _get_repr(self, length=True, na_rep='NaN', footer=True): - from pandas.io.formats import format as fmt - formatter = fmt.CategoricalFormatter(self, length=length, - na_rep=na_rep, footer=footer) - result = formatter.to_string() - return compat.text_type(result) - - def __unicode__(self): - """ Unicode representation. """ - _maxlen = 10 - if len(self._codes) > _maxlen: - result = self._tidy_repr(_maxlen) - elif len(self._codes) > 0: - result = self._get_repr(length=len(self) > _maxlen) - else: - msg = self._get_repr(length=False, footer=True).replace("\n", ", ") - result = ('[], {repr_msg}'.format(repr_msg=msg)) - - return result - - def _maybe_coerce_indexer(self, indexer): - """ return an indexer coerced to the codes dtype """ - if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i': - indexer = indexer.astype(self._codes.dtype) - return indexer - - def __getitem__(self, key): - """ Return an item. """ - if isinstance(key, (int, np.integer)): - i = self._codes[key] - if i == -1: - return np.nan - else: - return self.categories[i] - else: - return self._constructor(values=self._codes[key], - categories=self.categories, - ordered=self.ordered, fastpath=True) - - def __setitem__(self, key, value): - """ Item assignment. 
- - - Raises - ------ - ValueError - If (one or more) Value is not in categories or if a assigned - `Categorical` does not have the same categories - """ - - # require identical categories set - if isinstance(value, Categorical): - if not value.categories.equals(self.categories): - raise ValueError("Cannot set a Categorical with another, " - "without identical categories") - - rvalue = value if is_list_like(value) else [value] - - from pandas import Index - to_add = Index(rvalue).difference(self.categories) - - # no assignments of values not in categories, but it's always ok to set - # something to np.nan - if len(to_add) and not isna(to_add).all(): - raise ValueError("Cannot setitem on a Categorical with a new " - "category, set the categories first") - - # set by position - if isinstance(key, (int, np.integer)): - pass - - # tuple of indexers (dataframe) - elif isinstance(key, tuple): - # only allow 1 dimensional slicing, but can - # in a 2-d case be passd (slice(None),....) - if len(key) == 2: - if not is_null_slice(key[0]): - raise AssertionError("invalid slicing for a 1-ndim " - "categorical") - key = key[1] - elif len(key) == 1: - key = key[0] - else: - raise AssertionError("invalid slicing for a 1-ndim " - "categorical") - - # slicing in Series or Categorical - elif isinstance(key, slice): - pass - - # Array of True/False in Series or Categorical - else: - # There is a bug in numpy, which does not accept a Series as a - # indexer - # https://github.com/pandas-dev/pandas/issues/6168 - # https://github.com/numpy/numpy/issues/4240 -> fixed in numpy 1.9 - # FIXME: remove when numpy 1.9 is the lowest numpy version pandas - # accepts... 
- key = np.asarray(key) - - lindexer = self.categories.get_indexer(rvalue) - - # FIXME: the following can be removed after GH7820 is fixed: - # https://github.com/pandas-dev/pandas/issues/7820 - # float categories do currently return -1 for np.nan, even if np.nan is - # included in the index -> "repair" this here - if isna(rvalue).any() and isna(self.categories).any(): - nan_pos = np.where(isna(self.categories))[0] - lindexer[lindexer == -1] = nan_pos - - lindexer = self._maybe_coerce_indexer(lindexer) - self._codes[key] = lindexer - - def _reverse_indexer(self): - """ - Compute the inverse of a categorical, returning - a dict of categories -> indexers. - - *This is an internal function* - - Returns - ------- - dict of categories -> indexers - - Example - ------- - In [1]: c = pd.Categorical(list('aabca')) - - In [2]: c - Out[2]: - [a, a, b, c, a] - Categories (3, object): [a, b, c] - - In [3]: c.categories - Out[3]: Index([u'a', u'b', u'c'], dtype='object') - - In [4]: c.codes - Out[4]: array([0, 0, 1, 2, 0], dtype=int8) - - In [5]: c._reverse_indexer() - Out[5]: {'a': array([0, 1, 4]), 'b': array([2]), 'c': array([3])} - - """ - categories = self.categories - r, counts = libalgos.groupsort_indexer(self.codes.astype('int64'), - categories.size) - counts = counts.cumsum() - result = [r[counts[indexer]:counts[indexer + 1]] - for indexer in range(len(counts) - 1)] - result = dict(zip(categories, result)) - return result - - # reduction ops # - def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, - filter_type=None, **kwds): - """ perform the reduction type operation """ - func = getattr(self, name, None) - if func is None: - msg = 'Categorical cannot perform the operation {op}' - raise TypeError(msg.format(op=name)) - return func(numeric_only=numeric_only, **kwds) - - def min(self, numeric_only=None, **kwargs): - """ The minimum value of the object. - - Only ordered `Categoricals` have a minimum! 
- - Raises - ------ - TypeError - If the `Categorical` is not `ordered`. - - Returns - ------- - min : the minimum of this `Categorical` - """ - self.check_for_ordered('min') - if numeric_only: - good = self._codes != -1 - pointer = self._codes[good].min(**kwargs) - else: - pointer = self._codes.min(**kwargs) - if pointer == -1: - return np.nan - else: - return self.categories[pointer] - - def max(self, numeric_only=None, **kwargs): - """ The maximum value of the object. - - Only ordered `Categoricals` have a maximum! - - Raises - ------ - TypeError - If the `Categorical` is not `ordered`. - - Returns - ------- - max : the maximum of this `Categorical` - """ - self.check_for_ordered('max') - if numeric_only: - good = self._codes != -1 - pointer = self._codes[good].max(**kwargs) - else: - pointer = self._codes.max(**kwargs) - if pointer == -1: - return np.nan - else: - return self.categories[pointer] - - def mode(self): - """ - Returns the mode(s) of the Categorical. - - Always returns `Categorical` even if only one value. - - Returns - ------- - modes : `Categorical` (sorted) - """ - - import pandas._libs.hashtable as htable - good = self._codes != -1 - values = sorted(htable.mode_int64(_ensure_int64(self._codes[good]))) - result = self._constructor(values=values, categories=self.categories, - ordered=self.ordered, fastpath=True) - return result - - def unique(self): - """ - Return the ``Categorical`` which ``categories`` and ``codes`` are - unique. Unused categories are NOT returned. - - - unordered category: values and categories are sorted by appearance - order. - - ordered category: values are sorted by appearance order, categories - keeps existing order. - - Returns - ------- - unique values : ``Categorical`` - - Examples - -------- - An unordered Categorical will return categories in the - order of appearance. 
- - >>> pd.Categorical(list('baabc')) - [b, a, c] - Categories (3, object): [b, a, c] - - >>> pd.Categorical(list('baabc'), categories=list('abc')) - [b, a, c] - Categories (3, object): [b, a, c] - - An ordered Categorical preserves the category ordering. - - >>> pd.Categorical(list('baabc'), - ... categories=list('abc'), - ... ordered=True) - [b, a, c] - Categories (3, object): [a < b < c] - - See Also - -------- - unique - CategoricalIndex.unique - Series.unique - - """ - - # unlike np.unique, unique1d does not sort - unique_codes = unique1d(self.codes) - cat = self.copy() - - # keep nan in codes - cat._codes = unique_codes - - # exclude nan from indexer for categories - take_codes = unique_codes[unique_codes != -1] - if self.ordered: - take_codes = sorted(take_codes) - return cat.set_categories(cat.categories.take(take_codes)) - - def equals(self, other): - """ - Returns True if categorical arrays are equal. - - Parameters - ---------- - other : `Categorical` - - Returns - ------- - are_equal : boolean - """ - if self.is_dtype_equal(other): - if self.categories.equals(other.categories): - # fastpath to avoid re-coding - other_codes = other._codes - else: - other_codes = _recode_for_categories(other.codes, - other.categories, - self.categories) - return np.array_equal(self._codes, other_codes) - return False - - def is_dtype_equal(self, other): - """ - Returns True if categoricals are the same dtype - same categories, and same ordered - - Parameters - ---------- - other : Categorical - - Returns - ------- - are_equal : boolean - """ - - try: - return hash(self.dtype) == hash(other.dtype) - except (AttributeError, TypeError): - return False - - def describe(self): - """ Describes this Categorical - - Returns - ------- - description: `DataFrame` - A dataframe with frequency and counts by category. 
- """ - counts = self.value_counts(dropna=False) - freqs = counts / float(counts.sum()) - - from pandas.core.reshape.concat import concat - result = concat([counts, freqs], axis=1) - result.columns = ['counts', 'freqs'] - result.index.name = 'categories' - - return result - - def repeat(self, repeats, *args, **kwargs): - """ - Repeat elements of a Categorical. - - See also - -------- - numpy.ndarray.repeat - - """ - nv.validate_repeat(args, kwargs) - codes = self._codes.repeat(repeats) - return self._constructor(values=codes, categories=self.categories, - ordered=self.ordered, fastpath=True) - -# The Series.cat accessor - - -class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): - """ - Accessor object for categorical properties of the Series values. - - Be aware that assigning to `categories` is a inplace operation, while all - methods return new categorical data per default (but can be called with - `inplace=True`). - - Parameters - ---------- - data : Series or CategoricalIndex - - Examples - -------- - >>> s.cat.categories - >>> s.cat.categories = list('abc') - >>> s.cat.rename_categories(list('cab')) - >>> s.cat.reorder_categories(list('cab')) - >>> s.cat.add_categories(['d','e']) - >>> s.cat.remove_categories(['d']) - >>> s.cat.remove_unused_categories() - >>> s.cat.set_categories(list('abcde')) - >>> s.cat.as_ordered() - >>> s.cat.as_unordered() - - """ - - def __init__(self, data): - self._validate(data) - self.categorical = data.values - self.index = data.index - self.name = data.name - self._freeze() - - @staticmethod - def _validate(data): - if not is_categorical_dtype(data.dtype): - raise AttributeError("Can only use .cat accessor with a " - "'category' dtype") - - def _delegate_property_get(self, name): - return getattr(self.categorical, name) - - def _delegate_property_set(self, name, new_values): - return setattr(self.categorical, name, new_values) - - @property - def codes(self): - from pandas import Series - return 
Series(self.categorical.codes, index=self.index) - - def _delegate_method(self, name, *args, **kwargs): - from pandas import Series - method = getattr(self.categorical, name) - res = method(*args, **kwargs) - if res is not None: - return Series(res, index=self.index, name=self.name) - - -CategoricalAccessor._add_delegate_accessors(delegate=Categorical, - accessors=["categories", - "ordered"], - typ='property') -CategoricalAccessor._add_delegate_accessors(delegate=Categorical, accessors=[ - "rename_categories", "reorder_categories", "add_categories", - "remove_categories", "remove_unused_categories", "set_categories", - "as_ordered", "as_unordered"], typ='method') - -# utility routines - - -def _get_codes_for_values(values, categories): - """ - utility routine to turn values into codes given the specified categories - """ - - from pandas.core.algorithms import _get_data_algo, _hashtables - if not is_dtype_equal(values.dtype, categories.dtype): - values = _ensure_object(values) - categories = _ensure_object(categories) - - (hash_klass, vec_klass), vals = _get_data_algo(values, _hashtables) - (_, _), cats = _get_data_algo(categories, _hashtables) - t = hash_klass(len(cats)) - t.map_locations(cats) - return coerce_indexer_dtype(t.lookup(vals), cats) - - -def _recode_for_categories(codes, old_categories, new_categories): - """ - Convert a set of codes for to a new set of categories - - Parameters - ---------- - codes : array - old_categories, new_categories : Index - - Returns - ------- - new_codes : array - - Examples - -------- - >>> old_cat = pd.Index(['b', 'a', 'c']) - >>> new_cat = pd.Index(['a', 'b']) - >>> codes = np.array([0, 1, 1, 2]) - >>> _recode_for_categories(codes, old_cat, new_cat) - array([ 1, 0, 0, -1]) - """ - from pandas.core.algorithms import take_1d - - if len(old_categories) == 0: - # All null anyway, so just retain the nulls - return codes.copy() - indexer = coerce_indexer_dtype(new_categories.get_indexer(old_categories), - new_categories) - 
new_codes = take_1d(indexer, codes.copy(), fill_value=-1) - return new_codes - - -def _convert_to_list_like(list_like): - if hasattr(list_like, "dtype"): - return list_like - if isinstance(list_like, list): - return list_like - if (is_sequence(list_like) or isinstance(list_like, tuple) or - isinstance(list_like, types.GeneratorType)): - return list(list_like) - elif is_scalar(list_like): - return [list_like] - else: - # is this reached? - return [list_like] - - -def _factorize_from_iterable(values): - """ - Factorize an input `values` into `categories` and `codes`. Preserves - categorical dtype in `categories`. - - *This is an internal function* - - Parameters - ---------- - values : list-like - - Returns - ------- - codes : ndarray - categories : Index - If `values` has a categorical dtype, then `categories` is - a CategoricalIndex keeping the categories and order of `values`. - """ - from pandas.core.indexes.category import CategoricalIndex - - if not is_list_like(values): - raise TypeError("Input must be list-like") - - if is_categorical(values): - if isinstance(values, (ABCCategoricalIndex, ABCSeries)): - values = values._values - categories = CategoricalIndex(values.categories, - categories=values.categories, - ordered=values.ordered) - codes = values.codes - else: - cat = Categorical(values, ordered=True) - categories = cat.categories - codes = cat.codes - return codes, categories - - -def _factorize_from_iterables(iterables): - """ - A higher-level wrapper over `_factorize_from_iterable`. - - *This is an internal function* - - Parameters - ---------- - iterables : list-like of list-likes - - Returns - ------- - codes_list : list of ndarrays - categories_list : list of Indexes - - Notes - ----- - See `_factorize_from_iterable` for more info. - """ - if len(iterables) == 0: - # For consistency, it should return a list of 2 lists. 
- return [[], []] - return map(list, lzip(*[_factorize_from_iterable(it) for it in iterables])) +from pandas.core.arrays import Categorical # noqa diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 5e6193d673756..3e54ce61cd5b2 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -314,7 +314,7 @@ def union_categoricals(to_union, sort_categories=False, ignore_order=False): Categories (3, object): [b, c, a] """ from pandas import Index, Categorical, CategoricalIndex, Series - from pandas.core.categorical import _recode_for_categories + from pandas.core.arrays.categorical import _recode_for_categories if len(to_union) == 0: raise ValueError('No Categoricals to union') diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 2c05eefa5706e..7771060ad82c7 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -77,7 +77,7 @@ create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series -from pandas.core.categorical import Categorical +from pandas.core.arrays import Categorical import pandas.core.algorithms as algorithms from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u, OrderedDict, raise_with_traceback) diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 082b6e2a8b1a0..25e44589488ee 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -47,7 +47,7 @@ DataError, SpecificationError) from pandas.core.index import (Index, MultiIndex, CategoricalIndex, _ensure_index) -from pandas.core.categorical import Categorical +from pandas.core.arrays import Categorical from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame, _shared_docs from pandas.core.internals import BlockManager, make_block diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index ac7cb30fa823d..9a6210db1aacb 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -125,7 
+125,7 @@ def _create_from_codes(self, codes, categories=None, ordered=None, CategoricalIndex """ - from pandas.core.categorical import Categorical + from pandas.core.arrays import Categorical if categories is None: categories = self.categories if ordered is None: @@ -162,7 +162,7 @@ def _create_categorical(self, data, categories=None, ordered=None, if not isinstance(data, ABCCategorical): if ordered is None and dtype is None: ordered = False - from pandas.core.categorical import Categorical + from pandas.core.arrays import Categorical data = Categorical(data, categories=categories, ordered=ordered, dtype=dtype) else: @@ -462,7 +462,7 @@ def where(self, cond, other=None): other = self._na_value values = np.where(cond, self.values, other) - from pandas.core.categorical import Categorical + from pandas.core.arrays import Categorical cat = Categorical(values, categories=self.categories, ordered=self.ordered) @@ -775,7 +775,7 @@ def _delegate_method(self, name, *args, **kwargs): def _add_accessors(cls): """ add in Categorical accessor methods """ - from pandas.core.categorical import Categorical + from pandas.core.arrays import Categorical CategoricalIndex._add_delegate_accessors( delegate=Categorical, accessors=["rename_categories", "reorder_categories", diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 5739c8dfd8b53..608553b9c3bf2 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1182,7 +1182,7 @@ def from_arrays(cls, arrays, sortorder=None, names=None): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError('all arrays must be same length') - from pandas.core.categorical import _factorize_from_iterables + from pandas.core.arrays.categorical import _factorize_from_iterables labels, levels = _factorize_from_iterables(arrays) if names is None: @@ -1276,7 +1276,7 @@ def from_product(cls, iterables, sortorder=None, names=None): MultiIndex.from_arrays : Convert list of arrays to MultiIndex 
MultiIndex.from_tuples : Convert list of tuples to MultiIndex """ - from pandas.core.categorical import _factorize_from_iterables + from pandas.core.arrays.categorical import _factorize_from_iterables from pandas.core.reshape.util import cartesian_product if not is_list_like(iterables): @@ -1749,7 +1749,7 @@ def _get_labels_for_sorting(self): for sorting, where we need to disambiguate that -1 is not a valid valid """ - from pandas.core.categorical import Categorical + from pandas.core.arrays import Categorical def cats(label): return np.arange(np.array(label).max() + 1 if len(label) else 0, diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 3c923133477df..45618282ab4f7 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -59,7 +59,7 @@ from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import maybe_convert_indices, length_of_indexer -from pandas.core.categorical import Categorical, _maybe_to_categorical +from pandas.core.arrays.categorical import Categorical, _maybe_to_categorical from pandas.core.indexes.datetimes import DatetimeIndex from pandas.io.formats.printing import pprint_thing diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index aaadf6d3ca32f..20f4384a3d698 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -7,8 +7,8 @@ from pandas.core.index import (_get_objs_combined_axis, _ensure_index, _get_consensus_names, _all_indexes_same) -from pandas.core.categorical import (_factorize_from_iterable, - _factorize_from_iterables) +from pandas.core.arrays.categorical import (_factorize_from_iterable, + _factorize_from_iterables) from pandas.core.internals import concatenate_block_managers from pandas.core import common as com from pandas.core.generic import NDFrame diff --git a/pandas/core/reshape/melt.py b/pandas/core/reshape/melt.py index 28e9694681912..01445eb30a9e5 100644 --- a/pandas/core/reshape/melt.py +++ 
b/pandas/core/reshape/melt.py @@ -4,7 +4,7 @@ from pandas.core.dtypes.common import is_list_like from pandas import compat -from pandas.core.categorical import Categorical +from pandas.core.arrays import Categorical from pandas.core.dtypes.generic import ABCMultiIndex diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index f7a0fab9998d0..c8bca476c65f2 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -21,7 +21,8 @@ from pandas.core.sparse.array import SparseArray from pandas._libs.sparse import IntIndex -from pandas.core.categorical import Categorical, _factorize_from_iterable +from pandas.core.arrays import Categorical +from pandas.core.arrays.categorical import _factorize_from_iterable from pandas.core.sorting import (get_group_index, get_compressed_ids, compress_group_index, decons_obs_group_ids) diff --git a/pandas/core/series.py b/pandas/core/series.py index 73a7fe1fd89e9..be40f65186d2d 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -53,7 +53,7 @@ from pandas.core.indexing import check_bool_indexer, maybe_convert_indices from pandas.core import generic, base from pandas.core.internals import SingleBlockManager -from pandas.core.categorical import Categorical, CategoricalAccessor +from pandas.core.arrays.categorical import Categorical, CategoricalAccessor from pandas.core.indexes.accessors import CombinedDatetimelikeProperties from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex diff --git a/pandas/core/sorting.py b/pandas/core/sorting.py index 27252b9616a44..e550976d1deeb 100644 --- a/pandas/core/sorting.py +++ b/pandas/core/sorting.py @@ -182,7 +182,7 @@ def indexer_from_factorized(labels, shape, compress=True): def lexsort_indexer(keys, orders=None, na_position='last'): - from pandas.core.categorical import Categorical + from pandas.core.arrays import Categorical labels = [] shape = [] diff --git a/pandas/io/parsers.py 
b/pandas/io/parsers.py index 150fccde81a60..1a2f62442a063 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -28,7 +28,7 @@ _ensure_index_from_sequences) from pandas.core.series import Series from pandas.core.frame import DataFrame -from pandas.core.categorical import Categorical +from pandas.core.arrays import Categorical from pandas.core import algorithms from pandas.core.common import AbstractMethodError from pandas.io.date_converters import generic_parser diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 72543bb6f825e..c8490167022e5 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -36,7 +36,8 @@ from pandas.errors import PerformanceWarning from pandas.core.common import _asarray_tuplesafe, _all_none from pandas.core.algorithms import match, unique -from pandas.core.categorical import Categorical, _factorize_from_iterables +from pandas.core.arrays.categorical import (Categorical, + _factorize_from_iterables) from pandas.core.internals import (BlockManager, make_block, _block2d_to_blocknd, _factor_indexer, _block_shape) diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 2b97b447921bb..b409cf20e9a09 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -24,7 +24,7 @@ from pandas.compat import (lrange, lmap, lzip, text_type, string_types, range, zip, BytesIO) from pandas.core.base import StringMixin -from pandas.core.categorical import Categorical +from pandas.core.arrays import Categorical from pandas.core.dtypes.common import (is_categorical_dtype, _ensure_object, is_datetime64_dtype) from pandas.core.frame import DataFrame diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 821c7858c7a5c..4a10ed6e7402c 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- - +import sys from warnings import catch_warnings import pytest @@ -249,3 +249,13 @@ def test_deprecation_cdaterange(self): with 
tm.assert_produces_warning(FutureWarning, check_stacklevel=False): cdate_range('2017-01-01', '2017-12-31') + + +class TestCategoricalMove(object): + + def test_categorical_move(self): + # May have been cached by another import, e.g. pickle tests. + sys.modules.pop("pandas.core.categorical", None) + + with tm.assert_produces_warning(FutureWarning): + from pandas.core.categorical import Categorical # noqa diff --git a/pandas/tests/categorical/test_api.py b/pandas/tests/categorical/test_api.py index 0af2857091b74..ad5b78b36438b 100644 --- a/pandas/tests/categorical/test_api.py +++ b/pandas/tests/categorical/test_api.py @@ -7,7 +7,7 @@ import pandas.util.testing as tm from pandas import Categorical, CategoricalIndex, Index, Series, DataFrame -from pandas.core.categorical import _recode_for_categories +from pandas.core.arrays.categorical import _recode_for_categories from pandas.tests.categorical.common import TestCategorical diff --git a/pandas/tests/series/test_api.py b/pandas/tests/series/test_api.py index 73cc87855acbd..cf8698bc5ed5e 100644 --- a/pandas/tests/series/test_api.py +++ b/pandas/tests/series/test_api.py @@ -511,8 +511,7 @@ def test_cat_accessor(self): def test_cat_accessor_api(self): # GH 9322 - from pandas.core.categorical import CategoricalAccessor - + from pandas.core.arrays.categorical import CategoricalAccessor assert Series.cat is CategoricalAccessor s = Series(list('aabbcde')).astype('category') assert isinstance(s.cat, CategoricalAccessor)
Prep for https://github.com/pandas-dev/pandas/pull/19268
https://api.github.com/repos/pandas-dev/pandas/pulls/19269
2018-01-16T16:06:55Z
2018-01-18T17:31:43Z
2018-01-18T17:31:42Z
2018-01-18T17:31:46Z
Array Interface and Categorical internals Refactor
diff --git a/pandas/core/arrays/__init__.py b/pandas/core/arrays/__init__.py index ee32b12f0e712..f8adcf520c15b 100644 --- a/pandas/core/arrays/__init__.py +++ b/pandas/core/arrays/__init__.py @@ -1 +1,2 @@ +from .base import ExtensionArray # noqa from .categorical import Categorical # noqa diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py new file mode 100644 index 0000000000000..1556b653819a6 --- /dev/null +++ b/pandas/core/arrays/base.py @@ -0,0 +1,247 @@ +"""An interface for extending pandas with custom arrays.""" +from pandas.errors import AbstractMethodError + +_not_implemented_message = "{} does not implement {}." + + +class ExtensionArray(object): + """Abstract base class for custom 1-D array types. + + pandas will recognize instances of this class as proper arrays + with a custom type and will not attempt to coerce them to objects. They + may be stored directly inside a :class:`DataFrame` or :class:`Series`. + + Notes + ----- + The interface includes the following abstract methods that must be + implemented by subclasses: + + * __getitem__ + * __len__ + * dtype + * nbytes + * isna + * take + * copy + * _formatting_values + * _concat_same_type + + Some additional methods are required to satisfy pandas' internal, private + block API. + + * _concat_same_type + * _can_hold_na + + This class does not inherit from 'abc.ABCMeta' for performance reasons. + Methods and properties required by the interface raise + ``pandas.errors.AbstractMethodError`` and no ``register`` method is + provided for registering virtual subclasses. + + ExtensionArrays are limited to 1 dimension. + + They may be backed by none, one, or many NumPy ararys. For example, + ``pandas.Categorical`` is an extension array backed by two arrays, + one for codes and one for categories. An array of IPv6 address may + be backed by a NumPy structured array with two fields, one for the + lower 64 bits and one for the upper 64 bits. 
Or they may be backed + by some other storage type, like Python lists. Pandas makes no + assumptions on how the data are stored, just that it can be converted + to a NumPy array. + + Extension arrays should be able to be constructed with instances of + the class, i.e. ``ExtensionArray(extension_array)`` should return + an instance, not error. + + Additionally, certain methods and interfaces are required for proper + this array to be properly stored inside a ``DataFrame`` or ``Series``. + """ + # ------------------------------------------------------------------------ + # Must be a Sequence + # ------------------------------------------------------------------------ + def __getitem__(self, item): + # type (Any) -> Any + """Select a subset of self. + + Parameters + ---------- + item : int, slice, or ndarray + * int: The position in 'self' to get. + + * slice: A slice object, where 'start', 'stop', and 'step' are + integers or None + + * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' + + Returns + ------- + item : scalar or ExtensionArray + + Notes + ----- + For scalar ``item``, return a scalar value suitable for the array's + type. This should be an instance of ``self.dtype.type``. + + For slice ``key``, return an instance of ``ExtensionArray``, even + if the slice is length 0 or 1. + + For a boolean mask, return an instance of ``ExtensionArray``, filtered + to the values where ``item`` is True. 
+ """ + raise AbstractMethodError(self) + + def __setitem__(self, key, value): + # type: (Any, Any) -> None + raise NotImplementedError(_not_implemented_message.format( + type(self), '__setitem__') + ) + + def __len__(self): + """Length of this array + + Returns + ------- + length : int + """ + # type: () -> int + raise AbstractMethodError(self) + + # ------------------------------------------------------------------------ + # Required attributes + # ------------------------------------------------------------------------ + @property + def dtype(self): + # type: () -> ExtensionDtype + """An instance of 'ExtensionDtype'.""" + raise AbstractMethodError(self) + + @property + def shape(self): + # type: () -> Tuple[int, ...] + return (len(self),) + + @property + def ndim(self): + # type: () -> int + """Extension Arrays are only allowed to be 1-dimensional.""" + return 1 + + @property + def nbytes(self): + # type: () -> int + """The number of bytes needed to store this object in memory. + + If this is expensive to compute, return an approximate lower bound + on the number of bytes needed. + """ + raise AbstractMethodError(self) + + # ------------------------------------------------------------------------ + # Additional Methods + # ------------------------------------------------------------------------ + def isna(self): + # type: () -> np.ndarray + """Boolean NumPy array indicating if each value is missing. + + This should return a 1-D array the same length as 'self'. + """ + raise AbstractMethodError(self) + + # ------------------------------------------------------------------------ + # Indexing methods + # ------------------------------------------------------------------------ + def take(self, indexer, allow_fill=True, fill_value=None): + # type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray + """Take elements from an array. + + Parameters + ---------- + indexer : sequence of integers + indices to be taken. -1 is used to indicate values + that are missing. 
+ allow_fill : bool, default True + If False, indexer is assumed to contain no -1 values so no filling + will be done. This short-circuits computation of a mask. Result is + undefined if allow_fill == False and -1 is present in indexer. + fill_value : any, default None + Fill value to replace -1 values with. By default, this uses + the missing value sentinel for this type, ``self._fill_value``. + + Notes + ----- + This should follow pandas' semantics where -1 indicates missing values. + Positions where indexer is ``-1`` should be filled with the missing + value for this type. + + This is called by ``Series.__getitem__``, ``.loc``, ``iloc``, when the + indexer is a sequence of values. + + Examples + -------- + Suppose the extension array somehow backed by a NumPy structured array + and that the underlying structured array is stored as ``self.data``. + Then ``take`` may be written as + + .. code-block:: python + + def take(self, indexer, allow_fill=True, fill_value=None): + mask = indexer == -1 + result = self.data.take(indexer) + result[mask] = self._fill_value + return type(self)(result) + """ + raise AbstractMethodError(self) + + def copy(self, deep=False): + # type: (bool) -> ExtensionArray + """Return a copy of the array. + + Parameters + ---------- + deep : bool, default False + Also copy the underlying data backing this array. + + Returns + ------- + ExtensionArray + """ + raise AbstractMethodError(self) + + # ------------------------------------------------------------------------ + # Block-related methods + # ------------------------------------------------------------------------ + @property + def _fill_value(self): + # type: () -> Any + """The missing value for this type, e.g. np.nan""" + return None + + def _formatting_values(self): + # type: () -> np.ndarray + # At the moment, this has to be an array since we use result.dtype + """An array of values to be printed in, e.g. 
the Series repr""" + raise AbstractMethodError(self) + + @classmethod + def _concat_same_type(cls, to_concat): + # type: (Sequence[ExtensionArray]) -> ExtensionArray + """Concatenate multiple array + + Parameters + ---------- + to_concat : sequence of this type + + Returns + ------- + ExtensionArray + """ + raise AbstractMethodError(cls) + + def _can_hold_na(self): + # type: () -> bool + """Whether your array can hold missing values. True by default. + + Notes + ----- + Setting this to false will optimize some operations like fillna. + """ + return True diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index b50e01b0fb55a..62c6a6b16cbe9 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -43,6 +43,8 @@ from pandas.util._validators import validate_bool_kwarg from pandas.core.config import get_option +from .base import ExtensionArray + def _cat_compare_op(op): def f(self, other): @@ -148,7 +150,7 @@ def _maybe_to_categorical(array): """ -class Categorical(PandasObject): +class Categorical(ExtensionArray, PandasObject): """ Represents a categorical variable in classic R / S-plus fashion @@ -2130,6 +2132,20 @@ def repeat(self, repeats, *args, **kwargs): return self._constructor(values=codes, categories=self.categories, ordered=self.ordered, fastpath=True) + # Implement the ExtensionArray interface + @property + def _can_hold_na(self): + return True + + @classmethod + def _concat_same_type(self, to_concat): + from pandas.core.dtypes.concat import _concat_categorical + + return _concat_categorical(to_concat) + + def _formatting_values(self): + return self + # The Series.cat accessor diff --git a/pandas/core/common.py b/pandas/core/common.py index e606be3cc2a23..6748db825acf0 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -25,7 +25,8 @@ # compat from pandas.errors import ( # noqa - PerformanceWarning, UnsupportedFunctionCall, UnsortedIndexError) + PerformanceWarning, 
UnsupportedFunctionCall, UnsortedIndexError, + AbstractMethodError) # back-compat of public API # deprecate these functions @@ -88,19 +89,6 @@ class SettingWithCopyWarning(Warning): pass -class AbstractMethodError(NotImplementedError): - """Raise this error instead of NotImplementedError for abstract methods - while keeping compatibility with Python 2 and Python 3. - """ - - def __init__(self, class_instance): - self.class_instance = class_instance - - def __str__(self): - msg = "This method must be defined in the concrete class of {name}" - return (msg.format(name=self.class_instance.__class__.__name__)) - - def flatten(l): """Flatten an arbitrarily nested sequence. diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py new file mode 100644 index 0000000000000..c7c5378801f02 --- /dev/null +++ b/pandas/core/dtypes/base.py @@ -0,0 +1,129 @@ +"""Extend pandas with custom array types""" +from pandas.errors import AbstractMethodError + + +class ExtensionDtype(object): + """A custom data type, to be paired with an ExtensionArray. + + Notes + ----- + The interface includes the following abstract methods that must + be implemented by subclasses: + + * type + * name + * construct_from_string + + This class does not inherit from 'abc.ABCMeta' for performance reasons. + Methods and properties required by the interface raise + ``pandas.errors.AbstractMethodError`` and no ``register`` method is + provided for registering virtual subclasses. + """ + + def __str__(self): + return self.name + + @property + def type(self): + # type: () -> type + """The scalar type for the array, e.g. ``int`` + + It's expected ``ExtensionArray[item]`` returns an instance + of ``ExtensionDtype.type`` for scalar ``item``. 
+ """ + raise AbstractMethodError(self) + + @property + def kind(self): + # type () -> str + """A character code (one of 'biufcmMOSUV'), default 'O' + + This should match the NumPy dtype used when the array is + converted to an ndarray, which is probably 'O' for object if + the extension type cannot be represented as a built-in NumPy + type. + + See Also + -------- + numpy.dtype.kind + """ + return 'O' + + @property + def name(self): + # type: () -> str + """A string identifying the data type. + + Will be used for display in, e.g. ``Series.dtype`` + """ + raise AbstractMethodError(self) + + @property + def names(self): + # type: () -> Optional[List[str]] + """Ordered list of field names, or None if there are no fields. + + This is for compatibility with NumPy arrays, and may be removed in the + future. + """ + return None + + @classmethod + def construct_from_string(cls, string): + """Attempt to construct this type from a string. + + Parameters + ---------- + string : str + + Returns + ------- + self : instance of 'cls' + + Raises + ------ + TypeError + If a class cannot be constructed from this 'string'. + + Examples + -------- + If the extension dtype can be constructed without any arguments, + the following may be an adequate implementation. + + >>> @classmethod + ... def construct_from_string(cls, string) + ... if string == cls.name: + ... return cls() + ... else: + ... raise TypeError("Cannot construct a '{}' from " + ... "'{}'".format(cls, string)) + """ + raise AbstractMethodError(cls) + + @classmethod + def is_dtype(cls, dtype): + """Check if we match 'dtype' + + Parameters + ---------- + dtype : str or dtype + + Returns + ------- + is_dtype : bool + + Notes + ----- + The default implementation is True if + + 1. ``cls.construct_from_string(dtype)`` is an instance + of ``cls``. + 2. 'dtype' is ``cls`` or a subclass of ``cls``. 
+ """ + if isinstance(dtype, str): + try: + return isinstance(cls.construct_from_string(dtype), cls) + except TypeError: + return False + else: + return issubclass(dtype, cls) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index dca9a5fde0d74..c66e7fcfc6978 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -1685,6 +1685,35 @@ def is_extension_type(arr): return False +def is_extension_array_dtype(arr_or_dtype): + """Check if an object is a pandas extension array type. + + Parameters + ---------- + arr_or_dtype : object + + Returns + ------- + bool + + Notes + ----- + This checks whether an object implements the pandas extension + array interface. In pandas, this includes: + + * Categorical + + Third-party libraries may implement arrays or types satisfying + this interface as well. + """ + from pandas.core.arrays import ExtensionArray + + # we want to unpack series, anything else? + if isinstance(arr_or_dtype, ABCSeries): + arr_or_dtype = arr_or_dtype._values + return isinstance(arr_or_dtype, (ExtensionDtype, ExtensionArray)) + + def is_complex_dtype(arr_or_dtype): """ Check whether the provided array or dtype is of a complex dtype. diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 1eb87aa99fd1e..d8d3a96992757 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -5,15 +5,15 @@ from pandas import compat from pandas.core.dtypes.generic import ABCIndexClass, ABCCategoricalIndex +from .base import ExtensionDtype -class ExtensionDtype(object): + +class PandasExtensionDtype(ExtensionDtype): """ A np.dtype duck-typed class, suitable for holding a custom dtype. 
THIS IS NOT A REAL NUMPY DTYPE """ - name = None - names = None type = None subdtype = None kind = None @@ -108,7 +108,7 @@ class CategoricalDtypeType(type): pass -class CategoricalDtype(ExtensionDtype): +class CategoricalDtype(PandasExtensionDtype): """ Type for categorical data with the categories and orderedness @@ -387,7 +387,7 @@ class DatetimeTZDtypeType(type): pass -class DatetimeTZDtype(ExtensionDtype): +class DatetimeTZDtype(PandasExtensionDtype): """ A np.dtype duck-typed class, suitable for holding a custom datetime with tz @@ -501,8 +501,7 @@ class PeriodDtypeType(type): pass -class PeriodDtype(ExtensionDtype): - __metaclass__ = PeriodDtypeType +class PeriodDtype(PandasExtensionDtype): """ A Period duck-typed class, suitable for holding a period with freq dtype. @@ -619,8 +618,7 @@ class IntervalDtypeType(type): pass -class IntervalDtype(ExtensionDtype): - __metaclass__ = IntervalDtypeType +class IntervalDtype(PandasExtensionDtype): """ A Interval duck-typed class, suitable for holding an interval diff --git a/pandas/core/internals.py b/pandas/core/internals.py index f3e5e4c99a899..cef5b776eff66 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -33,6 +33,7 @@ is_datetimelike_v_numeric, is_float_dtype, is_numeric_dtype, is_numeric_v_string_like, is_extension_type, + is_extension_array_dtype, is_list_like, is_re, is_re_compilable, @@ -61,8 +62,9 @@ from pandas.core.index import Index, MultiIndex, _ensure_index from pandas.core.indexing import maybe_convert_indices, length_of_indexer -from pandas.core.arrays.categorical import Categorical, _maybe_to_categorical +from pandas.core.arrays import Categorical from pandas.core.indexes.datetimes import DatetimeIndex +from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.io.formats.printing import pprint_thing import pandas.core.missing as missing @@ -103,24 +105,58 @@ class Block(PandasObject): _verify_integrity = True _validate_ndim = True _ftype = 'dense' - _holder = None 
_concatenator = staticmethod(np.concatenate) def __init__(self, values, placement, ndim=None): - if ndim is None: - ndim = values.ndim - elif values.ndim != ndim: - raise ValueError('Wrong number of dimensions') - self.ndim = ndim - + self.ndim = self._check_ndim(values, ndim) self.mgr_locs = placement self.values = values - if ndim and len(self.mgr_locs) != len(self.values): + if (self._validate_ndim and self.ndim and + len(self.mgr_locs) != len(self.values)): raise ValueError( 'Wrong number of items passed {val}, placement implies ' '{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs))) + def _check_ndim(self, values, ndim): + """ndim inference and validation. + + Infers ndim from 'values' if not provided to __init__. + Validates that values.ndim and ndim are consistent if and only if + the class variable '_validate_ndim' is True. + + Parameters + ---------- + values : array-like + ndim : int or None + + Returns + ------- + ndim : int + + Raises + ------ + ValueError : the number of dimensions do not match + """ + if ndim is None: + ndim = values.ndim + + if self._validate_ndim and values.ndim != ndim: + msg = ("Wrong number of dimensions. values.ndim != ndim " + "[{} != {}]") + raise ValueError(msg.format(values.ndim, ndim)) + + return ndim + + @property + def _holder(self): + """The array-like that can hold the underlying values. + + None for 'Block', overridden by subclasses that don't + use an ndarray. 
+ """ + return None + @property def _consolidate_key(self): return (self._can_consolidate, self.dtype.name) @@ -279,7 +315,6 @@ def reshape_nd(self, labels, shape, ref_items, mgr=None): return a new block that is transformed to a nd block """ - return _block2d_to_blocknd(values=self.get_values().T, placement=self.mgr_locs, shape=shape, labels=labels, ref_items=ref_items) @@ -535,15 +570,20 @@ def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs): def _astype(self, dtype, copy=False, errors='raise', values=None, klass=None, mgr=None, **kwargs): - """ - Coerce to the new type + """Coerce to the new type + Parameters + ---------- dtype : str, dtype convertible copy : boolean, default False copy if indicated errors : str, {'raise', 'ignore'}, default 'ignore' - ``raise`` : allow exceptions to be raised - ``ignore`` : suppress exceptions. On error return original object + + Returns + ------- + Block """ errors_legal_values = ('raise', 'ignore') @@ -1671,27 +1711,28 @@ class NonConsolidatableMixIn(object): _can_consolidate = False _verify_integrity = False _validate_ndim = False - _holder = None def __init__(self, values, placement, ndim=None): + """Initialize a non-consolidatable block. - # Placement must be converted to BlockPlacement via property setter - # before ndim logic, because placement may be a slice which doesn't - # have a length. - self.mgr_locs = placement + 'ndim' may be inferred from 'placement'. - # kludgetastic + This will call continue to call __init__ for the other base + classes mixed in with this Mixin. 
+ """ + # Placement must be converted to BlockPlacement so that we can check + # its length + if not isinstance(placement, BlockPlacement): + placement = BlockPlacement(placement) + + # Maybe infer ndim from placement if ndim is None: - if len(self.mgr_locs) != 1: + if len(placement) != 1: ndim = 1 else: ndim = 2 - self.ndim = ndim - - if not isinstance(values, self._holder): - raise TypeError("values must be {0}".format(self._holder.__name__)) - - self.values = values + super(NonConsolidatableMixIn, self).__init__(values, placement, + ndim=ndim) @property def shape(self): @@ -1742,7 +1783,7 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, Returns ------- - a new block(s), the result of the putmask + a new block, the result of the putmask """ inplace = validate_bool_kwarg(inplace, 'inplace') @@ -1800,6 +1841,92 @@ def _unstack(self, unstacker_func, new_columns): return blocks, mask +class ExtensionBlock(NonConsolidatableMixIn, Block): + """Block for holding extension types. + + Notes + ----- + This holds all 3rd-party extension array types. It's also the immediate + parent class for our internal extension types' blocks, CategoricalBlock. + + ExtensionArrays are limited to 1-D. + """ + @property + def _holder(self): + # For extension blocks, the holder is values-dependent. + return type(self.values) + + @property + def is_view(self): + """Extension arrays are never treated as views.""" + return False + + def get_values(self, dtype=None): + # ExtensionArrays must be iterable, so this works. + values = np.asarray(self.values) + if values.ndim == self.ndim - 1: + values = values.reshape((1,) + values.shape) + return values + + def to_dense(self): + return np.asarray(self.values) + + def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): + """ + Take values according to indexer and return them as a block. 
+ """ + if fill_tuple is None: + fill_value = None + else: + fill_value = fill_tuple[0] + + # axis doesn't matter; we are really a single-dim object + # but are passed the axis depending on the calling routing + # if its REALLY axis 0, then this will be a reindex and not a take + new_values = self.values.take(indexer, fill_value=fill_value) + + # if we are a 1-dim object, then always place at 0 + if self.ndim == 1: + new_mgr_locs = [0] + else: + if new_mgr_locs is None: + new_mgr_locs = self.mgr_locs + + return self.make_block_same_class(new_values, new_mgr_locs) + + def _can_hold_element(self, element): + # XXX: We may need to think about pushing this onto the array. + # We're doing the same as CategoricalBlock here. + return True + + def _slice(self, slicer): + """ return a slice of my values """ + + # slice the category + # return same dims as we currently have + + if isinstance(slicer, tuple) and len(slicer) == 2: + if not com.is_null_slice(slicer[0]): + raise AssertionError("invalid slicing for a 1-ndim " + "categorical") + slicer = slicer[1] + + return self.values[slicer] + + def formatting_values(self): + return self.values._formatting_values() + + def concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. 
+ """ + values = self._holder._concat_same_type( + [blk.values for blk in to_concat]) + placement = placement or slice(0, len(values), 1) + return self.make_block_same_class(values, ndim=self.ndim, + placement=placement) + + class NumericBlock(Block): __slots__ = () is_numeric = True @@ -1905,6 +2032,11 @@ def should_store(self, value): class DatetimeLikeBlockMixin(object): + """Mixin class for DatetimeBlock and DatetimeTZBlock.""" + + @property + def _holder(self): + return DatetimeIndex @property def _na_value(self): @@ -1937,6 +2069,10 @@ def __init__(self, values, placement, ndim=None): super(TimeDeltaBlock, self).__init__(values, placement=placement, ndim=ndim) + @property + def _holder(self): + return TimedeltaIndex + @property def _box_func(self): return lambda x: tslib.Timedelta(x, unit='ns') @@ -2312,30 +2448,24 @@ def re_replacer(s): return block -class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock): +class CategoricalBlock(ExtensionBlock): __slots__ = () is_categorical = True _verify_integrity = True _can_hold_na = True - _holder = Categorical _concatenator = staticmethod(_concat._concat_categorical) def __init__(self, values, placement, ndim=None): + from pandas.core.arrays.categorical import _maybe_to_categorical # coerce to categorical if we can super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), - placement=placement, ndim=ndim) + placement=placement, + ndim=ndim) @property - def is_view(self): - """ I am never a view """ - return False - - def to_dense(self): - return self.values.to_dense().view() - - def convert(self, copy=True, **kwargs): - return self.copy() if copy else self + def _holder(self): + return Categorical @property def array_dtype(self): @@ -2344,13 +2474,6 @@ def array_dtype(self): """ return np.object_ - def _slice(self, slicer): - """ return a slice of my values """ - - # slice the category - # return same dims as we currently have - return self.values._slice(slicer) - def _try_coerce_result(self, result): 
""" reverse of try_coerce_args """ @@ -2387,28 +2510,11 @@ def shift(self, periods, axis=0, mgr=None): return self.make_block_same_class(values=self.values.shift(periods), placement=self.mgr_locs) - def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): - """ - Take values according to indexer and return them as a block.bb - """ - if fill_tuple is None: - fill_value = None - else: - fill_value = fill_tuple[0] - - # axis doesn't matter; we are really a single-dim object - # but are passed the axis depending on the calling routing - # if its REALLY axis 0, then this will be a reindex and not a take - new_values = self.values.take_nd(indexer, fill_value=fill_value) - - # if we are a 1-dim object, then always place at 0 - if self.ndim == 1: - new_mgr_locs = [0] - else: - if new_mgr_locs is None: - new_mgr_locs = self.mgr_locs - - return self.make_block_same_class(new_values, new_mgr_locs) + def to_dense(self): + # Categorical.get_values returns a DatetimeIndex for datetime + # categories, so we can't simply use `np.asarray(self.values)` like + # other types. + return self.values.get_values() def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ @@ -2427,6 +2533,15 @@ def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): def concat_same_type(self, to_concat, placement=None): """ Concatenate list of single blocks of the same type. + + Note that this CategoricalBlock._concat_same_type *may* not + return a CategoricalBlock. When the categories in `to_concat` + differ, this will return an object ndarray. + + If / when we decide we don't like that behavior: + + 1. Change Categorical._concat_same_type to use union_categoricals + 2. Delete this method. 
""" values = self._concatenator([blk.values for blk in to_concat], axis=self.ndim - 1) @@ -2442,12 +2557,29 @@ class DatetimeBlock(DatetimeLikeBlockMixin, Block): _can_hold_na = True def __init__(self, values, placement, ndim=None): - if values.dtype != _NS_DTYPE: - values = conversion.ensure_datetime64ns(values) - + values = self._maybe_coerce_values(values) super(DatetimeBlock, self).__init__(values, placement=placement, ndim=ndim) + def _maybe_coerce_values(self, values): + """Input validation for values passed to __init__. Ensure that + we have datetime64ns, coercing if nescessary. + + Parametetrs + ----------- + values : array-like + Must be convertable to datetime64 + + Returns + ------- + values : ndarray[datetime64ns] + + Overridden by DatetimeTZBlock. + """ + if values.dtype != _NS_DTYPE: + values = conversion.ensure_datetime64ns(values) + return values + def _astype(self, dtype, mgr=None, **kwargs): """ these automatically copy, so copy=True has no effect @@ -2573,12 +2705,37 @@ def set(self, locs, values, check=False): class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): """ implement a datetime64 block with a tz attribute """ __slots__ = () - _holder = DatetimeIndex _concatenator = staticmethod(_concat._concat_datetime) is_datetimetz = True def __init__(self, values, placement, ndim=2, dtype=None): + # XXX: This will end up calling _maybe_coerce_values twice + # when dtype is not None. It's relatively cheap (just an isinstance) + # but it'd nice to avoid. + # + # If we can remove dtype from __init__, and push that conversion + # push onto the callers, then we can remove this entire __init__ + # and just use DatetimeBlock's. + if dtype is not None: + values = self._maybe_coerce_values(values, dtype=dtype) + super(DatetimeTZBlock, self).__init__(values, placement=placement, + ndim=ndim) + + def _maybe_coerce_values(self, values, dtype=None): + """Input validation for values passed to __init__. 
Ensure that + we have datetime64TZ, coercing if nescessary. + Parametetrs + ----------- + values : array-like + Must be convertable to datetime64 + dtype : string or DatetimeTZDtype, optional + Does a shallow copy to this tz + + Returns + ------- + values : ndarray[datetime64ns] + """ if not isinstance(values, self._holder): values = self._holder(values) @@ -2590,8 +2747,7 @@ def __init__(self, values, placement, ndim=2, dtype=None): if values.tz is None: raise ValueError("cannot create a DatetimeTZBlock without a tz") - super(DatetimeTZBlock, self).__init__(values, placement=placement, - ndim=ndim) + return values def copy(self, deep=True, mgr=None): """ copy constructor """ @@ -2731,9 +2887,19 @@ class SparseBlock(NonConsolidatableMixIn, Block): _box_to_block_values = False _can_hold_na = True _ftype = 'sparse' - _holder = SparseArray _concatenator = staticmethod(_concat._concat_sparse) + def __init__(self, values, placement, ndim=None): + # Ensure that we have the underlying SparseArray here... + if isinstance(values, ABCSeries): + values = values.values + assert isinstance(values, SparseArray) + super(SparseBlock, self).__init__(values, placement, ndim=ndim) + + @property + def _holder(self): + return SparseArray + @property def shape(self): return (len(self.mgr_locs), self.sp_index.length) @@ -2907,6 +3073,8 @@ def get_block_type(values, dtype=None): cls = BoolBlock elif is_categorical(values): cls = CategoricalBlock + elif is_extension_array_dtype(values): + cls = ExtensionBlock else: cls = ObjectBlock return cls @@ -4660,6 +4828,19 @@ def form_blocks(arrays, names, axes): for i, _, array in items_dict['CategoricalBlock']] blocks.extend(cat_blocks) + if len(items_dict['ExtensionBlock']): + + external_blocks = [] + for i, _, array in items_dict['ExtensionBlock']: + if isinstance(array, ABCSeries): + array = array.values + # Allow our internal arrays to chose their block type. 
+ block_type = getattr(array, '_block_type', ExtensionBlock) + external_blocks.append( + make_block(array, klass=block_type, + fastpath=True, placement=[i])) + blocks.extend(external_blocks) + if len(extra_locs): shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index 22b6d33be9d38..af4e83f506257 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -77,3 +77,26 @@ class NullFrequencyError(ValueError): class AccessorRegistrationWarning(Warning): """Warning for attribute conflicts in accessor registration.""" + + +class AbstractMethodError(NotImplementedError): + """Raise this error instead of NotImplementedError for abstract methods + while keeping compatibility with Python 2 and Python 3. + """ + + def __init__(self, class_instance, methodtype='method'): + types = {'method', 'classmethod', 'staticmethod', 'property'} + if methodtype not in types: + msg = 'methodtype must be one of {}, got {} instead.'.format( + methodtype, types) + raise ValueError(msg) + self.methodtype = methodtype + self.class_instance = class_instance + + def __str__(self): + if self.methodtype == 'classmethod': + name = self.class_instance.__name__ + else: + name = self.class_instance.__class__.__name__ + msg = "This {methodtype} must be defined in the concrete class {name}" + return (msg.format(methodtype=self.methodtype, name=name)) diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index d800a7b92b559..eca4dd4cf2106 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -10,12 +10,14 @@ Series, Categorical, CategoricalIndex, IntervalIndex, date_range) from pandas.compat import string_types +from pandas.core.arrays import ExtensionArray from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, PeriodDtype, - IntervalDtype, CategoricalDtype) + IntervalDtype, CategoricalDtype, ExtensionDtype) from pandas.core.dtypes.common 
import ( is_categorical_dtype, is_categorical, is_datetime64tz_dtype, is_datetimetz, + is_extension_array_dtype, is_period_dtype, is_period, is_dtype_equal, is_datetime64_ns_dtype, is_datetime64_dtype, is_interval_dtype, @@ -742,3 +744,31 @@ def test_categorical_categories(self): tm.assert_index_equal(c1.categories, pd.Index(['a', 'b'])) c1 = CategoricalDtype(CategoricalIndex(['a', 'b'])) tm.assert_index_equal(c1.categories, pd.Index(['a', 'b'])) + + +class DummyArray(ExtensionArray): + pass + + +class DummyDtype(ExtensionDtype): + pass + + +class TestExtensionArrayDtype(object): + + @pytest.mark.parametrize('values', [ + pd.Categorical([]), + pd.Categorical([]).dtype, + pd.Series(pd.Categorical([])), + DummyDtype(), + DummyArray(), + ]) + def test_is_extension_array_dtype(self, values): + assert is_extension_array_dtype(values) + + @pytest.mark.parametrize('values', [ + np.array([]), + pd.Series(np.array([])), + ]) + def test_is_not_extension_array_dtype(self, values): + assert not is_extension_array_dtype(values) diff --git a/pandas/tests/internals/test_external_block.py b/pandas/tests/internals/test_external_block.py index 729ee0093b6dc..2487363df8f99 100644 --- a/pandas/tests/internals/test_external_block.py +++ b/pandas/tests/internals/test_external_block.py @@ -5,12 +5,12 @@ import pandas as pd from pandas.core.internals import ( - Block, BlockManager, SingleBlockManager, NonConsolidatableMixIn) + BlockManager, SingleBlockManager, ExtensionBlock) import pytest -class CustomBlock(NonConsolidatableMixIn, Block): +class CustomBlock(ExtensionBlock): _holder = np.ndarray diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index e3490f465b24a..9338aba90d7cb 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -11,9 +11,8 @@ from distutils.version import LooseVersion import itertools from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex, - Series, Categorical) + 
Series, Categorical, TimedeltaIndex, SparseArray) from pandas.compat import OrderedDict, lrange -from pandas.core.sparse.array import SparseArray from pandas.core.internals import (BlockPlacement, SingleBlockManager, make_block, BlockManager) import pandas.core.algorithms as algos @@ -1263,9 +1262,30 @@ def test_binop_other(self, op, value, dtype): assert_series_equal(result, expected) +@pytest.mark.parametrize('typestr, holder', [ + ('category', Categorical), + ('M8[ns]', DatetimeIndex), + ('M8[ns, US/Central]', DatetimeIndex), + ('m8[ns]', TimedeltaIndex), + ('sparse', SparseArray), +]) +def test_holder(typestr, holder): + blk = create_block(typestr, [1]) + assert blk._holder is holder + + def test_deprecated_fastpath(): # GH#19265 values = np.random.rand(3, 3) with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): make_block(values, placement=np.arange(3), fastpath=True) + + +def test_validate_ndim(): + values = np.array([1.0, 2.0]) + placement = slice(2) + msg = "Wrong number of dimensions. 
values.ndim != ndim \[1 != 2\]" + + with tm.assert_raises_regex(ValueError, msg): + make_block(values, placement, ndim=2) diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 0b7948cc32d24..54f567bcd2a8c 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -574,6 +574,15 @@ def test_setitem_array(self): self.frame['F'].reindex(index), check_names=False) + def test_setitem_chained_no_consolidate(self): + # https://github.com/pandas-dev/pandas/pull/19268 + # issuecomment-361696418 + # chained setitem used to cause consolidation + sdf = pd.SparseDataFrame([[np.nan, 1], [2, np.nan]]) + with pd.option_context('mode.chained_assignment', None): + sdf[0][1] = 2 + assert len(sdf._data.blocks) == 2 + def test_delitem(self): A = self.frame['A'] C = self.frame['C'] diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py index babf88ef1df8d..e2a142366a89e 100644 --- a/pandas/tests/test_errors.py +++ b/pandas/tests/test_errors.py @@ -4,6 +4,8 @@ from warnings import catch_warnings import pandas # noqa import pandas as pd +from pandas.errors import AbstractMethodError +import pandas.util.testing as tm @pytest.mark.parametrize( @@ -50,3 +52,30 @@ def test_error_rename(): raise ParserError() except pd.parser.CParserError: pass + + +class Foo: + @classmethod + def classmethod(cls): + raise AbstractMethodError(cls, methodtype='classmethod') + + @property + def property(self): + raise AbstractMethodError(self, methodtype='property') + + def method(self): + raise AbstractMethodError(self) + + +def test_AbstractMethodError_classmethod(): + xpr = "This classmethod must be defined in the concrete class Foo" + with tm.assert_raises_regex(AbstractMethodError, xpr): + Foo.classmethod() + + xpr = "This property must be defined in the concrete class Foo" + with tm.assert_raises_regex(AbstractMethodError, xpr): + Foo().property + + xpr = "This method must be defined in the concrete 
class Foo" + with tm.assert_raises_regex(AbstractMethodError, xpr): + Foo().method()
(edit post categorical-move) Rebased on master. Summary of the changes from master: Added the ExtensionArray class Categorical subclasses ExtensionArray Implements the new methods for the interface (all private. No public API changes) Adapted the ExtensionDtype class to be the public ABC a. Subclass that with PandasExtensionClass that does non-interface things like reprs, caching, etc. b. All our custom dtypes inherit from PandasExtensionClass, so they implement the interface. Internals Changes: a. Added an ExtensionBlock. This will be a parent for our current custom blocks, and the block type for all 3rd-party extension arrays. Added a new is_extension_array_dtype method. I think this is nescessary for now, until we've handled DatetimeTZ. This isn't really a test of whether extension arrays work yet, since we're still using Categorical for everything. I have a followup PR that implements an IntervalArray that requires additional changes to, e.g., the constructors so that things work. But all the changes from core/internals.py required to make that work are present here. --- 1. New class hierarchy in internals Old: ```python class CategoricalBlock(NonConsolidatableMixin, ObjectBlock): pass ``` new: ```python class ExtensionBlock(NonConsolidatableMixin, Block): pass class CategoricalBlock(ExtensionBlock): pass ``` Figuring out which methods of `ObjectBlock` were required on `CategoricalBlock` wasn't trivial for me. I probably messed some up. I think that eventually we can remove `NonConsolidatableMixin`, with the idea that all non-consolidatable blocks are blocks for extension dtypes? That's true today anyway. Followup PRs: 1. Making `core/arrays/period.py` and refactoring `PeriodIndex` 2. Making `core/arrays/interval.py` and refactoring `IntervalIndex` 3. Adding docs and generic tests like https://github.com/pandas-dev/pandas/pull/19174/files#diff-e448fe09dbe8aed468d89a4c90e65cff for our interface (once it's stabilized a bit).
https://api.github.com/repos/pandas-dev/pandas/pulls/19268
2018-01-16T15:25:09Z
2018-02-02T21:34:21Z
2018-02-02T21:34:21Z
2018-02-02T21:38:06Z
Remove unused fastpath kwarg from Blocks
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 2177fa541b13e..d616ef441a31b 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -104,7 +104,7 @@ class Block(PandasObject): _holder = None _concatenator = staticmethod(np.concatenate) - def __init__(self, values, placement, ndim=None, fastpath=False): + def __init__(self, values, placement, ndim=None): if ndim is None: ndim = values.ndim elif values.ndim != ndim: @@ -204,7 +204,7 @@ def array_dtype(self): """ return self.dtype - def make_block(self, values, placement=None, ndim=None, **kwargs): + def make_block(self, values, placement=None, ndim=None): """ Create a new block, with type inference propagate any values that are not specified @@ -214,21 +214,20 @@ def make_block(self, values, placement=None, ndim=None, **kwargs): if ndim is None: ndim = self.ndim - return make_block(values, placement=placement, ndim=ndim, **kwargs) + return make_block(values, placement=placement, ndim=ndim) - def make_block_scalar(self, values, **kwargs): + def make_block_scalar(self, values): """ Create a ScalarBlock """ return ScalarBlock(values) - def make_block_same_class(self, values, placement=None, fastpath=True, - **kwargs): + def make_block_same_class(self, values, placement=None, ndim=None): """ Wrap given values in a block of same type as self. 
""" if placement is None: placement = self.mgr_locs - return make_block(values, placement=placement, klass=self.__class__, - fastpath=fastpath, **kwargs) + return make_block(values, placement=placement, ndim=ndim, + klass=self.__class__) def __unicode__(self): @@ -339,7 +338,7 @@ def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, new_values = algos.take_nd(self.values, indexer, axis, fill_value=fill_value, mask_info=mask_info) - return self.make_block(new_values, fastpath=True) + return self.make_block(new_values) def iget(self, i): return self.values[i] @@ -458,7 +457,7 @@ def make_a_block(nv, ref_loc): except (AttributeError, NotImplementedError): pass block = self.make_block(values=nv, - placement=ref_loc, fastpath=True) + placement=ref_loc) return block # ndim == 1 @@ -517,7 +516,7 @@ def downcast(self, dtypes=None, mgr=None): dtypes = 'infer' nv = maybe_downcast_to_dtype(values, dtypes) - return self.make_block(nv, fastpath=True) + return self.make_block(nv) # ndim > 1 if dtypes is None: @@ -908,7 +907,7 @@ def _is_empty_indexer(indexer): # coerce and try to infer the dtypes of the result values = self._try_coerce_and_cast_result(values, dtype) - block = self.make_block(transf(values), fastpath=True) + block = self.make_block(transf(values)) return block def putmask(self, mask, new, align=True, inplace=False, axis=0, @@ -1024,7 +1023,7 @@ def f(m, v, i): if transpose: new_values = new_values.T - return [self.make_block(new_values, fastpath=True)] + return [self.make_block(new_values)] def coerce_to_target_dtype(self, other): """ @@ -1159,7 +1158,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, dtype=self.dtype) values = self._try_coerce_result(values) - blocks = [self.make_block(values, klass=self.__class__, fastpath=True)] + blocks = [self.make_block_same_class(values, ndim=self.ndim)] return self._maybe_downcast(blocks, downcast) def _interpolate(self, method=None, index=None, values=None, @@ -1199,8 +1198,7 @@ 
def func(x): # interp each column independently interp_values = np.apply_along_axis(func, axis, data) - blocks = [self.make_block(interp_values, klass=self.__class__, - fastpath=True)] + blocks = [self.make_block_same_class(interp_values)] return self._maybe_downcast(blocks, downcast) def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): @@ -1244,7 +1242,7 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): def diff(self, n, axis=1, mgr=None): """ return block for the diff of the values """ new_values = algos.diff(self.values, n, axis=axis) - return [self.make_block(values=new_values, fastpath=True)] + return [self.make_block(values=new_values)] def shift(self, periods, axis=0, mgr=None): """ shift the block by periods, possibly upcast """ @@ -1274,7 +1272,7 @@ def shift(self, periods, axis=0, mgr=None): if f_ordered: new_values = new_values.T - return [self.make_block(new_values, fastpath=True)] + return [self.make_block(new_values)] def eval(self, func, other, errors='raise', try_cast=False, mgr=None): """ @@ -1414,7 +1412,7 @@ def handle_error(): result = self._try_cast_result(result) result = _block_shape(result, ndim=self.ndim) - return [self.make_block(result, fastpath=True, )] + return [self.make_block(result)] def where(self, other, cond, align=True, errors='raise', try_cast=False, axis=0, transpose=False, mgr=None): @@ -1694,7 +1692,7 @@ class NonConsolidatableMixIn(object): _validate_ndim = False _holder = None - def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs): + def __init__(self, values, placement, ndim=None): # Placement must be converted to BlockPlacement via property setter # before ndim logic, because placement may be a slice which doesn't @@ -1951,12 +1949,12 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): _can_hold_na = True is_numeric = False - def __init__(self, values, placement, fastpath=False, **kwargs): + def __init__(self, values, placement, ndim=None): if 
values.dtype != _TD_DTYPE: values = conversion.ensure_timedelta64ns(values) - super(TimeDeltaBlock, self).__init__(values, fastpath=True, - placement=placement, **kwargs) + super(TimeDeltaBlock, self).__init__(values, + placement=placement, ndim=ndim) @property def _box_func(self): @@ -2089,13 +2087,12 @@ class ObjectBlock(Block): is_object = True _can_hold_na = True - def __init__(self, values, ndim=2, fastpath=False, placement=None, - **kwargs): + def __init__(self, values, placement=None, ndim=2): if issubclass(values.dtype.type, compat.string_types): values = np.array(values, dtype=object) - super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath, - placement=placement, **kwargs) + super(ObjectBlock, self).__init__(values, ndim=ndim, + placement=placement) @property def is_bool(self): @@ -2342,12 +2339,11 @@ class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock): _holder = Categorical _concatenator = staticmethod(_concat._concat_categorical) - def __init__(self, values, placement, fastpath=False, **kwargs): + def __init__(self, values, placement, ndim=None): # coerce to categorical if we can super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), - fastpath=True, - placement=placement, **kwargs) + placement=placement, ndim=ndim) @property def is_view(self): @@ -2464,12 +2460,12 @@ class DatetimeBlock(DatetimeLikeBlockMixin, Block): is_datetime = True _can_hold_na = True - def __init__(self, values, placement, fastpath=False, **kwargs): + def __init__(self, values, placement, ndim=None): if values.dtype != _NS_DTYPE: values = conversion.ensure_datetime64ns(values) - super(DatetimeBlock, self).__init__(values, fastpath=True, - placement=placement, **kwargs) + super(DatetimeBlock, self).__init__(values, + placement=placement, ndim=ndim) def _astype(self, dtype, mgr=None, **kwargs): """ @@ -2600,13 +2596,11 @@ class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): _concatenator = staticmethod(_concat._concat_datetime) 
is_datetimetz = True - def __init__(self, values, placement, ndim=2, **kwargs): + def __init__(self, values, placement, ndim=2, dtype=None): if not isinstance(values, self._holder): values = self._holder(values) - dtype = kwargs.pop('dtype', None) - if dtype is not None: if isinstance(dtype, compat.string_types): dtype = DatetimeTZDtype.construct_from_string(dtype) @@ -2616,7 +2610,7 @@ def __init__(self, values, placement, ndim=2, **kwargs): raise ValueError("cannot create a DatetimeTZBlock without a tz") super(DatetimeTZBlock, self).__init__(values, placement=placement, - ndim=ndim, **kwargs) + ndim=ndim) def copy(self, deep=True, mgr=None): """ copy constructor """ @@ -2822,7 +2816,7 @@ def copy(self, deep=True, mgr=None): def make_block_same_class(self, values, placement, sparse_index=None, kind=None, dtype=None, fill_value=None, - copy=False, fastpath=True, **kwargs): + copy=False, ndim=None): """ return a new block """ if dtype is None: dtype = values.dtype @@ -2841,8 +2835,7 @@ def make_block_same_class(self, values, placement, sparse_index=None, # won't take space since there's 0 items, plus it will preserve # the dtype. 
return self.make_block(np.empty(values.shape, dtype=dtype), - placement, - fastpath=True) + placement) elif nitems > 1: raise ValueError("Only 1-item 2d sparse blocks are supported") else: @@ -2851,7 +2844,7 @@ def make_block_same_class(self, values, placement, sparse_index=None, new_values = SparseArray(values, sparse_index=sparse_index, kind=kind or self.kind, dtype=dtype, fill_value=fill_value, copy=copy) - return self.make_block(new_values, fastpath=fastpath, + return self.make_block(new_values, placement=placement) def interpolate(self, method='pad', axis=0, inplace=False, limit=None, @@ -2960,16 +2953,20 @@ def get_block_type(values, dtype=None): def make_block(values, placement, klass=None, ndim=None, dtype=None, - fastpath=False): + fastpath=None): + if fastpath is not None: + # GH#19265 pyarrow is passing this + warnings.warn("fastpath argument is deprecated, will be removed " + "in a future release.", DeprecationWarning) if klass is None: dtype = dtype or values.dtype klass = get_block_type(values, dtype) elif klass is DatetimeTZBlock and not is_datetimetz(values): - return klass(values, ndim=ndim, fastpath=fastpath, + return klass(values, ndim=ndim, placement=placement, dtype=dtype) - return klass(values, ndim=ndim, fastpath=fastpath, placement=placement) + return klass(values, ndim=ndim, placement=placement) # TODO: flexible with index=None and/or items=None @@ -3029,7 +3026,7 @@ class BlockManager(PandasObject): __slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated', '_is_consolidated', '_blknos', '_blklocs'] - def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True): + def __init__(self, blocks, axes, do_integrity_check=True): self.axes = [_ensure_index(ax) for ax in axes] self.blocks = tuple(blocks) @@ -3640,8 +3637,7 @@ def get_slice(self, slobj, axis=0): new_axes = list(self.axes) new_axes[axis] = new_axes[axis][slobj] - bm = self.__class__(new_blocks, new_axes, do_integrity_check=False, - fastpath=True) + bm = 
self.__class__(new_blocks, new_axes, do_integrity_check=False) bm._consolidate_inplace() return bm @@ -3796,7 +3792,7 @@ def xs(self, key, axis=1, copy=True, takeable=False): # we must copy here as we are mixed type for blk in self.blocks: newb = make_block(values=blk.values[slicer], - klass=blk.__class__, fastpath=True, + klass=blk.__class__, placement=blk.mgr_locs) new_blocks.append(newb) elif len(self.blocks) == 1: @@ -3806,8 +3802,7 @@ def xs(self, key, axis=1, copy=True, takeable=False): vals = vals.copy() new_blocks = [make_block(values=vals, placement=block.mgr_locs, - klass=block.__class__, - fastpath=True, )] + klass=block.__class__)] return self.__class__(new_blocks, new_axes) @@ -3910,7 +3905,7 @@ def iget(self, i, fastpath=True): return SingleBlockManager( [block.make_block_same_class(values, placement=slice(0, len(values)), - ndim=1, fastpath=True)], + ndim=1)], self.axes[1]) def get_scalar(self, tup): @@ -4432,8 +4427,7 @@ def __init__(self, block, axis, do_integrity_check=False, fastpath=False): block = block[0] if not isinstance(block, Block): - block = make_block(block, placement=slice(0, len(axis)), ndim=1, - fastpath=True) + block = make_block(block, placement=slice(0, len(axis)), ndim=1) self.blocks = [block] @@ -4725,7 +4719,6 @@ def form_blocks(arrays, names, axes): if len(items_dict['DatetimeTZBlock']): dttz_blocks = [make_block(array, klass=DatetimeTZBlock, - fastpath=True, placement=[i]) for i, _, array in items_dict['DatetimeTZBlock']] blocks.extend(dttz_blocks) @@ -4743,8 +4736,7 @@ def form_blocks(arrays, names, axes): blocks.extend(sparse_blocks) if len(items_dict['CategoricalBlock']) > 0: - cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True, - placement=[i]) + cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i]) for i, _, array in items_dict['CategoricalBlock']] blocks.extend(cat_blocks) @@ -4800,8 +4792,7 @@ def _sparse_blockify(tuples, dtype=None): new_blocks = [] for i, names, array in tuples: 
array = _maybe_to_sparse(array) - block = make_block(array, klass=SparseBlock, fastpath=True, - placement=[i]) + block = make_block(array, klass=SparseBlock, placement=[i]) new_blocks.append(block) return new_blocks @@ -4885,7 +4876,7 @@ def _merge_blocks(blocks, dtype=None, _can_consolidate=True): new_values = new_values[argsort] new_mgr_locs = new_mgr_locs[argsort] - return make_block(new_values, fastpath=True, placement=new_mgr_locs) + return make_block(new_values, placement=new_mgr_locs) # no merge return blocks diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 623d2d39607c2..b1f89829c95a5 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -1254,3 +1254,11 @@ def test_binop_other(self, op, value, dtype): result = op(s, e).dtypes expected = op(s, value).dtypes assert_series_equal(result, expected) + + +def test_deprecated_fastpath(): + # GH#19265 + values = np.random.rand(3, 3) + with tm.assert_produces_warning(DeprecationWarning, + check_stacklevel=False): + make_block(values, placement=np.arange(3), fastpath=True)
The `fastpath` kwarg in `Block.__init__` must be vestigial or something. It isn't used anywhere in the blocks themselves. This PR removes the unused kwarg.
https://api.github.com/repos/pandas-dev/pandas/pulls/19265
2018-01-16T06:30:52Z
2018-01-19T21:50:33Z
2018-01-19T21:50:33Z
2018-12-07T22:36:12Z
CLN: put mgr_locs setter next to property definition
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 3c923133477df..43fdd454250a5 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -191,6 +191,13 @@ def fill_value(self): def mgr_locs(self): return self._mgr_locs + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs): + if not isinstance(new_mgr_locs, BlockPlacement): + new_mgr_locs = BlockPlacement(new_mgr_locs) + + self._mgr_locs = new_mgr_locs + @property def array_dtype(self): """ the dtype to return if I want to construct this block as an @@ -224,13 +231,6 @@ def make_block_same_class(self, values, placement=None, fastpath=True, return make_block(values, placement=placement, klass=self.__class__, fastpath=fastpath, **kwargs) - @mgr_locs.setter - def mgr_locs(self, new_mgr_locs): - if not isinstance(new_mgr_locs, BlockPlacement): - new_mgr_locs = BlockPlacement(new_mgr_locs) - - self._mgr_locs = new_mgr_locs - def __unicode__(self): # don't want to print out all of the items here @@ -840,7 +840,6 @@ def setitem(self, indexer, value, mgr=None): transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) values = transf(values) - l = len(values) # length checking # boolean with truth values == len of the value is ok too @@ -855,7 +854,7 @@ def setitem(self, indexer, value, mgr=None): # slice elif isinstance(indexer, slice): - if is_list_like(value) and l: + if is_list_like(value) and len(values): if len(value) != length_of_indexer(indexer, values): raise ValueError("cannot set using a slice indexer with a " "different length than the value")
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19264
2018-01-16T04:42:29Z
2018-01-17T00:15:31Z
2018-01-17T00:15:31Z
2018-02-11T21:59:07Z
BUG: Patch handling of keep_default_na=False
diff --git a/doc/source/io.rst b/doc/source/io.rst index 2f29e390c0ba1..ae04996b4fddf 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -214,8 +214,20 @@ na_values : scalar, str, list-like, or dict, default ``None`` for a list of the values interpreted as NaN by default. keep_default_na : boolean, default ``True`` - If na_values are specified and keep_default_na is ``False`` the default NaN - values are overridden, otherwise they're appended to. + Whether or not to include the default NaN values when parsing the data. + Depending on whether `na_values` is passed in, the behavior is as follows: + + * If `keep_default_na` is True, and `na_values` are specified, `na_values` + is appended to the default NaN values used for parsing. + * If `keep_default_na` is True, and `na_values` are not specified, only + the default NaN values are used for parsing. + * If `keep_default_na` is False, and `na_values` are specified, only + the NaN values specified `na_values` are used for parsing. + * If `keep_default_na` is False, and `na_values` are not specified, no + strings will be parsed as NaN. + + Note that if `na_filter` is passed in as False, the `keep_default_na` and + `na_values` parameters will be ignored. na_filter : boolean, default ``True`` Detect missing value markers (empty strings and the value of na_values). In data without any NAs, passing ``na_filter=False`` can improve the performance diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 853d5cee11cd1..326673a54acfa 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -463,6 +463,7 @@ I/O - :func:`read_html` now rewinds seekable IO objects after parse failure, before attempting to parse with a new parser. 
If a parser errors and the object is non-seekable, an informative error is raised suggesting the use of a different parser (:issue:`17975`) - Bug in :func:`read_msgpack` with a non existent file is passed in Python 2 (:issue:`15296`) - Bug in :func:`read_csv` where a ``MultiIndex`` with duplicate columns was not being mangled appropriately (:issue:`18062`) +- Bug in :func:`read_csv` where missing values were not being handled properly when ``keep_default_na=False`` with dictionary ``na_values`` (:issue:`19227`) - Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`) - Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`) - Bug in :func:`read_json` where large numeric values were causing an ``OverflowError`` (:issue:`18842`) diff --git a/pandas/_libs/parsers.pyx b/pandas/_libs/parsers.pyx index cf63b5083885e..5efe2147f6f8e 100644 --- a/pandas/_libs/parsers.pyx +++ b/pandas/_libs/parsers.pyx @@ -288,7 +288,7 @@ cdef class TextReader: object file_handle, na_fvalues object true_values, false_values object handle - bint na_filter, verbose, has_usecols, has_mi_columns + bint na_filter, keep_default_na, verbose, has_usecols, has_mi_columns int64_t parser_start list clocks char *c_encoding @@ -352,6 +352,8 @@ cdef class TextReader: na_filter=True, na_values=None, na_fvalues=None, + keep_default_na=True, + true_values=None, false_values=None, allow_leading_cols=True, @@ -378,8 +380,8 @@ cdef class TextReader: self.parser = parser_new() self.parser.chunksize = tokenize_chunksize - self.mangle_dupe_cols=mangle_dupe_cols - self.tupleize_cols=tupleize_cols + self.mangle_dupe_cols = mangle_dupe_cols + self.tupleize_cols = tupleize_cols # For timekeeping self.clocks = [] @@ -477,6 +479,7 @@ cdef class TextReader: self.true_set = kset_from_list(self.true_values) self.false_set = kset_from_list(self.false_values) + 
self.keep_default_na = keep_default_na self.converters = converters self.na_filter = na_filter @@ -1299,7 +1302,10 @@ cdef class TextReader: elif i in self.na_values: key = i else: # No na_values provided for this column. - return _NA_VALUES, set() + if self.keep_default_na: + return _NA_VALUES, set() + + return list(), set() values = self.na_values[key] if values is not None and not isinstance(values, list): diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 150fccde81a60..1ba687541eecf 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -149,8 +149,20 @@ NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'. keep_default_na : bool, default True - If na_values are specified and keep_default_na is False the default NaN - values are overridden, otherwise they're appended to. + Whether or not to include the default NaN values when parsing the data. + Depending on whether `na_values` is passed in, the behavior is as follows: + + * If `keep_default_na` is True, and `na_values` are specified, `na_values` + is appended to the default NaN values used for parsing. + * If `keep_default_na` is True, and `na_values` are not specified, only + the default NaN values are used for parsing. + * If `keep_default_na` is False, and `na_values` are specified, only + the NaN values specified `na_values` are used for parsing. + * If `keep_default_na` is False, and `na_values` are not specified, no + strings will be parsed as NaN. + + Note that if `na_filter` is passed in as False, the `keep_default_na` and + `na_values` parameters will be ignored. na_filter : boolean, default True Detect missing value markers (empty strings and the value of na_values). 
In data without any NAs, passing na_filter=False can improve the performance @@ -910,9 +922,6 @@ def _clean_options(self, options, engine): na_values = options['na_values'] skiprows = options['skiprows'] - # really delete this one - keep_default_na = result.pop('keep_default_na') - _validate_header_arg(options['header']) depr_warning = '' @@ -957,6 +966,7 @@ def _clean_options(self, options, engine): converters = {} # Converting values to NA + keep_default_na = options['keep_default_na'] na_values, na_fvalues = _clean_na_values(na_values, keep_default_na) # handle skiprows; this is internally handled by the @@ -1225,6 +1235,7 @@ def __init__(self, kwds): self.na_values = kwds.get('na_values') self.na_fvalues = kwds.get('na_fvalues') self.na_filter = kwds.get('na_filter', False) + self.keep_default_na = kwds.get('keep_default_na', True) self.true_values = kwds.get('true_values') self.false_values = kwds.get('false_values') @@ -1487,7 +1498,8 @@ def _agg_index(self, index, try_parse_dates=True): col_name = self.index_names[i] if col_name is not None: col_na_values, col_na_fvalues = _get_na_values( - col_name, self.na_values, self.na_fvalues) + col_name, self.na_values, self.na_fvalues, + self.keep_default_na) arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues) arrays.append(arr) @@ -1510,7 +1522,7 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False, if self.na_filter: col_na_values, col_na_fvalues = _get_na_values( - c, na_values, na_fvalues) + c, na_values, na_fvalues, self.keep_default_na) else: col_na_values, col_na_fvalues = set(), set() @@ -3097,16 +3109,23 @@ def _clean_na_values(na_values, keep_default_na=True): na_values = set() na_fvalues = set() elif isinstance(na_values, dict): - na_values = na_values.copy() # Prevent aliasing. - if keep_default_na: - for k, v in compat.iteritems(na_values): - if not is_list_like(v): - v = [v] + old_na_values = na_values.copy() + na_values = {} # Prevent aliasing. 
+ + # Convert the values in the na_values dictionary + # into array-likes for further use. This is also + # where we append the default NaN values, provided + # that `keep_default_na=True`. + for k, v in compat.iteritems(old_na_values): + if not is_list_like(v): + v = [v] + + if keep_default_na: v = set(v) | _NA_VALUES - na_values[k] = v - na_fvalues = dict( - (k, _floatify_na_values(v)) for k, v in na_values.items() # noqa - ) + + na_values[k] = v + na_fvalues = dict((k, _floatify_na_values(v)) + for k, v in na_values.items()) else: if not is_list_like(na_values): na_values = [na_values] @@ -3225,12 +3244,38 @@ def _stringify_na_values(na_values): return set(result) -def _get_na_values(col, na_values, na_fvalues): +def _get_na_values(col, na_values, na_fvalues, keep_default_na): + """ + Get the NaN values for a given column. + + Parameters + ---------- + col : str + The name of the column. + na_values : array-like, dict + The object listing the NaN values as strings. + na_fvalues : array-like, dict + The object listing the NaN values as floats. + keep_default_na : bool + If `na_values` is a dict, and the column is not mapped in the + dictionary, whether to return the default NaN values or the empty set. + + Returns + ------- + nan_tuple : A length-two tuple composed of + + 1) na_values : the string NaN values for that column. + 2) na_fvalues : the float NaN values for that column. 
+ """ + if isinstance(na_values, dict): if col in na_values: return na_values[col], na_fvalues[col] else: - return _NA_VALUES, set() + if keep_default_na: + return _NA_VALUES, set() + + return set(), set() else: return na_values, na_fvalues diff --git a/pandas/tests/io/parser/na_values.py b/pandas/tests/io/parser/na_values.py index f8906d5a1f7ba..d2c3f82e95c4d 100644 --- a/pandas/tests/io/parser/na_values.py +++ b/pandas/tests/io/parser/na_values.py @@ -224,6 +224,45 @@ def test_na_values_keep_default(self): 'seven']}) tm.assert_frame_equal(xp.reindex(columns=df.columns), df) + def test_no_keep_default_na_dict_na_values(self): + # see gh-19227 + data = "a,b\n,2" + + df = self.read_csv(StringIO(data), na_values={"b": ["2"]}, + keep_default_na=False) + expected = DataFrame({"a": [""], "b": [np.nan]}) + tm.assert_frame_equal(df, expected) + + # Scalar values shouldn't cause the parsing to crash or fail. + data = "a,b\n1,2" + + df = self.read_csv(StringIO(data), na_values={"b": 2}, + keep_default_na=False) + expected = DataFrame({"a": [1], "b": [np.nan]}) + tm.assert_frame_equal(df, expected) + + data = """\ +113125,"blah","/blaha",kjsdkj,412.166,225.874,214.008 +729639,"qwer","",asdfkj,466.681,,252.373 +""" + expected = DataFrame({0: [np.nan, 729639.0], + 1: [np.nan, "qwer"], + 2: ["/blaha", np.nan], + 3: ["kjsdkj", "asdfkj"], + 4: [412.166, 466.681], + 5: ["225.874", ""], + 6: [np.nan, 252.373]}) + + df = self.read_csv(StringIO(data), header=None, keep_default_na=False, + na_values={2: "", 6: "214.008", + 1: "blah", 0: 113125}) + tm.assert_frame_equal(df, expected) + + df = self.read_csv(StringIO(data), header=None, keep_default_na=False, + na_values={2: "", 6: "214.008", + 1: "blah", 0: "113125"}) + tm.assert_frame_equal(df, expected) + def test_na_values_na_filter_override(self): data = """\ A,B
Patches very buggy behavior of `keep_default_na=False` whenever `na_values` is a dict * Respect `keep_default_na` for column that doesn't exist in `na_values` dictionary * Don't crash / break when `na_value` is a scalar in the `na_values` dictionary. In addition, clarifies documentation on the handling of the keep `keep_default_na` parameter with respect to `na_filter` and `na_values`. Closes #19227. cc @neilser
https://api.github.com/repos/pandas-dev/pandas/pulls/19260
2018-01-16T00:22:46Z
2018-01-18T00:23:17Z
2018-01-18T00:23:16Z
2018-01-18T04:21:37Z
DOC: corrects Expanding min_periods default in docstring
diff --git a/pandas/core/window.py b/pandas/core/window.py index 76ba76b7a9da9..5d2fa16876c11 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -1286,7 +1286,7 @@ class Expanding(_Rolling_and_Expanding): Parameters ---------- - min_periods : int, default None + min_periods : int, default 1 Minimum number of observations in window required to have a value (otherwise result is NA). center : boolean, default False
Currently the doc string says the ``min_periods`` default is ``None``, while it in the code actually is 1. This PR fixes this.
https://api.github.com/repos/pandas-dev/pandas/pulls/19259
2018-01-15T23:55:43Z
2018-01-16T00:13:43Z
2018-01-16T00:13:43Z
2018-01-17T10:11:49Z
move panel-specific swaplevel to panel
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cef1e551f948e..d4fdb56b8b9a6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -720,31 +720,6 @@ def squeeze(self, axis=None): except Exception: return self - def swaplevel(self, i=-2, j=-1, axis=0): - """ - Swap levels i and j in a MultiIndex on a particular axis - - Parameters - ---------- - i, j : int, string (can be mixed) - Level of index to be swapped. Can pass level name as string. - - Returns - ------- - swapped : type of caller (new object) - - .. versionchanged:: 0.18.1 - - The indexes ``i`` and ``j`` are now optional, and default to - the two innermost levels of the index. - - """ - axis = self._get_axis_number(axis) - result = self.copy() - labels = result._data.axes[axis] - result._data.set_axis(axis, labels.swaplevel(i, j)) - return result - # ---------------------------------------------------------------------- # Rename diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 26e7c192ad0af..0a63ad8598907 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1241,6 +1241,31 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True, copy=copy, limit=limit, fill_value=fill_value) + def swaplevel(self, i=-2, j=-1, axis=0): + """ + Swap levels i and j in a MultiIndex on a particular axis + + Parameters + ---------- + i, j : int, string (can be mixed) + Level of index to be swapped. Can pass level name as string. + + Returns + ------- + swapped : type of caller (new object) + + .. versionchanged:: 0.18.1 + + The indexes ``i`` and ``j`` are now optional, and default to + the two innermost levels of the index. + + """ + axis = self._get_axis_number(axis) + result = self.copy() + labels = result._data.axes[axis] + result._data.set_axis(axis, labels.swaplevel(i, j)) + return result + @Appender(_shared_docs['transpose'] % _shared_doc_kwargs) def transpose(self, *args, **kwargs): # check if a list of axes was passed in instead as a
NDFrame.swaplevel is overriden by both Series and DataFrame, so in practice the "generic" implementation is specific to Panel. This PR just moves the method from NDFrame to Panel. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19258
2018-01-15T23:40:30Z
2018-01-15T23:47:11Z
null
2018-01-15T23:47:17Z
PERF: remove use of Panel & perf in rolling corr/cov
diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 59cf7d090a622..75990d83f8212 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -11,8 +11,8 @@ class Methods(object): [10, 1000], ['int', 'float'], ['median', 'mean', 'max', 'min', 'std', 'count', 'skew', 'kurt', - 'sum', 'corr', 'cov']) - param_names = ['constructor', 'window', 'dtype', 'method'] + 'sum']) + param_names = ['contructor', 'window', 'dtype', 'method'] def setup(self, constructor, window, dtype, method): N = 10**5 @@ -23,6 +23,27 @@ def time_rolling(self, constructor, window, dtype, method): getattr(self.roll, method)() +class Pairwise(object): + + sample_time = 0.2 + params = ([10, 1000, None], + ['corr', 'cov'], + [True, False]) + param_names = ['window', 'method', 'pairwise'] + + def setup(self, window, method, pairwise): + N = 10**4 + arr = np.random.random(N) + self.df = pd.DataFrame(arr) + + def time_pairwise(self, window, method, pairwise): + if window is None: + r = self.df.expanding() + else: + r = self.df.rolling(window=window) + getattr(r, method)(self.df, pairwise=pairwise) + + class Quantile(object): sample_time = 0.2 diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 2bd2bb199bf1f..5db29cb76b106 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -383,7 +383,7 @@ Performance Improvements - :func:`Series` / :func:`DataFrame` tab completion limits to 100 values, for better performance. (:issue:`18587`) - Improved performance of :func:`DataFrame.median` with ``axis=1`` when bottleneck is not installed (:issue:`16468`) - Improved performance of :func:`MultiIndex.get_loc` for large indexes, at the cost of a reduction in performance for small ones (:issue:`18519`) - +- Improved performance of pairwise ``.rolling()`` and ``.expanding()`` with ``.cov()`` and ``.corr()`` operations (:issue:`17917`) .. 
_whatsnew_0230.docs: diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 0e92fc4edce85..a4c9848dca900 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -99,19 +99,15 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', if not dropna: from pandas import MultiIndex - try: + if table.index.nlevels > 1: m = MultiIndex.from_arrays(cartesian_product(table.index.levels), names=table.index.names) table = table.reindex(m, axis=0) - except AttributeError: - pass # it's a single level - try: + if table.columns.nlevels > 1: m = MultiIndex.from_arrays(cartesian_product(table.columns.levels), names=table.columns.names) table = table.reindex(m, axis=1) - except AttributeError: - pass # it's a single level or a series if isinstance(table, ABCDataFrame): table = table.sort_index(axis=1) diff --git a/pandas/core/window.py b/pandas/core/window.py index 4d6a1de60f59b..a3f19ef50459d 100644 --- a/pandas/core/window.py +++ b/pandas/core/window.py @@ -1863,25 +1863,38 @@ def dataframe_from_int_dict(data, frame_template): results[i][j] = f(*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j])) - # TODO: not the most efficient (perf-wise) - # though not bad code-wise - from pandas import Panel, MultiIndex, concat - - with warnings.catch_warnings(record=True): - p = Panel.from_dict(results).swapaxes('items', 'major') - if len(p.major_axis) > 0: - p.major_axis = arg1.columns[p.major_axis] - if len(p.minor_axis) > 0: - p.minor_axis = arg2.columns[p.minor_axis] - - if len(p.items): + from pandas import MultiIndex, concat + + result_index = arg1.index.union(arg2.index) + if len(result_index): + + # construct result frame result = concat( - [p.iloc[i].T for i in range(len(p.items))], - keys=p.items) + [concat([results[i][j] + for j, c in enumerate(arg2.columns)], + ignore_index=True) + for i, c in enumerate(arg1.columns)], + ignore_index=True, + axis=1) + result.columns = arg1.columns + + # set the index and reorder + 
if arg2.columns.nlevels > 1: + result.index = MultiIndex.from_product( + arg2.columns.levels + [result_index]) + result = result.reorder_levels([2, 0, 1]).sort_index() + else: + result.index = MultiIndex.from_product( + [range(len(arg2.columns)), + range(len(result_index))]) + result = result.swaplevel(1, 0).sort_index() + result.index = MultiIndex.from_product( + [result_index] + [arg2.columns]) else: + # empty result result = DataFrame( - index=MultiIndex(levels=[arg1.index, arg1.columns], + index=MultiIndex(levels=[arg1.index, arg2.columns], labels=[[], []]), columns=arg2.columns, dtype='float64') @@ -1890,9 +1903,9 @@ def dataframe_from_int_dict(data, frame_template): # reset our column names to arg2 names # careful not to mutate the original names result.columns = result.columns.set_names( - arg2.columns.names) + arg1.columns.names) result.index = result.index.set_names( - arg1.index.names + arg1.columns.names) + result_index.names + arg2.columns.names) return result diff --git a/pandas/tests/test_window.py b/pandas/tests/test_window.py index 22526d14a7168..dabdb1e8e689c 100644 --- a/pandas/tests/test_window.py +++ b/pandas/tests/test_window.py @@ -14,6 +14,7 @@ import pandas.tseries.offsets as offsets from pandas.core.base import SpecificationError from pandas.errors import UnsupportedFunctionCall +from pandas.core.sorting import safe_sort import pandas.util.testing as tm import pandas.util._test_decorators as td from pandas.compat import range, zip @@ -1645,7 +1646,7 @@ def compare(self, result, expected): result = result.dropna().values expected = expected.dropna().values - tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result, expected, check_dtype=False) @pytest.mark.parametrize('f', [lambda x: x.cov(), lambda x: x.corr()]) def test_no_flex(self, f): @@ -1670,15 +1671,19 @@ def test_no_flex(self, f): def test_pairwise_with_self(self, f): # DataFrame with itself, pairwise=True - results = [f(df) for df in self.df1s] - for (df, 
result) in zip(self.df1s, results): + # note that we may construct the 1st level of the MI + # in a non-motononic way, so compare accordingly + results = [] + for i, df in enumerate(self.df1s): + result = f(df) tm.assert_index_equal(result.index.levels[0], df.index, check_names=False) - tm.assert_index_equal(result.index.levels[1], - df.columns, - check_names=False) + tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]), + safe_sort(df.columns.unique())) tm.assert_index_equal(result.columns, df.columns) + results.append(df) + for i, result in enumerate(results): if i > 0: self.compare(result, results[0]) @@ -1716,9 +1721,8 @@ def test_pairwise_with_other(self, f): tm.assert_index_equal(result.index.levels[0], df.index, check_names=False) - tm.assert_index_equal(result.index.levels[1], - self.df2.columns, - check_names=False) + tm.assert_numpy_array_equal(safe_sort(result.index.levels[1]), + safe_sort(self.df2.columns.unique())) for i, result in enumerate(results): if i > 0: self.compare(result, results[0])
closes #17917 ``` before after ratio [aa9e0024] [872fe711] - 1.70s 16.02ms 0.01 rolling.Pairwise.time_pairwise(None, 'corr', True) - 1.84s 17.12ms 0.01 rolling.Pairwise.time_pairwise(10, 'corr', True) - 1.84s 16.98ms 0.01 rolling.Pairwise.time_pairwise(1000, 'corr', True) - 1.74s 15.59ms 0.01 rolling.Pairwise.time_pairwise(None, 'cov', True) - 1.77s 15.63ms 0.01 rolling.Pairwise.time_pairwise(1000, 'cov', True) - 1.83s 14.62ms 0.01 rolling.Pairwise.time_pairwise(10, 'cov', True) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19257
2018-01-15T23:37:15Z
2018-02-01T12:45:16Z
2018-02-01T12:45:16Z
2018-02-01T12:45:16Z
Refactor numeric tests
diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index dcd592345b91c..6dc07d8336f92 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -27,22 +27,22 @@ def full_like(array, value): class Numeric(Base): - def test_numeric_compat(self): + pass # override Base method + def test_mul_int(self): idx = self.create_index() - didx = idx * idx - result = idx * 1 tm.assert_index_equal(result, idx) + def test_rmul_int(self): + idx = self.create_index() + result = 1 * idx tm.assert_index_equal(result, idx) - # in general not true for RangeIndex - if not isinstance(idx, RangeIndex): - result = idx * idx - tm.assert_index_equal(result, idx ** 2) + def test_div_int(self): + idx = self.create_index() # truediv under PY3 result = idx / 1 @@ -57,9 +57,16 @@ def test_numeric_compat(self): expected = Index(idx.values / 2) tm.assert_index_equal(result, expected) + def test_floordiv_int(self): + idx = self.create_index() + result = idx // 1 tm.assert_index_equal(result, idx) + def test_mul_int_array(self): + idx = self.create_index() + didx = idx * idx + result = idx * np.array(5, dtype='int64') tm.assert_index_equal(result, idx * 5) @@ -67,19 +74,45 @@ def test_numeric_compat(self): result = idx * np.arange(5, dtype=arr_dtype) tm.assert_index_equal(result, didx) + def test_mul_int_series(self): + idx = self.create_index() + didx = idx * idx + + arr_dtype = 'uint64' if isinstance(idx, UInt64Index) else 'int64' result = idx * Series(np.arange(5, dtype=arr_dtype)) tm.assert_index_equal(result, didx) - result = idx * Series(np.arange(5, dtype='float64') + 0.1) - expected = Float64Index(np.arange(5, dtype='float64') * - (np.arange(5, dtype='float64') + 0.1)) + def test_mul_float_series(self): + idx = self.create_index() + rng5 = np.arange(5, dtype='float64') + + result = idx * Series(rng5 + 0.1) + expected = Float64Index(rng5 * (rng5 + 0.1)) tm.assert_index_equal(result, expected) - # invalid - 
pytest.raises(TypeError, - lambda: idx * date_range('20130101', periods=5)) - pytest.raises(ValueError, lambda: idx * idx[0:3]) - pytest.raises(ValueError, lambda: idx * np.array([1, 2])) + def test_mul_index(self): + idx = self.create_index() + + # in general not true for RangeIndex + if not isinstance(idx, RangeIndex): + result = idx * idx + tm.assert_index_equal(result, idx ** 2) + + def test_mul_datelike_raises(self): + idx = self.create_index() + with pytest.raises(TypeError): + idx * date_range('20130101', periods=5) + + def test_mul_size_mismatch_raises(self): + idx = self.create_index() + + with pytest.raises(ValueError): + idx * idx[0:3] + with pytest.raises(ValueError): + idx * np.array([1, 2]) + + def test_divmod(self): + idx = self.create_index() result = divmod(idx, 2) with np.errstate(all='ignore'): @@ -97,23 +130,27 @@ def test_numeric_compat(self): result = divmod(idx, Series(full_like(idx.values, 2))) with np.errstate(all='ignore'): - div, mod = divmod( - idx.values, - full_like(idx.values, 2), - ) + div, mod = divmod(idx.values, full_like(idx.values, 2)) expected = Index(div), Index(mod) for r, e in zip(result, expected): tm.assert_index_equal(r, e) + def test_pow_float(self): # test power calculations both ways, GH 14973 - expected = pd.Float64Index(2.0**idx.values) - result = 2.0**idx - tm.assert_index_equal(result, expected) + idx = self.create_index() expected = pd.Float64Index(idx.values**2.0) result = idx**2.0 tm.assert_index_equal(result, expected) + def test_rpow_float(self): + # test power calculations both ways, GH 14973 + idx = self.create_index() + + expected = pd.Float64Index(2.0**idx.values) + result = 2.0**idx + tm.assert_index_equal(result, expected) + def test_explicit_conversions(self): # GH 8608 diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 3ec918e391860..3b1f9c0bc5d45 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ 
b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -128,40 +128,66 @@ def test_tdi_with_offset_series(self, names): with tm.assert_produces_warning(PerformanceWarning): anchored - tdi - # TODO: Split by ops, better name - def test_numeric_compat(self): + def test_mul_int(self): idx = self._holder(np.arange(5, dtype='int64')) - didx = self._holder(np.arange(5, dtype='int64') ** 2) result = idx * 1 tm.assert_index_equal(result, idx) + def test_rmul_int(self): + idx = self._holder(np.arange(5, dtype='int64')) result = 1 * idx tm.assert_index_equal(result, idx) + def test_div_int(self): + idx = self._holder(np.arange(5, dtype='int64')) result = idx / 1 tm.assert_index_equal(result, idx) + def test_floordiv_int(self): + idx = self._holder(np.arange(5, dtype='int64')) result = idx // 1 tm.assert_index_equal(result, idx) + def test_mul_int_array_zerodim(self): + rng5 = np.arange(5, dtype='int64') + idx = self._holder(rng5) + expected = self._holder(rng5 * 5) result = idx * np.array(5, dtype='int64') - tm.assert_index_equal(result, - self._holder(np.arange(5, dtype='int64') * 5)) + tm.assert_index_equal(result, expected) + + def test_mul_int_array(self): + rng5 = np.arange(5, dtype='int64') + idx = self._holder(rng5) + didx = self._holder(rng5 ** 2) - result = idx * np.arange(5, dtype='int64') + result = idx * rng5 tm.assert_index_equal(result, didx) + def test_mul_int_series(self): + idx = self._holder(np.arange(5, dtype='int64')) + didx = self._holder(np.arange(5, dtype='int64') ** 2) + result = idx * Series(np.arange(5, dtype='int64')) tm.assert_index_equal(result, didx) - result = idx * Series(np.arange(5, dtype='float64') + 0.1) - tm.assert_index_equal(result, self._holder(np.arange( - 5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1))) + def test_mul_float_series(self): + idx = self._holder(np.arange(5, dtype='int64')) + + rng5f = np.arange(5, dtype='float64') + result = idx * Series(rng5f + 0.1) + tm.assert_index_equal(result, self._holder(rng5f * 
(rng5f + 0.1))) - # invalid - pytest.raises(TypeError, lambda: idx * idx) - pytest.raises(ValueError, lambda: idx * self._holder(np.arange(3))) - pytest.raises(ValueError, lambda: idx * np.array([1, 2])) + def test_dti_mul_dti_raises(self): + idx = self._holder(np.arange(5, dtype='int64')) + with pytest.raises(TypeError): + idx * idx + + def test_dti_mul_too_short_raises(self): + idx = self._holder(np.arange(5, dtype='int64')) + with pytest.raises(ValueError): + idx * self._holder(np.arange(3)) + with pytest.raises(ValueError): + idx * np.array([1, 2]) def test_ufunc_coercions(self): # normal ops are also tested in tseries/test_timedeltas.py diff --git a/pandas/tests/series/arithmetic/__init__.py b/pandas/tests/series/arithmetic/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/series/arithmetic/test_comparisons.py b/pandas/tests/series/arithmetic/test_comparisons.py new file mode 100644 index 0000000000000..0d7da5e71354a --- /dev/null +++ b/pandas/tests/series/arithmetic/test_comparisons.py @@ -0,0 +1,559 @@ +# coding=utf-8 +from datetime import datetime +import operator + +import pytest + +import numpy as np +import pandas as pd + +from pandas import Index, Series, NaT, Categorical, date_range, bdate_range +from pandas.core import nanops +from pandas import compat + +import pandas.util.testing as tm + + +class TestSeriesComparisons(object): + def test_series_comparison_scalars(self): + series = Series(date_range('1/1/2000', periods=10)) + + val = datetime(2000, 1, 4) + result = series > val + expected = Series([x > val for x in series]) + tm.assert_series_equal(result, expected) + + val = series[5] + result = series > val + expected = Series([x > val for x in series]) + tm.assert_series_equal(result, expected) + + def test_comparisons(self): + left = np.random.randn(10) + right = np.random.randn(10) + left[:3] = np.nan + + result = nanops.nangt(left, right) + with np.errstate(invalid='ignore'): + expected = (left > 
right).astype('O') + expected[:3] = np.nan + + tm.assert_almost_equal(result, expected) + + s = Series(['a', 'b', 'c']) + s2 = Series([False, True, False]) + + # it works! + exp = Series([False, False, False]) + tm.assert_series_equal(s == s2, exp) + tm.assert_series_equal(s2 == s, exp) + + def test_operator_series_comparison_zerorank(self): + # GH 13006 + result = np.float64(0) > pd.Series([1, 2, 3]) + expected = 0.0 > pd.Series([1, 2, 3]) + tm.assert_series_equal(result, expected) + result = pd.Series([1, 2, 3]) < np.float64(0) + expected = pd.Series([1, 2, 3]) < 0.0 + tm.assert_series_equal(result, expected) + result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2]) + expected = 0.0 > pd.Series([1, 2, 3]) + tm.assert_series_equal(result, expected) + + def test_object_comparisons(self): + s = Series(['a', 'b', np.nan, 'c', 'a']) + + result = s == 'a' + expected = Series([True, False, False, False, True]) + tm.assert_series_equal(result, expected) + + result = s < 'a' + expected = Series([False, False, False, False, False]) + tm.assert_series_equal(result, expected) + + result = s != 'a' + expected = -(s == 'a') + tm.assert_series_equal(result, expected) + + def test_categorical_comparisons(self): + # GH 8938 + # allow equality comparisons + a = Series(list('abc'), dtype="category") + b = Series(list('abc'), dtype="object") + c = Series(['a', 'b', 'cc'], dtype="object") + d = Series(list('acb'), dtype="object") + e = Categorical(list('abc')) + f = Categorical(list('acb')) + + # vs scalar + assert not (a == 'a').all() + assert ((a != 'a') == ~(a == 'a')).all() + + assert not ('a' == a).all() + assert (a == 'a')[0] + assert ('a' == a)[0] + assert not ('a' != a)[0] + + # vs list-like + assert (a == a).all() + assert not (a != a).all() + + assert (a == list(a)).all() + assert (a == b).all() + assert (b == a).all() + assert ((~(a == b)) == (a != b)).all() + assert ((~(b == a)) == (b != a)).all() + + assert not (a == c).all() + assert not (c == a).all() + assert not (a == 
d).all() + assert not (d == a).all() + + # vs a cat-like + assert (a == e).all() + assert (e == a).all() + assert not (a == f).all() + assert not (f == a).all() + + assert ((~(a == e) == (a != e)).all()) + assert ((~(e == a) == (e != a)).all()) + assert ((~(a == f) == (a != f)).all()) + assert ((~(f == a) == (f != a)).all()) + + # non-equality is not comparable + pytest.raises(TypeError, lambda: a < b) + pytest.raises(TypeError, lambda: b < a) + pytest.raises(TypeError, lambda: a > b) + pytest.raises(TypeError, lambda: b > a) + + def test_comparison_tuples(self): + # GH11339 + # comparisons vs tuple + s = Series([(1, 1), (1, 2)]) + + result = s == (1, 2) + expected = Series([False, True]) + tm.assert_series_equal(result, expected) + + result = s != (1, 2) + expected = Series([True, False]) + tm.assert_series_equal(result, expected) + + result = s == (0, 0) + expected = Series([False, False]) + tm.assert_series_equal(result, expected) + + result = s != (0, 0) + expected = Series([True, True]) + tm.assert_series_equal(result, expected) + + s = Series([(1, 1), (1, 1)]) + + result = s == (1, 1) + expected = Series([True, True]) + tm.assert_series_equal(result, expected) + + result = s != (1, 1) + expected = Series([False, False]) + tm.assert_series_equal(result, expected) + + s = Series([frozenset([1]), frozenset([1, 2])]) + + result = s == frozenset([1]) + expected = Series([True, False]) + tm.assert_series_equal(result, expected) + + def test_comparison_operators_with_nas(self): + ser = Series(bdate_range('1/1/2000', periods=10), dtype=object) + ser[::2] = np.nan + + # test that comparisons work + ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] + for op in ops: + val = ser[5] + + f = getattr(operator, op) + result = f(ser, val) + + expected = f(ser.dropna(), val).reindex(ser.index) + + if op == 'ne': + expected = expected.fillna(True).astype(bool) + else: + expected = expected.fillna(False).astype(bool) + + tm.assert_series_equal(result, expected) + + # 
fffffffuuuuuuuuuuuu + # result = f(val, s) + # expected = f(val, s.dropna()).reindex(s.index) + # tm.assert_series_equal(result, expected) + + # boolean &, |, ^ should work with object arrays and propagate NAs + + ops = ['and_', 'or_', 'xor'] + mask = ser.isna() + for bool_op in ops: + func = getattr(operator, bool_op) + + filled = ser.fillna(ser[0]) + + result = func(ser < ser[9], ser > ser[3]) + + expected = func(filled < filled[9], filled > filled[3]) + expected[mask] = False + tm.assert_series_equal(result, expected) + + def test_comparison_object_numeric_nas(self): + ser = Series(np.random.randn(10), dtype=object) + shifted = ser.shift(2) + + ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] + for op in ops: + func = getattr(operator, op) + + result = func(ser, shifted) + expected = func(ser.astype(float), shifted.astype(float)) + tm.assert_series_equal(result, expected) + + def test_comparison_invalid(self): + # GH4968 + # invalid date/int comparisons + s = Series(range(5)) + s2 = Series(date_range('20010101', periods=5)) + + for (x, y) in [(s, s2), (s2, s)]: + pytest.raises(TypeError, lambda: x == y) + pytest.raises(TypeError, lambda: x != y) + pytest.raises(TypeError, lambda: x >= y) + pytest.raises(TypeError, lambda: x > y) + pytest.raises(TypeError, lambda: x < y) + pytest.raises(TypeError, lambda: x <= y) + + def test_unequal_categorical_comparison_raises_type_error(self): + # unequal comparison should raise for unordered cats + cat = Series(Categorical(list("abc"))) + + def f(): + cat > "b" + + pytest.raises(TypeError, f) + cat = Series(Categorical(list("abc"), ordered=False)) + + def f(): + cat > "b" + + pytest.raises(TypeError, f) + + # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057 + # and following comparisons with scalars not in categories should raise + # for unequal comps, but not for equal/not equal + cat = Series(Categorical(list("abc"), ordered=True)) + + pytest.raises(TypeError, lambda: cat < "d") + pytest.raises(TypeError, 
lambda: cat > "d") + pytest.raises(TypeError, lambda: "d" < cat) + pytest.raises(TypeError, lambda: "d" > cat) + + tm.assert_series_equal(cat == "d", Series([False, False, False])) + tm.assert_series_equal(cat != "d", Series([True, True, True])) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_more_na_comparisons(self, dtype): + left = Series(['a', np.nan, 'c'], dtype=dtype) + right = Series(['a', np.nan, 'd'], dtype=dtype) + + result = left == right + expected = Series([True, False, False]) + tm.assert_series_equal(result, expected) + + result = left != right + expected = Series([False, True, True]) + tm.assert_series_equal(result, expected) + + result = left == np.nan + expected = Series([False, False, False]) + tm.assert_series_equal(result, expected) + + result = left != np.nan + expected = Series([True, True, True]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('pair', [ + ([pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')], + [NaT, NaT, pd.Timestamp('2011-01-03')]), + + ([pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')], + [NaT, NaT, pd.Timedelta('3 days')]), + + ([pd.Period('2011-01', freq='M'), NaT, pd.Period('2011-03', freq='M')], + [NaT, NaT, pd.Period('2011-03', freq='M')])]) + @pytest.mark.parametrize('reverse', [True, False]) + @pytest.mark.parametrize('box', [Series, Index]) + @pytest.mark.parametrize('dtype', [None, object]) + def test_nat_comparisons(self, dtype, box, reverse, pair): + l, r = pair + if reverse: + # add lhs / rhs switched data + l, r = r, l + + left = Series(l, dtype=dtype) + right = box(r, dtype=dtype) + # Series, Index + + expected = Series([False, False, True]) + tm.assert_series_equal(left == right, expected) + + expected = Series([True, True, False]) + tm.assert_series_equal(left != right, expected) + + expected = Series([False, False, False]) + tm.assert_series_equal(left < right, expected) + + expected = Series([False, False, False]) + tm.assert_series_equal(left > 
right, expected) + + expected = Series([False, False, True]) + tm.assert_series_equal(left >= right, expected) + + expected = Series([False, False, True]) + tm.assert_series_equal(left <= right, expected) + + @pytest.mark.parametrize('data', [ + [pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')], + [pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')], + [pd.Period('2011-01', freq='M'), NaT, pd.Period('2011-03', freq='M')] + ]) + @pytest.mark.parametrize('dtype', [None, object]) + def test_nat_comparisons_scalar(self, dtype, data): + left = Series(data, dtype=dtype) + + expected = Series([False, False, False]) + tm.assert_series_equal(left == pd.NaT, expected) + tm.assert_series_equal(pd.NaT == left, expected) + + expected = Series([True, True, True]) + tm.assert_series_equal(left != pd.NaT, expected) + tm.assert_series_equal(pd.NaT != left, expected) + + expected = Series([False, False, False]) + tm.assert_series_equal(left < pd.NaT, expected) + tm.assert_series_equal(pd.NaT > left, expected) + tm.assert_series_equal(left <= pd.NaT, expected) + tm.assert_series_equal(pd.NaT >= left, expected) + + tm.assert_series_equal(left > pd.NaT, expected) + tm.assert_series_equal(pd.NaT < left, expected) + tm.assert_series_equal(left >= pd.NaT, expected) + tm.assert_series_equal(pd.NaT <= left, expected) + + def test_comparison_different_length(self): + a = Series(['a', 'b', 'c']) + b = Series(['b', 'a']) + pytest.raises(ValueError, a.__lt__, b) + + a = Series([1, 2]) + b = Series([2, 3, 4]) + pytest.raises(ValueError, a.__eq__, b) + + def test_comparison_label_based(self): + + # GH 4947 + # comparisons should be label based + + a = Series([True, False, True], list('bca')) + b = Series([False, True, False], list('abc')) + + expected = Series([False, True, False], list('abc')) + result = a & b + tm.assert_series_equal(result, expected) + + expected = Series([True, True, False], list('abc')) + result = a | b + tm.assert_series_equal(result, expected) + + expected = 
Series([True, False, False], list('abc')) + result = a ^ b + tm.assert_series_equal(result, expected) + + # rhs is bigger + a = Series([True, False, True], list('bca')) + b = Series([False, True, False, True], list('abcd')) + + expected = Series([False, True, False, False], list('abcd')) + result = a & b + tm.assert_series_equal(result, expected) + + expected = Series([True, True, False, False], list('abcd')) + result = a | b + tm.assert_series_equal(result, expected) + + # filling + + # vs empty + result = a & Series([]) + expected = Series([False, False, False], list('bca')) + tm.assert_series_equal(result, expected) + + result = a | Series([]) + expected = Series([True, False, True], list('bca')) + tm.assert_series_equal(result, expected) + + # vs non-matching + result = a & Series([1], ['z']) + expected = Series([False, False, False, False], list('abcz')) + tm.assert_series_equal(result, expected) + + result = a | Series([1], ['z']) + expected = Series([True, True, False, False], list('abcz')) + tm.assert_series_equal(result, expected) + + # identity + # we would like s[s|e] == s to hold for any e, whether empty or not + for e in [Series([]), Series([1], ['z']), + Series(np.nan, b.index), Series(np.nan, a.index)]: + result = a[a | e] + tm.assert_series_equal(result, a[a]) + + for e in [Series(['z'])]: + if compat.PY3: + with tm.assert_produces_warning(RuntimeWarning): + result = a[a | e] + else: + result = a[a | e] + tm.assert_series_equal(result, a[a]) + + # vs scalars + index = list('bca') + t = Series([True, False, True]) + + for v in [True, 1, 2]: + result = Series([True, False, True], index=index) | v + expected = Series([True, True, True], index=index) + tm.assert_series_equal(result, expected) + + for v in [np.nan, 'foo']: + pytest.raises(TypeError, lambda: t | v) + + for v in [False, 0]: + result = Series([True, False, True], index=index) | v + expected = Series([True, False, True], index=index) + tm.assert_series_equal(result, expected) + + for v in 
[True, 1]: + result = Series([True, False, True], index=index) & v + expected = Series([True, False, True], index=index) + tm.assert_series_equal(result, expected) + + for v in [False, 0]: + result = Series([True, False, True], index=index) & v + expected = Series([False, False, False], index=index) + tm.assert_series_equal(result, expected) + for v in [np.nan]: + pytest.raises(TypeError, lambda: t & v) + + def test_comparison_flex_basic(self): + left = pd.Series(np.random.randn(10)) + right = pd.Series(np.random.randn(10)) + + tm.assert_series_equal(left.eq(right), left == right) + tm.assert_series_equal(left.ne(right), left != right) + tm.assert_series_equal(left.le(right), left < right) + tm.assert_series_equal(left.lt(right), left <= right) + tm.assert_series_equal(left.gt(right), left > right) + tm.assert_series_equal(left.ge(right), left >= right) + + # axis + for axis in [0, None, 'index']: + tm.assert_series_equal(left.eq(right, axis=axis), left == right) + tm.assert_series_equal(left.ne(right, axis=axis), left != right) + tm.assert_series_equal(left.le(right, axis=axis), left < right) + tm.assert_series_equal(left.lt(right, axis=axis), left <= right) + tm.assert_series_equal(left.gt(right, axis=axis), left > right) + tm.assert_series_equal(left.ge(right, axis=axis), left >= right) + + # + msg = 'No axis named 1 for object type' + for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']: + with tm.assert_raises_regex(ValueError, msg): + getattr(left, op)(right, axis=1) + + def test_comparison_flex_alignment(self): + left = Series([1, 3, 2], index=list('abc')) + right = Series([2, 2, 2], index=list('bcd')) + + exp = pd.Series([False, False, True, False], index=list('abcd')) + tm.assert_series_equal(left.eq(right), exp) + + exp = pd.Series([True, True, False, True], index=list('abcd')) + tm.assert_series_equal(left.ne(right), exp) + + exp = pd.Series([False, False, True, False], index=list('abcd')) + tm.assert_series_equal(left.le(right), exp) + + exp = 
pd.Series([False, False, False, False], index=list('abcd')) + tm.assert_series_equal(left.lt(right), exp) + + exp = pd.Series([False, True, True, False], index=list('abcd')) + tm.assert_series_equal(left.ge(right), exp) + + exp = pd.Series([False, True, False, False], index=list('abcd')) + tm.assert_series_equal(left.gt(right), exp) + + def test_comparison_flex_alignment_fill(self): + left = Series([1, 3, 2], index=list('abc')) + right = Series([2, 2, 2], index=list('bcd')) + + exp = pd.Series([False, False, True, True], index=list('abcd')) + tm.assert_series_equal(left.eq(right, fill_value=2), exp) + + exp = pd.Series([True, True, False, False], index=list('abcd')) + tm.assert_series_equal(left.ne(right, fill_value=2), exp) + + exp = pd.Series([False, False, True, True], index=list('abcd')) + tm.assert_series_equal(left.le(right, fill_value=0), exp) + + exp = pd.Series([False, False, False, True], index=list('abcd')) + tm.assert_series_equal(left.lt(right, fill_value=0), exp) + + exp = pd.Series([True, True, True, False], index=list('abcd')) + tm.assert_series_equal(left.ge(right, fill_value=0), exp) + + exp = pd.Series([True, True, False, False], index=list('abcd')) + tm.assert_series_equal(left.gt(right, fill_value=0), exp) + + def test_ne(self): + ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) + expected = [True, True, False, True, True] + assert tm.equalContents(ts.index != 5, expected) + assert tm.equalContents(~(ts.index == 5), expected) + + def test_comp_ops_df_compat(self): + # GH 1134 + s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x') + + s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') + s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') + + for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]: + + msg = "Can only compare identically-labeled Series objects" + with tm.assert_raises_regex(ValueError, msg): + left == right + + with 
tm.assert_raises_regex(ValueError, msg): + left != right + + with tm.assert_raises_regex(ValueError, msg): + left < right + + msg = "Can only compare identically-labeled DataFrame objects" + with tm.assert_raises_regex(ValueError, msg): + left.to_frame() == right.to_frame() + + with tm.assert_raises_regex(ValueError, msg): + left.to_frame() != right.to_frame() + + with tm.assert_raises_regex(ValueError, msg): + left.to_frame() < right.to_frame() diff --git a/pandas/tests/series/arithmetic/test_datetime_arithmetic.py b/pandas/tests/series/arithmetic/test_datetime_arithmetic.py new file mode 100644 index 0000000000000..494577bab3684 --- /dev/null +++ b/pandas/tests/series/arithmetic/test_datetime_arithmetic.py @@ -0,0 +1,340 @@ +# coding=utf-8 +import pytest +import pytz + +from datetime import datetime, timedelta + +import numpy as np +import pandas as pd + +from pandas import (Series, NaT, date_range, timedelta_range, + Timestamp, Timedelta) + +import pandas.util.testing as tm + + +class TestDatetimeSeriesArithmetic(object): + @pytest.mark.parametrize( + 'box, assert_func', + [(Series, tm.assert_series_equal), + (pd.Index, tm.assert_index_equal)]) + def test_sub_datetime64_not_ns(self, box, assert_func): + # GH#7996 + dt64 = np.datetime64('2013-01-01') + assert dt64.dtype == 'datetime64[D]' + + obj = box(date_range('20130101', periods=3)) + res = obj - dt64 + expected = box([Timedelta(days=0), Timedelta(days=1), + Timedelta(days=2)]) + assert_func(res, expected) + + res = dt64 - obj + assert_func(res, -expected) + + def test_operators_datetimelike(self): + def run_ops(ops, get_ser, test_ser): + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + for op_str in ops: + op = getattr(get_ser, op_str, None) + with tm.assert_raises_regex(TypeError, 'operate|cannot'): + op(test_ser) + + # ## timedelta64 ### + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # ## datetime64 ### 
+ dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), + Timestamp('20120103')]) + dt1.iloc[2] = np.nan + dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), + Timestamp('20120104')]) + ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__', + '__pow__', '__radd__', '__rmul__', '__rfloordiv__', + '__rtruediv__', '__rdiv__', '__rpow__'] + run_ops(ops, dt1, dt2) + dt1 - dt2 + dt2 - dt1 + + # ## datetime64 with timetimedelta ### + ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', + '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', + '__rpow__'] + run_ops(ops, dt1, td1) + dt1 + td1 + td1 + dt1 + dt1 - td1 + # TODO: Decide if this ought to work. + # td1 - dt1 + + # ## timetimedelta with datetime64 ### + ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__', + '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__', + '__rdiv__', '__rpow__'] + run_ops(ops, td1, dt1) + td1 + dt1 + dt1 + td1 + + # 8260, 10763 + # datetime64 with tz + ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', + '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', + '__rpow__'] + + tz = 'US/Eastern' + dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, + tz=tz), name='foo') + dt2 = dt1.copy() + dt2.iloc[2] = np.nan + td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) + td2 = td1.copy() + td2.iloc[1] = np.nan + run_ops(ops, dt1, td1) + + result = dt1 + td1[0] + exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 + td2[0] + exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + # odd numpy behavior with scalar timedeltas + result = td1[0] + dt1 + exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = td2[0] + dt2 + exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + 
result = dt1 - td1[0] + exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + pytest.raises(TypeError, lambda: td1[0] - dt1) + + result = dt2 - td2[0] + exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + pytest.raises(TypeError, lambda: td2[0] - dt2) + + result = dt1 + td1 + exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 + td2 + exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt1 - td1 + exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 - td2 + exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + pytest.raises(TypeError, lambda: td1 - dt1) + pytest.raises(TypeError, lambda: td2 - dt2) + + def test_sub_single_tz(self): + # GH12290 + s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')]) + s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')]) + result = s1 - s2 + expected = Series([Timedelta('2days')]) + tm.assert_series_equal(result, expected) + result = s2 - s1 + expected = Series([Timedelta('-2days')]) + tm.assert_series_equal(result, expected) + + def test_dt64tz_series_sub_dtitz(self): + # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series + # (with same tz) raises, fixed by #19024 + dti = pd.date_range('1999-09-30', periods=10, tz='US/Pacific') + ser = pd.Series(dti) + expected = pd.Series(pd.TimedeltaIndex(['0days'] * 10)) + + res = dti - ser + tm.assert_series_equal(res, expected) + res = ser - dti + tm.assert_series_equal(res, expected) + + def test_sub_datetime_compat(self): + # see gh-14088 + s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT]) + dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc) + exp = Series([Timedelta('1 days'), pd.NaT]) + tm.assert_series_equal(s - dt, exp) + 
tm.assert_series_equal(s - Timestamp(dt), exp) + + def test_dt64_series_with_timedelta(self): + # scalar timedeltas/np.timedelta64 objects + # operate with np.timedelta64 correctly + s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) + + result = s + np.timedelta64(1, 's') + result2 = np.timedelta64(1, 's') + s + expected = Series([Timestamp('20130101 9:01:01'), + Timestamp('20130101 9:02:01')]) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + result = s + np.timedelta64(5, 'ms') + result2 = np.timedelta64(5, 'ms') + s + expected = Series([Timestamp('20130101 9:01:00.005'), + Timestamp('20130101 9:02:00.005')]) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + def test_dt64_series_add_tick_DateOffset(self): + # GH 4532 + # operate with pd.offsets + ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) + expected = Series([Timestamp('20130101 9:01:05'), + Timestamp('20130101 9:02:05')]) + + result = ser + pd.offsets.Second(5) + tm.assert_series_equal(result, expected) + + result2 = pd.offsets.Second(5) + ser + tm.assert_series_equal(result2, expected) + + def test_dt64_series_sub_tick_DateOffset(self): + # GH 4532 + # operate with pd.offsets + ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) + expected = Series([Timestamp('20130101 9:00:55'), + Timestamp('20130101 9:01:55')]) + + result = ser - pd.offsets.Second(5) + tm.assert_series_equal(result, expected) + + result2 = -pd.offsets.Second(5) + ser + tm.assert_series_equal(result2, expected) + + with pytest.raises(TypeError): + pd.offsets.Second(5) - ser + + @pytest.mark.parametrize('cls_name', ['Day', 'Hour', 'Minute', 'Second', + 'Milli', 'Micro', 'Nano']) + def test_dt64_series_with_tick_DateOffset_smoke(self, cls_name): + # GH 4532 + # smoke tests for valid DateOffsets + ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) + + offset_cls = 
getattr(pd.offsets, cls_name) + ser + offset_cls(5) + offset_cls(5) + ser + + def test_dt64_series_add_mixed_tick_DateOffset(self): + # GH 4532 + # operate with pd.offsets + s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) + + result = s + pd.offsets.Milli(5) + result2 = pd.offsets.Milli(5) + s + expected = Series([Timestamp('20130101 9:01:00.005'), + Timestamp('20130101 9:02:00.005')]) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5) + expected = Series([Timestamp('20130101 9:06:00.005'), + Timestamp('20130101 9:07:00.005')]) + tm.assert_series_equal(result, expected) + + def test_dt64_series_sub_NaT(self): + # GH#18808 + dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp('19900315')]) + ser = pd.Series(dti) + res = ser - pd.NaT + expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + + dti_tz = dti.tz_localize('Asia/Tokyo') + ser_tz = pd.Series(dti_tz) + res = ser_tz - pd.NaT + expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + + def test_datetime64_ops_nat(self): + # GH 11349 + datetime_series = Series([NaT, Timestamp('19900315')]) + nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]') + single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') + + # subtraction + tm.assert_series_equal(-NaT + datetime_series, + nat_series_dtype_timestamp) + with pytest.raises(TypeError): + -single_nat_dtype_datetime + datetime_series + + tm.assert_series_equal(-NaT + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) + with pytest.raises(TypeError): + -single_nat_dtype_datetime + nat_series_dtype_timestamp + + # addition + tm.assert_series_equal(nat_series_dtype_timestamp + NaT, + nat_series_dtype_timestamp) + tm.assert_series_equal(NaT + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) + + 
tm.assert_series_equal(nat_series_dtype_timestamp + NaT, + nat_series_dtype_timestamp) + tm.assert_series_equal(NaT + nat_series_dtype_timestamp, + nat_series_dtype_timestamp) + + @pytest.mark.parametrize('dt64_series', [ + Series([Timestamp('19900315'), Timestamp('19900315')]), + Series([NaT, Timestamp('19900315')]), + Series([NaT, NaT], dtype='datetime64[ns]')]) + @pytest.mark.parametrize('one', [1, 1.0, np.array(1)]) + def test_dt64_mul_div_numeric_invalid(self, one, dt64_series): + # multiplication + with pytest.raises(TypeError): + dt64_series * one + with pytest.raises(TypeError): + one * dt64_series + + # division + with pytest.raises(TypeError): + dt64_series / one + with pytest.raises(TypeError): + one / dt64_series + + def test_dt64_series_arith_overflow(self): + # GH#12534, fixed by #19024 + dt = pd.Timestamp('1700-01-31') + td = pd.Timedelta('20000 Days') + dti = pd.date_range('1949-09-30', freq='100Y', periods=4) + ser = pd.Series(dti) + with pytest.raises(OverflowError): + ser - dt + with pytest.raises(OverflowError): + dt - ser + with pytest.raises(OverflowError): + ser + td + with pytest.raises(OverflowError): + td + ser + + ser.iloc[-1] = pd.NaT + expected = pd.Series(['2004-10-03', '2104-10-04', '2204-10-04', 'NaT'], + dtype='datetime64[ns]') + res = ser + td + tm.assert_series_equal(res, expected) + res = td + ser + tm.assert_series_equal(res, expected) + + ser.iloc[1:] = pd.NaT + expected = pd.Series(['91279 Days', 'NaT', 'NaT', 'NaT'], + dtype='timedelta64[ns]') + res = ser - dt + tm.assert_series_equal(res, expected) + res = dt - ser + tm.assert_series_equal(res, -expected) diff --git a/pandas/tests/series/arithmetic/test_numeric_arithmetic.py b/pandas/tests/series/arithmetic/test_numeric_arithmetic.py new file mode 100644 index 0000000000000..e95c4488de601 --- /dev/null +++ b/pandas/tests/series/arithmetic/test_numeric_arithmetic.py @@ -0,0 +1,173 @@ +# coding=utf-8 + +import pytest + +import numpy as np +import pandas as pd + +from pandas 
import Series +import pandas.util.testing as tm + + +class TestSeriesArithmeticDtypeCompat(object): + + def test_object_series_add_int_invalid(self): + # invalid ops + obj_series = tm.makeObjectSeries() + obj_series.name = 'objects' + + with pytest.raises(Exception): + obj_series + 1 + with pytest.raises(Exception): + obj_series + np.array(1, dtype=np.int64) + + def test_object_series_sub_int_invalid(self): + # invalid ops + obj_series = tm.makeObjectSeries() + obj_series.name = 'objects' + + with pytest.raises(Exception): + obj_series - 1 + with pytest.raises(Exception): + obj_series - np.array(1, dtype=np.int64) + + def test_series_radd_str(self): + ser = pd.Series(['x', np.nan, 'x']) + tm.assert_series_equal('a' + ser, pd.Series(['ax', np.nan, 'ax'])) + tm.assert_series_equal(ser + 'a', pd.Series(['xa', np.nan, 'xa'])) + + @pytest.mark.parametrize('data', [ + [1, 2, 3], + [1.1, 2.2, 3.3], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT], + ['x', 'y', 1]]) + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_radd_str_invalid(self, dtype, data): + ser = Series(data, dtype=dtype) + with pytest.raises(TypeError): + 'foo_' + ser + + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_with_dtype_radd_int(self, dtype): + ser = pd.Series([1, 2, 3], dtype=dtype) + expected = pd.Series([2, 3, 4], dtype=dtype) + + result = 1 + ser + tm.assert_series_equal(result, expected) + + result = ser + 1 + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_with_dtype_radd_nan(self, dtype): + ser = pd.Series([1, 2, 3], dtype=dtype) + expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) + + result = np.nan + ser + tm.assert_series_equal(result, expected) + + result = ser + np.nan + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_with_dtype_radd_timedelta(self, dtype): + ser = pd.Series([pd.Timedelta('1 
days'), pd.Timedelta('2 days'), + pd.Timedelta('3 days')], dtype=dtype) + expected = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), + pd.Timedelta('6 days')]) + + result = pd.Timedelta('3 days') + ser + tm.assert_series_equal(result, expected) + + result = ser + pd.Timedelta('3 days') + tm.assert_series_equal(result, expected) + + +class TestSeriesArithmetic(object): + def test_divide_decimal(self): + """ resolves issue #9787 """ + from decimal import Decimal + + expected = Series([Decimal(5)]) + + s = Series([Decimal(10)]) + s = s / Decimal(2) + + tm.assert_series_equal(expected, s) + + s = Series([Decimal(10)]) + s = s // Decimal(2) + + tm.assert_series_equal(expected, s) + + def test_div(self): + with np.errstate(all='ignore'): + # no longer do integer div for any ops, but deal with the 0's + p = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) + result = p['first'] / p['second'] + expected = Series( + p['first'].values.astype(float) / p['second'].values, + dtype='float64') + expected.iloc[0:3] = np.inf + tm.assert_series_equal(result, expected) + + result = p['first'] / 0 + expected = Series(np.inf, index=p.index, name='first') + tm.assert_series_equal(result, expected) + + p = p.astype('float64') + result = p['first'] / p['second'] + expected = Series(p['first'].values / p['second'].values) + tm.assert_series_equal(result, expected) + + p = pd.DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]}) + result = p['first'] / p['second'] + tm.assert_series_equal(result, p['first'].astype('float64'), + check_names=False) + assert result.name is None + assert not result.equals(p['second'] / p['first']) + + # inf signing + s = Series([np.nan, 1., -1.]) + result = s / 0 + expected = Series([np.nan, np.inf, -np.inf]) + tm.assert_series_equal(result, expected) + + # float/integer issue + # GH 7785 + p = pd.DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)}) + expected = Series([-0.01, -np.inf]) + + result = p['second'].div(p['first']) + 
tm.assert_series_equal(result, expected, check_names=False) + + result = p['second'] / p['first'] + tm.assert_series_equal(result, expected) + + # GH 9144 + s = Series([-1, 0, 1]) + + result = 0 / s + expected = Series([0.0, np.nan, 0.0]) + tm.assert_series_equal(result, expected) + + result = s / 0 + expected = Series([-np.inf, np.nan, np.inf]) + tm.assert_series_equal(result, expected) + + result = s // 0 + expected = Series([-np.inf, np.nan, np.inf]) + tm.assert_series_equal(result, expected) + + # GH 8674 + zero_array = np.array([0] * 5) + data = np.random.randn(5) + expected = pd.Series([0.] * 5) + result = zero_array / pd.Series(data) + tm.assert_series_equal(result, expected) + + result = pd.Series(zero_array) / data + tm.assert_series_equal(result, expected) + + result = pd.Series(zero_array) / pd.Series(data) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/arithmetic/test_timedelta_arithmetic.py b/pandas/tests/series/arithmetic/test_timedelta_arithmetic.py new file mode 100644 index 0000000000000..98046b24cbecc --- /dev/null +++ b/pandas/tests/series/arithmetic/test_timedelta_arithmetic.py @@ -0,0 +1,558 @@ +# coding=utf-8 + +import pytest + +from datetime import datetime, timedelta + +import numpy as np +import pandas as pd + +from pandas import Series, NaT, date_range, Timestamp, Timedelta + +from pandas.compat import range +import pandas.util.testing as tm + + +@pytest.fixture +def tdser(): + return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]') + + +class TestTimedeltaSeriesArithmeticWithIntegers(object): + # Tests for Series with dtype 'timedelta64[ns]' arithmetic operations + # with integer and int-like others + + # ------------------------------------------------------------------ + # Addition and Subtraction + + def test_td64series_add_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + tdser + Series([2, 3, 4]) + + @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') + 
def test_td64series_radd_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + Series([2, 3, 4]) + tdser + + def test_td64series_sub_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + tdser - Series([2, 3, 4]) + + @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') + def test_td64series_rsub_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + Series([2, 3, 4]) - tdser + + @pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)]) + def test_td64series_add_sub_numeric_scalar_invalid(self, scalar, tdser): + with pytest.raises(TypeError): + tdser + scalar + with pytest.raises(TypeError): + scalar + tdser + with pytest.raises(TypeError): + tdser - scalar + with pytest.raises(TypeError): + scalar - tdser + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [ + np.array([1, 2, 3]), + pd.Index([1, 2, 3]), + pytest.param(Series([1, 2, 3]), + marks=pytest.mark.xfail(reason='GH#19123 integer ' + 'interpreted as nanos')) + ]) + def test_td64series_add_sub_numeric_array_invalid(self, vector, + dtype, tdser): + vector = vector.astype(dtype) + with pytest.raises(TypeError): + tdser + vector + with pytest.raises(TypeError): + vector + tdser + with pytest.raises(TypeError): + tdser - vector + with pytest.raises(TypeError): + vector - tdser + + # ------------------------------------------------------------------ + # Multiplicaton and Division + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), + pd.Index([20, 30, 40]), + Series([20, 30, 40])]) + def test_td64series_div_numeric_array(self, vector, dtype, tdser): + # GH 4521 + # divide/multiply by integers + vector = vector.astype(dtype) + expected = Series(['2.95D', '1D 23H 12m', 'NaT'], + 
dtype='timedelta64[ns]') + + result = tdser / vector + tm.assert_series_equal(result, expected) + + with pytest.raises(TypeError): + vector / tdser + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), + pd.Index([20, 30, 40]), + Series([20, 30, 40])]) + def test_td64series_mul_numeric_array(self, vector, dtype, tdser): + # GH 4521 + # divide/multiply by integers + vector = vector.astype(dtype) + + expected = Series(['1180 Days', '1770 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser * vector + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [ + np.array([20, 30, 40]), + pytest.param(pd.Index([20, 30, 40]), + marks=pytest.mark.xfail(reason='__mul__ raises ' + 'instead of returning ' + 'NotImplemented')), + Series([20, 30, 40]) + ]) + def test_td64series_rmul_numeric_array(self, vector, dtype, tdser): + # GH 4521 + # divide/multiply by integers + vector = vector.astype(dtype) + + expected = Series(['1180 Days', '1770 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = vector * tdser + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)]) + def test_td64series_mul_numeric_scalar(self, one, tdser): + # GH 4521 + # divide/multiply by integers + expected = Series(['-59 Days', '-59 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser * (-one) + tm.assert_series_equal(result, expected) + result = (-one) * tdser + tm.assert_series_equal(result, expected) + + expected = Series(['118 Days', '118 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser * (2 * one) + tm.assert_series_equal(result, expected) + result = (2 * one) * tdser + tm.assert_series_equal(result, 
expected) + + @pytest.mark.parametrize('two', [ + 2, 2.0, + pytest.param(np.array(2), + marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' + 'incorrectly True.')), + pytest.param(np.array(2.0), + marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' + 'incorrectly True.')), + ]) + def test_td64series_div_numeric_scalar(self, two, tdser): + # GH 4521 + # divide/multiply by integers + expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]') + + result = tdser / two + tm.assert_series_equal(result, expected) + + +class TestTimedeltaSeriesArithmetic(object): + def test_td64series_add_sub_timestamp(self): + # GH11925 + tdser = Series(pd.timedelta_range('1 day', periods=3)) + ts = Timestamp('2012-01-01') + expected = Series(date_range('2012-01-02', periods=3)) + tm.assert_series_equal(ts + tdser, expected) + tm.assert_series_equal(tdser + ts, expected) + + expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D')) + tm.assert_series_equal(ts - tdser, expected2) + tm.assert_series_equal(ts + (-tdser), expected2) + + with pytest.raises(TypeError): + tdser - ts + + def test_timedelta64_operations_with_DateOffset(self): + # GH 10699 + td = Series([timedelta(minutes=5, seconds=3)] * 3) + result = td + pd.offsets.Minute(1) + expected = Series([timedelta(minutes=6, seconds=3)] * 3) + tm.assert_series_equal(result, expected) + + result = td - pd.offsets.Minute(1) + expected = Series([timedelta(minutes=4, seconds=3)] * 3) + tm.assert_series_equal(result, expected) + + result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3), + pd.offsets.Hour(2)]) + expected = Series([timedelta(minutes=6, seconds=3), timedelta( + minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)]) + tm.assert_series_equal(result, expected) + + result = td + pd.offsets.Minute(1) + pd.offsets.Second(12) + expected = Series([timedelta(minutes=6, seconds=15)] * 3) + tm.assert_series_equal(result, expected) + + # valid DateOffsets + for do in ['Hour', 'Minute', 
'Second', 'Day', 'Micro', 'Milli', + 'Nano']: + op = getattr(pd.offsets, do) + td + op(5) + op(5) + td + td - op(5) + op(5) - td + + def test_timedelta64_operations_with_timedeltas(self): + # td operate with td + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td2 = timedelta(minutes=5, seconds=4) + result = td1 - td2 + expected = (Series([timedelta(seconds=0)] * 3) - + Series([timedelta(seconds=1)] * 3)) + assert result.dtype == 'm8[ns]' + tm.assert_series_equal(result, expected) + + result2 = td2 - td1 + expected = (Series([timedelta(seconds=1)] * 3) - + Series([timedelta(seconds=0)] * 3)) + tm.assert_series_equal(result2, expected) + + # roundtrip + tm.assert_series_equal(result + td2, td1) + + # Now again, using pd.to_timedelta, which should build + # a Series or a scalar, depending on input. + td1 = Series(pd.to_timedelta(['00:05:03'] * 3)) + td2 = pd.to_timedelta('00:05:04') + result = td1 - td2 + expected = (Series([timedelta(seconds=0)] * 3) - + Series([timedelta(seconds=1)] * 3)) + assert result.dtype == 'm8[ns]' + tm.assert_series_equal(result, expected) + + result2 = td2 - td1 + expected = (Series([timedelta(seconds=1)] * 3) - + Series([timedelta(seconds=0)] * 3)) + tm.assert_series_equal(result2, expected) + + # roundtrip + tm.assert_series_equal(result + td2, td1) + + def test_operators_timedelta64(self): + # series ops + v1 = date_range('2012-1-1', periods=3, freq='D') + v2 = date_range('2012-1-2', periods=3, freq='D') + rs = Series(v2) - Series(v1) + xp = Series(1e9 * 3600 * 24, + rs.index).astype('int64').astype('timedelta64[ns]') + tm.assert_series_equal(rs, xp) + assert rs.dtype == 'timedelta64[ns]' + + df = pd.DataFrame(dict(A=v1)) + td = Series([timedelta(days=i) for i in range(3)]) + assert td.dtype == 'timedelta64[ns]' + + # series on the rhs + result = df['A'] - df['A'].shift() + assert result.dtype == 'timedelta64[ns]' + + result = df['A'] + td + assert result.dtype == 'M8[ns]' + + # scalar Timestamp on rhs + maxa = df['A'].max() + 
assert isinstance(maxa, Timestamp) + + resultb = df['A'] - df['A'].max() + assert resultb.dtype == 'timedelta64[ns]' + + # timestamp on lhs + result = resultb + df['A'] + values = [Timestamp('20111230'), Timestamp('20120101'), + Timestamp('20120103')] + expected = Series(values, name='A') + tm.assert_series_equal(result, expected) + + # datetimes on rhs + result = df['A'] - datetime(2001, 1, 1) + expected = Series( + [timedelta(days=4017 + i) for i in range(3)], name='A') + tm.assert_series_equal(result, expected) + assert result.dtype == 'm8[ns]' + + d = datetime(2001, 1, 1, 3, 4) + resulta = df['A'] - d + assert resulta.dtype == 'm8[ns]' + + # roundtrip + resultb = resulta + d + tm.assert_series_equal(df['A'], resultb) + + # timedeltas on rhs + td = timedelta(days=1) + resulta = df['A'] + td + resultb = resulta - td + tm.assert_series_equal(resultb, df['A']) + assert resultb.dtype == 'M8[ns]' + + # roundtrip + td = timedelta(minutes=5, seconds=3) + resulta = df['A'] + td + resultb = resulta - td + tm.assert_series_equal(df['A'], resultb) + assert resultb.dtype == 'M8[ns]' + + # inplace + value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1)) + rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1)) + assert rs[2] == value + + def test_timedelta64_ops_nat(self): + # GH 11349 + timedelta_series = Series([NaT, Timedelta('1s')]) + nat_series_dtype_timedelta = Series([NaT, NaT], + dtype='timedelta64[ns]') + single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]') + + # subtraction + tm.assert_series_equal(timedelta_series - NaT, + nat_series_dtype_timedelta) + tm.assert_series_equal(-NaT + timedelta_series, + nat_series_dtype_timedelta) + + tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta, + nat_series_dtype_timedelta) + tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series, + nat_series_dtype_timedelta) + + # addition + tm.assert_series_equal(nat_series_dtype_timedelta + NaT, + nat_series_dtype_timedelta) + 
tm.assert_series_equal(NaT + nat_series_dtype_timedelta, + nat_series_dtype_timedelta) + + tm.assert_series_equal(nat_series_dtype_timedelta + + single_nat_dtype_timedelta, + nat_series_dtype_timedelta) + tm.assert_series_equal(single_nat_dtype_timedelta + + nat_series_dtype_timedelta, + nat_series_dtype_timedelta) + + tm.assert_series_equal(timedelta_series + NaT, + nat_series_dtype_timedelta) + tm.assert_series_equal(NaT + timedelta_series, + nat_series_dtype_timedelta) + + tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta, + nat_series_dtype_timedelta) + tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series, + nat_series_dtype_timedelta) + + tm.assert_series_equal(nat_series_dtype_timedelta + NaT, + nat_series_dtype_timedelta) + tm.assert_series_equal(NaT + nat_series_dtype_timedelta, + nat_series_dtype_timedelta) + + tm.assert_series_equal(nat_series_dtype_timedelta + + single_nat_dtype_timedelta, + nat_series_dtype_timedelta) + tm.assert_series_equal(single_nat_dtype_timedelta + + nat_series_dtype_timedelta, + nat_series_dtype_timedelta) + + # multiplication + tm.assert_series_equal(nat_series_dtype_timedelta * 1.0, + nat_series_dtype_timedelta) + tm.assert_series_equal(1.0 * nat_series_dtype_timedelta, + nat_series_dtype_timedelta) + + tm.assert_series_equal(timedelta_series * 1, timedelta_series) + tm.assert_series_equal(1 * timedelta_series, timedelta_series) + + tm.assert_series_equal(timedelta_series * 1.5, + Series([NaT, Timedelta('1.5s')])) + tm.assert_series_equal(1.5 * timedelta_series, + Series([NaT, Timedelta('1.5s')])) + + tm.assert_series_equal(timedelta_series * np.nan, + nat_series_dtype_timedelta) + tm.assert_series_equal(np.nan * timedelta_series, + nat_series_dtype_timedelta) + + # division + tm.assert_series_equal(timedelta_series / 2, + Series([NaT, Timedelta('0.5s')])) + tm.assert_series_equal(timedelta_series / 2.0, + Series([NaT, Timedelta('0.5s')])) + tm.assert_series_equal(timedelta_series / np.nan, + 
nat_series_dtype_timedelta) + + def test_td64_sub_NaT(self): + # GH#18808 + ser = Series([NaT, Timedelta('1s')]) + res = ser - NaT + expected = Series([NaT, NaT], dtype='timedelta64[ns]') + tm.assert_series_equal(res, expected) + + @pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4), + Timedelta(minutes=5, seconds=4), + Timedelta('5m4s').to_timedelta64()]) + def test_operators_timedelta64_with_timedelta(self, scalar_td): + # smoke tests + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 + scalar_td + scalar_td + td1 + td1 - scalar_td + scalar_td - td1 + td1 / scalar_td + scalar_td / td1 + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = 'operate|unsupported|cannot' + with tm.assert_raises_regex(TypeError, pattern): + td1 * scalar_td + with tm.assert_raises_regex(TypeError, pattern): + scalar_td * td1 + with tm.assert_raises_regex(TypeError, pattern): + scalar_td ** td1 + with tm.assert_raises_regex(TypeError, pattern): + td1 ** scalar_td + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_rfloordiv(self, scalar_td): + # GH#18831 + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + result = scalar_td // td1 + expected = Series([1, 1, np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_rfloordiv_explicit(self, scalar_td): + # GH#18831 + td1 = 
Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # We can test __rfloordiv__ using this syntax, + # see `test_timedelta_rfloordiv` + result = td1.__rfloordiv__(scalar_td) + expected = Series([1, 1, np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=5, seconds=4), + Timedelta('5m4s'), + Timedelta('5m4s').to_timedelta64()]) + def test_timedelta_floordiv(self, scalar_td): + # GH#18831 + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + result = td1 // scalar_td + expected = Series([0, 0, np.nan]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_td64_series_with_tdi(self, names): + # GH#17250 make sure result dtype is correct + # GH#19043 make sure names are propogated correctly + tdi = pd.TimedeltaIndex(['0 days', '1 day'], name=names[0]) + ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1]) + expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], + name=names[2]) + + result = tdi + ser + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + result = ser + tdi + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)], + name=names[2]) + + result = tdi - ser + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + result = ser - tdi + tm.assert_series_equal(result, -expected) + assert result.dtype == 'timedelta64[ns]' + + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_tdi_mul_int_series(self, names): + # GH#19042 + tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], + name=names[0]) + ser = Series([0, 1, 2, 3, 4], 
dtype=np.int64, name=names[1]) + + expected = Series(['0days', '1day', '4days', '9days', '16days'], + dtype='timedelta64[ns]', + name=names[2]) + + result = ser * tdi + tm.assert_series_equal(result, expected) + + # The direct operation tdi * ser still needs to be fixed. + result = ser.__rmul__(tdi) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_float_series_rdiv_tdi(self, names): + # GH#19042 + # TODO: the direct operation TimedeltaIndex / Series still + # needs to be fixed. + tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], + name=names[0]) + ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) + + expected = Series([tdi[n] / ser[n] for n in range(len(ser))], + dtype='timedelta64[ns]', + name=names[2]) + + result = ser.__rdiv__(tdi) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 1797dbcc15872..fb41e50312ae9 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -2,22 +2,20 @@ # pylint: disable-msg=E1101,W0612 import pytest -import pytz from collections import Iterable from datetime import datetime, timedelta import operator from itertools import product, starmap -from numpy import nan, inf +from numpy import nan import numpy as np import pandas as pd -from pandas import (Index, Series, DataFrame, isna, bdate_range, - NaT, date_range, timedelta_range, Categorical) +from pandas import (Index, Series, DataFrame, isna, + NaT, date_range) from pandas.core.indexes.datetimes import Timestamp from pandas.core.indexes.timedeltas import Timedelta -import pandas.core.nanops as nanops from pandas.compat import range, zip from pandas import compat @@ -28,1512 +26,6 @@ from .common import TestData -@pytest.fixture -def tdser(): - return Series(['59 Days', '59 Days', 'NaT'], 
dtype='timedelta64[ns]') - - -class TestSeriesComparisons(object): - def test_series_comparison_scalars(self): - series = Series(date_range('1/1/2000', periods=10)) - - val = datetime(2000, 1, 4) - result = series > val - expected = Series([x > val for x in series]) - tm.assert_series_equal(result, expected) - - val = series[5] - result = series > val - expected = Series([x > val for x in series]) - tm.assert_series_equal(result, expected) - - def test_comparisons(self): - left = np.random.randn(10) - right = np.random.randn(10) - left[:3] = np.nan - - result = nanops.nangt(left, right) - with np.errstate(invalid='ignore'): - expected = (left > right).astype('O') - expected[:3] = np.nan - - assert_almost_equal(result, expected) - - s = Series(['a', 'b', 'c']) - s2 = Series([False, True, False]) - - # it works! - exp = Series([False, False, False]) - assert_series_equal(s == s2, exp) - assert_series_equal(s2 == s, exp) - - def test_operator_series_comparison_zerorank(self): - # GH 13006 - result = np.float64(0) > pd.Series([1, 2, 3]) - expected = 0.0 > pd.Series([1, 2, 3]) - tm.assert_series_equal(result, expected) - result = pd.Series([1, 2, 3]) < np.float64(0) - expected = pd.Series([1, 2, 3]) < 0.0 - tm.assert_series_equal(result, expected) - result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2]) - expected = 0.0 > pd.Series([1, 2, 3]) - tm.assert_series_equal(result, expected) - - def test_object_comparisons(self): - s = Series(['a', 'b', np.nan, 'c', 'a']) - - result = s == 'a' - expected = Series([True, False, False, False, True]) - assert_series_equal(result, expected) - - result = s < 'a' - expected = Series([False, False, False, False, False]) - assert_series_equal(result, expected) - - result = s != 'a' - expected = -(s == 'a') - assert_series_equal(result, expected) - - def test_categorical_comparisons(self): - # GH 8938 - # allow equality comparisons - a = Series(list('abc'), dtype="category") - b = Series(list('abc'), dtype="object") - c = Series(['a', 
'b', 'cc'], dtype="object") - d = Series(list('acb'), dtype="object") - e = Categorical(list('abc')) - f = Categorical(list('acb')) - - # vs scalar - assert not (a == 'a').all() - assert ((a != 'a') == ~(a == 'a')).all() - - assert not ('a' == a).all() - assert (a == 'a')[0] - assert ('a' == a)[0] - assert not ('a' != a)[0] - - # vs list-like - assert (a == a).all() - assert not (a != a).all() - - assert (a == list(a)).all() - assert (a == b).all() - assert (b == a).all() - assert ((~(a == b)) == (a != b)).all() - assert ((~(b == a)) == (b != a)).all() - - assert not (a == c).all() - assert not (c == a).all() - assert not (a == d).all() - assert not (d == a).all() - - # vs a cat-like - assert (a == e).all() - assert (e == a).all() - assert not (a == f).all() - assert not (f == a).all() - - assert ((~(a == e) == (a != e)).all()) - assert ((~(e == a) == (e != a)).all()) - assert ((~(a == f) == (a != f)).all()) - assert ((~(f == a) == (f != a)).all()) - - # non-equality is not comparable - pytest.raises(TypeError, lambda: a < b) - pytest.raises(TypeError, lambda: b < a) - pytest.raises(TypeError, lambda: a > b) - pytest.raises(TypeError, lambda: b > a) - - def test_comparison_tuples(self): - # GH11339 - # comparisons vs tuple - s = Series([(1, 1), (1, 2)]) - - result = s == (1, 2) - expected = Series([False, True]) - assert_series_equal(result, expected) - - result = s != (1, 2) - expected = Series([True, False]) - assert_series_equal(result, expected) - - result = s == (0, 0) - expected = Series([False, False]) - assert_series_equal(result, expected) - - result = s != (0, 0) - expected = Series([True, True]) - assert_series_equal(result, expected) - - s = Series([(1, 1), (1, 1)]) - - result = s == (1, 1) - expected = Series([True, True]) - assert_series_equal(result, expected) - - result = s != (1, 1) - expected = Series([False, False]) - assert_series_equal(result, expected) - - s = Series([frozenset([1]), frozenset([1, 2])]) - - result = s == frozenset([1]) - 
expected = Series([True, False]) - assert_series_equal(result, expected) - - def test_comparison_operators_with_nas(self): - ser = Series(bdate_range('1/1/2000', periods=10), dtype=object) - ser[::2] = np.nan - - # test that comparisons work - ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] - for op in ops: - val = ser[5] - - f = getattr(operator, op) - result = f(ser, val) - - expected = f(ser.dropna(), val).reindex(ser.index) - - if op == 'ne': - expected = expected.fillna(True).astype(bool) - else: - expected = expected.fillna(False).astype(bool) - - assert_series_equal(result, expected) - - # fffffffuuuuuuuuuuuu - # result = f(val, s) - # expected = f(val, s.dropna()).reindex(s.index) - # assert_series_equal(result, expected) - - # boolean &, |, ^ should work with object arrays and propagate NAs - - ops = ['and_', 'or_', 'xor'] - mask = ser.isna() - for bool_op in ops: - func = getattr(operator, bool_op) - - filled = ser.fillna(ser[0]) - - result = func(ser < ser[9], ser > ser[3]) - - expected = func(filled < filled[9], filled > filled[3]) - expected[mask] = False - assert_series_equal(result, expected) - - def test_comparison_object_numeric_nas(self): - ser = Series(np.random.randn(10), dtype=object) - shifted = ser.shift(2) - - ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] - for op in ops: - func = getattr(operator, op) - - result = func(ser, shifted) - expected = func(ser.astype(float), shifted.astype(float)) - assert_series_equal(result, expected) - - def test_comparison_invalid(self): - # GH4968 - # invalid date/int comparisons - s = Series(range(5)) - s2 = Series(date_range('20010101', periods=5)) - - for (x, y) in [(s, s2), (s2, s)]: - pytest.raises(TypeError, lambda: x == y) - pytest.raises(TypeError, lambda: x != y) - pytest.raises(TypeError, lambda: x >= y) - pytest.raises(TypeError, lambda: x > y) - pytest.raises(TypeError, lambda: x < y) - pytest.raises(TypeError, lambda: x <= y) - - def test_unequal_categorical_comparison_raises_type_error(self): - # 
unequal comparison should raise for unordered cats - cat = Series(Categorical(list("abc"))) - - def f(): - cat > "b" - - pytest.raises(TypeError, f) - cat = Series(Categorical(list("abc"), ordered=False)) - - def f(): - cat > "b" - - pytest.raises(TypeError, f) - - # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057 - # and following comparisons with scalars not in categories should raise - # for unequal comps, but not for equal/not equal - cat = Series(Categorical(list("abc"), ordered=True)) - - pytest.raises(TypeError, lambda: cat < "d") - pytest.raises(TypeError, lambda: cat > "d") - pytest.raises(TypeError, lambda: "d" < cat) - pytest.raises(TypeError, lambda: "d" > cat) - - tm.assert_series_equal(cat == "d", Series([False, False, False])) - tm.assert_series_equal(cat != "d", Series([True, True, True])) - - @pytest.mark.parametrize('dtype', [None, object]) - def test_more_na_comparisons(self, dtype): - left = Series(['a', np.nan, 'c'], dtype=dtype) - right = Series(['a', np.nan, 'd'], dtype=dtype) - - result = left == right - expected = Series([True, False, False]) - assert_series_equal(result, expected) - - result = left != right - expected = Series([False, True, True]) - assert_series_equal(result, expected) - - result = left == np.nan - expected = Series([False, False, False]) - assert_series_equal(result, expected) - - result = left != np.nan - expected = Series([True, True, True]) - assert_series_equal(result, expected) - - @pytest.mark.parametrize('pair', [ - ([pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')], - [NaT, NaT, pd.Timestamp('2011-01-03')]), - - ([pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')], - [NaT, NaT, pd.Timedelta('3 days')]), - - ([pd.Period('2011-01', freq='M'), NaT, pd.Period('2011-03', freq='M')], - [NaT, NaT, pd.Period('2011-03', freq='M')])]) - @pytest.mark.parametrize('reverse', [True, False]) - @pytest.mark.parametrize('box', [Series, Index]) - @pytest.mark.parametrize('dtype', [None, 
object]) - def test_nat_comparisons(self, dtype, box, reverse, pair): - l, r = pair - if reverse: - # add lhs / rhs switched data - l, r = r, l - - left = Series(l, dtype=dtype) - right = box(r, dtype=dtype) - # Series, Index - - expected = Series([False, False, True]) - assert_series_equal(left == right, expected) - - expected = Series([True, True, False]) - assert_series_equal(left != right, expected) - - expected = Series([False, False, False]) - assert_series_equal(left < right, expected) - - expected = Series([False, False, False]) - assert_series_equal(left > right, expected) - - expected = Series([False, False, True]) - assert_series_equal(left >= right, expected) - - expected = Series([False, False, True]) - assert_series_equal(left <= right, expected) - - @pytest.mark.parametrize('data', [ - [pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')], - [pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')], - [pd.Period('2011-01', freq='M'), NaT, pd.Period('2011-03', freq='M')] - ]) - @pytest.mark.parametrize('dtype', [None, object]) - def test_nat_comparisons_scalar(self, dtype, data): - left = Series(data, dtype=dtype) - - expected = Series([False, False, False]) - assert_series_equal(left == pd.NaT, expected) - assert_series_equal(pd.NaT == left, expected) - - expected = Series([True, True, True]) - assert_series_equal(left != pd.NaT, expected) - assert_series_equal(pd.NaT != left, expected) - - expected = Series([False, False, False]) - assert_series_equal(left < pd.NaT, expected) - assert_series_equal(pd.NaT > left, expected) - assert_series_equal(left <= pd.NaT, expected) - assert_series_equal(pd.NaT >= left, expected) - - assert_series_equal(left > pd.NaT, expected) - assert_series_equal(pd.NaT < left, expected) - assert_series_equal(left >= pd.NaT, expected) - assert_series_equal(pd.NaT <= left, expected) - - def test_comparison_different_length(self): - a = Series(['a', 'b', 'c']) - b = Series(['b', 'a']) - pytest.raises(ValueError, a.__lt__, b) 
- - a = Series([1, 2]) - b = Series([2, 3, 4]) - pytest.raises(ValueError, a.__eq__, b) - - def test_comparison_label_based(self): - - # GH 4947 - # comparisons should be label based - - a = Series([True, False, True], list('bca')) - b = Series([False, True, False], list('abc')) - - expected = Series([False, True, False], list('abc')) - result = a & b - assert_series_equal(result, expected) - - expected = Series([True, True, False], list('abc')) - result = a | b - assert_series_equal(result, expected) - - expected = Series([True, False, False], list('abc')) - result = a ^ b - assert_series_equal(result, expected) - - # rhs is bigger - a = Series([True, False, True], list('bca')) - b = Series([False, True, False, True], list('abcd')) - - expected = Series([False, True, False, False], list('abcd')) - result = a & b - assert_series_equal(result, expected) - - expected = Series([True, True, False, False], list('abcd')) - result = a | b - assert_series_equal(result, expected) - - # filling - - # vs empty - result = a & Series([]) - expected = Series([False, False, False], list('bca')) - assert_series_equal(result, expected) - - result = a | Series([]) - expected = Series([True, False, True], list('bca')) - assert_series_equal(result, expected) - - # vs non-matching - result = a & Series([1], ['z']) - expected = Series([False, False, False, False], list('abcz')) - assert_series_equal(result, expected) - - result = a | Series([1], ['z']) - expected = Series([True, True, False, False], list('abcz')) - assert_series_equal(result, expected) - - # identity - # we would like s[s|e] == s to hold for any e, whether empty or not - for e in [Series([]), Series([1], ['z']), - Series(np.nan, b.index), Series(np.nan, a.index)]: - result = a[a | e] - assert_series_equal(result, a[a]) - - for e in [Series(['z'])]: - if compat.PY3: - with tm.assert_produces_warning(RuntimeWarning): - result = a[a | e] - else: - result = a[a | e] - assert_series_equal(result, a[a]) - - # vs scalars - 
index = list('bca') - t = Series([True, False, True]) - - for v in [True, 1, 2]: - result = Series([True, False, True], index=index) | v - expected = Series([True, True, True], index=index) - assert_series_equal(result, expected) - - for v in [np.nan, 'foo']: - pytest.raises(TypeError, lambda: t | v) - - for v in [False, 0]: - result = Series([True, False, True], index=index) | v - expected = Series([True, False, True], index=index) - assert_series_equal(result, expected) - - for v in [True, 1]: - result = Series([True, False, True], index=index) & v - expected = Series([True, False, True], index=index) - assert_series_equal(result, expected) - - for v in [False, 0]: - result = Series([True, False, True], index=index) & v - expected = Series([False, False, False], index=index) - assert_series_equal(result, expected) - for v in [np.nan]: - pytest.raises(TypeError, lambda: t & v) - - def test_comparison_flex_basic(self): - left = pd.Series(np.random.randn(10)) - right = pd.Series(np.random.randn(10)) - - assert_series_equal(left.eq(right), left == right) - assert_series_equal(left.ne(right), left != right) - assert_series_equal(left.le(right), left < right) - assert_series_equal(left.lt(right), left <= right) - assert_series_equal(left.gt(right), left > right) - assert_series_equal(left.ge(right), left >= right) - - # axis - for axis in [0, None, 'index']: - assert_series_equal(left.eq(right, axis=axis), left == right) - assert_series_equal(left.ne(right, axis=axis), left != right) - assert_series_equal(left.le(right, axis=axis), left < right) - assert_series_equal(left.lt(right, axis=axis), left <= right) - assert_series_equal(left.gt(right, axis=axis), left > right) - assert_series_equal(left.ge(right, axis=axis), left >= right) - - # - msg = 'No axis named 1 for object type' - for op in ['eq', 'ne', 'le', 'le', 'gt', 'ge']: - with tm.assert_raises_regex(ValueError, msg): - getattr(left, op)(right, axis=1) - - def test_comparison_flex_alignment(self): - left = 
Series([1, 3, 2], index=list('abc')) - right = Series([2, 2, 2], index=list('bcd')) - - exp = pd.Series([False, False, True, False], index=list('abcd')) - assert_series_equal(left.eq(right), exp) - - exp = pd.Series([True, True, False, True], index=list('abcd')) - assert_series_equal(left.ne(right), exp) - - exp = pd.Series([False, False, True, False], index=list('abcd')) - assert_series_equal(left.le(right), exp) - - exp = pd.Series([False, False, False, False], index=list('abcd')) - assert_series_equal(left.lt(right), exp) - - exp = pd.Series([False, True, True, False], index=list('abcd')) - assert_series_equal(left.ge(right), exp) - - exp = pd.Series([False, True, False, False], index=list('abcd')) - assert_series_equal(left.gt(right), exp) - - def test_comparison_flex_alignment_fill(self): - left = Series([1, 3, 2], index=list('abc')) - right = Series([2, 2, 2], index=list('bcd')) - - exp = pd.Series([False, False, True, True], index=list('abcd')) - assert_series_equal(left.eq(right, fill_value=2), exp) - - exp = pd.Series([True, True, False, False], index=list('abcd')) - assert_series_equal(left.ne(right, fill_value=2), exp) - - exp = pd.Series([False, False, True, True], index=list('abcd')) - assert_series_equal(left.le(right, fill_value=0), exp) - - exp = pd.Series([False, False, False, True], index=list('abcd')) - assert_series_equal(left.lt(right, fill_value=0), exp) - - exp = pd.Series([True, True, True, False], index=list('abcd')) - assert_series_equal(left.ge(right, fill_value=0), exp) - - exp = pd.Series([True, True, False, False], index=list('abcd')) - assert_series_equal(left.gt(right, fill_value=0), exp) - - def test_ne(self): - ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float) - expected = [True, True, False, True, True] - assert tm.equalContents(ts.index != 5, expected) - assert tm.equalContents(~(ts.index == 5), expected) - - def test_comp_ops_df_compat(self): - # GH 1134 - s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s2 = 
pd.Series([2, 2, 2], index=list('ABD'), name='x') - - s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') - s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') - - for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]: - - msg = "Can only compare identically-labeled Series objects" - with tm.assert_raises_regex(ValueError, msg): - left == right - - with tm.assert_raises_regex(ValueError, msg): - left != right - - with tm.assert_raises_regex(ValueError, msg): - left < right - - msg = "Can only compare identically-labeled DataFrame objects" - with tm.assert_raises_regex(ValueError, msg): - left.to_frame() == right.to_frame() - - with tm.assert_raises_regex(ValueError, msg): - left.to_frame() != right.to_frame() - - with tm.assert_raises_regex(ValueError, msg): - left.to_frame() < right.to_frame() - - -class TestSeriesArithmetic(object): - def test_divide_decimal(self): - """ resolves issue #9787 """ - from decimal import Decimal - - expected = Series([Decimal(5)]) - - s = Series([Decimal(10)]) - s = s / Decimal(2) - - assert_series_equal(expected, s) - - s = Series([Decimal(10)]) - s = s // Decimal(2) - - assert_series_equal(expected, s) - - def test_div(self): - with np.errstate(all='ignore'): - # no longer do integer div for any ops, but deal with the 0's - p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]}) - result = p['first'] / p['second'] - expected = Series( - p['first'].values.astype(float) / p['second'].values, - dtype='float64') - expected.iloc[0:3] = np.inf - assert_series_equal(result, expected) - - result = p['first'] / 0 - expected = Series(np.inf, index=p.index, name='first') - assert_series_equal(result, expected) - - p = p.astype('float64') - result = p['first'] / p['second'] - expected = Series(p['first'].values / p['second'].values) - assert_series_equal(result, expected) - - p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]}) - result = p['first'] / p['second'] - assert_series_equal(result, 
p['first'].astype('float64'), - check_names=False) - assert result.name is None - assert not result.equals(p['second'] / p['first']) - - # inf signing - s = Series([np.nan, 1., -1.]) - result = s / 0 - expected = Series([np.nan, np.inf, -np.inf]) - assert_series_equal(result, expected) - - # float/integer issue - # GH 7785 - p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)}) - expected = Series([-0.01, -np.inf]) - - result = p['second'].div(p['first']) - assert_series_equal(result, expected, check_names=False) - - result = p['second'] / p['first'] - assert_series_equal(result, expected) - - # GH 9144 - s = Series([-1, 0, 1]) - - result = 0 / s - expected = Series([0.0, nan, 0.0]) - assert_series_equal(result, expected) - - result = s / 0 - expected = Series([-inf, nan, inf]) - assert_series_equal(result, expected) - - result = s // 0 - expected = Series([-inf, nan, inf]) - assert_series_equal(result, expected) - - # GH 8674 - zero_array = np.array([0] * 5) - data = np.random.randn(5) - expected = pd.Series([0.] 
* 5) - result = zero_array / pd.Series(data) - assert_series_equal(result, expected) - - result = pd.Series(zero_array) / data - assert_series_equal(result, expected) - - result = pd.Series(zero_array) / pd.Series(data) - assert_series_equal(result, expected) - - -class TestTimedeltaSeriesArithmeticWithIntegers(object): - # Tests for Series with dtype 'timedelta64[ns]' arithmetic operations - # with integer and int-like others - - # ------------------------------------------------------------------ - # Addition and Subtraction - - def test_td64series_add_int_series_invalid(self, tdser): - with pytest.raises(TypeError): - tdser + Series([2, 3, 4]) - - @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') - def test_td64series_radd_int_series_invalid(self, tdser): - with pytest.raises(TypeError): - Series([2, 3, 4]) + tdser - - def test_td64series_sub_int_series_invalid(self, tdser): - with pytest.raises(TypeError): - tdser - Series([2, 3, 4]) - - @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') - def test_td64series_rsub_int_series_invalid(self, tdser): - with pytest.raises(TypeError): - Series([2, 3, 4]) - tdser - - @pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)]) - def test_td64series_add_sub_numeric_scalar_invalid(self, scalar, tdser): - with pytest.raises(TypeError): - tdser + scalar - with pytest.raises(TypeError): - scalar + tdser - with pytest.raises(TypeError): - tdser - scalar - with pytest.raises(TypeError): - scalar - tdser - - @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', - 'uint64', 'uint32', 'uint16', 'uint8', - 'float64', 'float32', 'float16']) - @pytest.mark.parametrize('vector', [ - np.array([1, 2, 3]), - pd.Index([1, 2, 3]), - pytest.param(Series([1, 2, 3]), - marks=pytest.mark.xfail(reason='GH#19123 integer ' - 'interpreted as nanos')) - ]) - def test_td64series_add_sub_numeric_array_invalid(self, vector, - dtype, tdser): - vector = vector.astype(dtype) - with 
pytest.raises(TypeError): - tdser + vector - with pytest.raises(TypeError): - vector + tdser - with pytest.raises(TypeError): - tdser - vector - with pytest.raises(TypeError): - vector - tdser - - # ------------------------------------------------------------------ - # Multiplicaton and Division - - @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', - 'uint64', 'uint32', 'uint16', 'uint8', - 'float64', 'float32', 'float16']) - @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), - pd.Index([20, 30, 40]), - Series([20, 30, 40])]) - def test_td64series_div_numeric_array(self, vector, dtype, tdser): - # GH 4521 - # divide/multiply by integers - vector = vector.astype(dtype) - expected = Series(['2.95D', '1D 23H 12m', 'NaT'], - dtype='timedelta64[ns]') - - result = tdser / vector - assert_series_equal(result, expected) - - with pytest.raises(TypeError): - vector / tdser - - @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', - 'uint64', 'uint32', 'uint16', 'uint8', - 'float64', 'float32', 'float16']) - @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), - pd.Index([20, 30, 40]), - Series([20, 30, 40])]) - def test_td64series_mul_numeric_array(self, vector, dtype, tdser): - # GH 4521 - # divide/multiply by integers - vector = vector.astype(dtype) - - expected = Series(['1180 Days', '1770 Days', 'NaT'], - dtype='timedelta64[ns]') - - result = tdser * vector - assert_series_equal(result, expected) - - @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', - 'uint64', 'uint32', 'uint16', 'uint8', - 'float64', 'float32', 'float16']) - @pytest.mark.parametrize('vector', [ - np.array([20, 30, 40]), - pytest.param(pd.Index([20, 30, 40]), - marks=pytest.mark.xfail(reason='__mul__ raises ' - 'instead of returning ' - 'NotImplemented')), - Series([20, 30, 40]) - ]) - def test_td64series_rmul_numeric_array(self, vector, dtype, tdser): - # GH 4521 - # divide/multiply by integers - vector = vector.astype(dtype) - - expected = Series(['1180 
Days', '1770 Days', 'NaT'], - dtype='timedelta64[ns]') - - result = vector * tdser - assert_series_equal(result, expected) - - @pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)]) - def test_td64series_mul_numeric_scalar(self, one, tdser): - # GH 4521 - # divide/multiply by integers - expected = Series(['-59 Days', '-59 Days', 'NaT'], - dtype='timedelta64[ns]') - - result = tdser * (-one) - assert_series_equal(result, expected) - result = (-one) * tdser - assert_series_equal(result, expected) - - expected = Series(['118 Days', '118 Days', 'NaT'], - dtype='timedelta64[ns]') - - result = tdser * (2 * one) - assert_series_equal(result, expected) - result = (2 * one) * tdser - assert_series_equal(result, expected) - - @pytest.mark.parametrize('two', [ - 2, 2.0, - pytest.param(np.array(2), - marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' - 'incorrectly True.')), - pytest.param(np.array(2.0), - marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' - 'incorrectly True.')), - ]) - def test_td64series_div_numeric_scalar(self, two, tdser): - # GH 4521 - # divide/multiply by integers - expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]') - - result = tdser / two - assert_series_equal(result, expected) - - -class TestTimedeltaSeriesArithmetic(object): - def test_td64series_add_sub_timestamp(self): - # GH11925 - tdser = Series(timedelta_range('1 day', periods=3)) - ts = Timestamp('2012-01-01') - expected = Series(date_range('2012-01-02', periods=3)) - assert_series_equal(ts + tdser, expected) - assert_series_equal(tdser + ts, expected) - - expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D')) - assert_series_equal(ts - tdser, expected2) - assert_series_equal(ts + (-tdser), expected2) - - with pytest.raises(TypeError): - tdser - ts - - def test_timedelta64_operations_with_DateOffset(self): - # GH 10699 - td = Series([timedelta(minutes=5, seconds=3)] * 3) - result = td + pd.offsets.Minute(1) - expected = 
Series([timedelta(minutes=6, seconds=3)] * 3) - assert_series_equal(result, expected) - - result = td - pd.offsets.Minute(1) - expected = Series([timedelta(minutes=4, seconds=3)] * 3) - assert_series_equal(result, expected) - - result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3), - pd.offsets.Hour(2)]) - expected = Series([timedelta(minutes=6, seconds=3), timedelta( - minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)]) - assert_series_equal(result, expected) - - result = td + pd.offsets.Minute(1) + pd.offsets.Second(12) - expected = Series([timedelta(minutes=6, seconds=15)] * 3) - assert_series_equal(result, expected) - - # valid DateOffsets - for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli', - 'Nano']: - op = getattr(pd.offsets, do) - td + op(5) - op(5) + td - td - op(5) - op(5) - td - - def test_timedelta64_operations_with_timedeltas(self): - # td operate with td - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td2 = timedelta(minutes=5, seconds=4) - result = td1 - td2 - expected = (Series([timedelta(seconds=0)] * 3) - - Series([timedelta(seconds=1)] * 3)) - assert result.dtype == 'm8[ns]' - assert_series_equal(result, expected) - - result2 = td2 - td1 - expected = (Series([timedelta(seconds=1)] * 3) - - Series([timedelta(seconds=0)] * 3)) - assert_series_equal(result2, expected) - - # roundtrip - assert_series_equal(result + td2, td1) - - # Now again, using pd.to_timedelta, which should build - # a Series or a scalar, depending on input. 
- td1 = Series(pd.to_timedelta(['00:05:03'] * 3)) - td2 = pd.to_timedelta('00:05:04') - result = td1 - td2 - expected = (Series([timedelta(seconds=0)] * 3) - - Series([timedelta(seconds=1)] * 3)) - assert result.dtype == 'm8[ns]' - assert_series_equal(result, expected) - - result2 = td2 - td1 - expected = (Series([timedelta(seconds=1)] * 3) - - Series([timedelta(seconds=0)] * 3)) - assert_series_equal(result2, expected) - - # roundtrip - assert_series_equal(result + td2, td1) - - def test_operators_timedelta64(self): - # series ops - v1 = date_range('2012-1-1', periods=3, freq='D') - v2 = date_range('2012-1-2', periods=3, freq='D') - rs = Series(v2) - Series(v1) - xp = Series(1e9 * 3600 * 24, - rs.index).astype('int64').astype('timedelta64[ns]') - assert_series_equal(rs, xp) - assert rs.dtype == 'timedelta64[ns]' - - df = DataFrame(dict(A=v1)) - td = Series([timedelta(days=i) for i in range(3)]) - assert td.dtype == 'timedelta64[ns]' - - # series on the rhs - result = df['A'] - df['A'].shift() - assert result.dtype == 'timedelta64[ns]' - - result = df['A'] + td - assert result.dtype == 'M8[ns]' - - # scalar Timestamp on rhs - maxa = df['A'].max() - assert isinstance(maxa, Timestamp) - - resultb = df['A'] - df['A'].max() - assert resultb.dtype == 'timedelta64[ns]' - - # timestamp on lhs - result = resultb + df['A'] - values = [Timestamp('20111230'), Timestamp('20120101'), - Timestamp('20120103')] - expected = Series(values, name='A') - assert_series_equal(result, expected) - - # datetimes on rhs - result = df['A'] - datetime(2001, 1, 1) - expected = Series( - [timedelta(days=4017 + i) for i in range(3)], name='A') - assert_series_equal(result, expected) - assert result.dtype == 'm8[ns]' - - d = datetime(2001, 1, 1, 3, 4) - resulta = df['A'] - d - assert resulta.dtype == 'm8[ns]' - - # roundtrip - resultb = resulta + d - assert_series_equal(df['A'], resultb) - - # timedeltas on rhs - td = timedelta(days=1) - resulta = df['A'] + td - resultb = resulta - td - 
assert_series_equal(resultb, df['A']) - assert resultb.dtype == 'M8[ns]' - - # roundtrip - td = timedelta(minutes=5, seconds=3) - resulta = df['A'] + td - resultb = resulta - td - assert_series_equal(df['A'], resultb) - assert resultb.dtype == 'M8[ns]' - - # inplace - value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1)) - rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1)) - assert rs[2] == value - - def test_timedelta64_ops_nat(self): - # GH 11349 - timedelta_series = Series([NaT, Timedelta('1s')]) - nat_series_dtype_timedelta = Series([NaT, NaT], - dtype='timedelta64[ns]') - single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]') - - # subtraction - assert_series_equal(timedelta_series - NaT, - nat_series_dtype_timedelta) - assert_series_equal(-NaT + timedelta_series, - nat_series_dtype_timedelta) - - assert_series_equal(timedelta_series - single_nat_dtype_timedelta, - nat_series_dtype_timedelta) - assert_series_equal(-single_nat_dtype_timedelta + timedelta_series, - nat_series_dtype_timedelta) - - # addition - assert_series_equal(nat_series_dtype_timedelta + NaT, - nat_series_dtype_timedelta) - assert_series_equal(NaT + nat_series_dtype_timedelta, - nat_series_dtype_timedelta) - - assert_series_equal(nat_series_dtype_timedelta + - single_nat_dtype_timedelta, - nat_series_dtype_timedelta) - assert_series_equal(single_nat_dtype_timedelta + - nat_series_dtype_timedelta, - nat_series_dtype_timedelta) - - assert_series_equal(timedelta_series + NaT, - nat_series_dtype_timedelta) - assert_series_equal(NaT + timedelta_series, - nat_series_dtype_timedelta) - - assert_series_equal(timedelta_series + single_nat_dtype_timedelta, - nat_series_dtype_timedelta) - assert_series_equal(single_nat_dtype_timedelta + timedelta_series, - nat_series_dtype_timedelta) - - assert_series_equal(nat_series_dtype_timedelta + NaT, - nat_series_dtype_timedelta) - assert_series_equal(NaT + nat_series_dtype_timedelta, - nat_series_dtype_timedelta) - - 
assert_series_equal(nat_series_dtype_timedelta + - single_nat_dtype_timedelta, - nat_series_dtype_timedelta) - assert_series_equal(single_nat_dtype_timedelta + - nat_series_dtype_timedelta, - nat_series_dtype_timedelta) - - # multiplication - assert_series_equal(nat_series_dtype_timedelta * 1.0, - nat_series_dtype_timedelta) - assert_series_equal(1.0 * nat_series_dtype_timedelta, - nat_series_dtype_timedelta) - - assert_series_equal(timedelta_series * 1, timedelta_series) - assert_series_equal(1 * timedelta_series, timedelta_series) - - assert_series_equal(timedelta_series * 1.5, - Series([NaT, Timedelta('1.5s')])) - assert_series_equal(1.5 * timedelta_series, - Series([NaT, Timedelta('1.5s')])) - - assert_series_equal(timedelta_series * nan, - nat_series_dtype_timedelta) - assert_series_equal(nan * timedelta_series, - nat_series_dtype_timedelta) - - # division - assert_series_equal(timedelta_series / 2, - Series([NaT, Timedelta('0.5s')])) - assert_series_equal(timedelta_series / 2.0, - Series([NaT, Timedelta('0.5s')])) - assert_series_equal(timedelta_series / nan, - nat_series_dtype_timedelta) - - def test_td64_sub_NaT(self): - # GH#18808 - ser = Series([NaT, Timedelta('1s')]) - res = ser - NaT - expected = Series([NaT, NaT], dtype='timedelta64[ns]') - tm.assert_series_equal(res, expected) - - @pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4), - Timedelta(minutes=5, seconds=4), - Timedelta('5m4s').to_timedelta64()]) - def test_operators_timedelta64_with_timedelta(self, scalar_td): - # smoke tests - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - td1 + scalar_td - scalar_td + td1 - td1 - scalar_td - scalar_td - td1 - td1 / scalar_td - scalar_td / td1 - - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - 
td1.iloc[2] = np.nan - - # check that we are getting a TypeError - # with 'operate' (from core/ops.py) for the ops that are not - # defined - pattern = 'operate|unsupported|cannot' - with tm.assert_raises_regex(TypeError, pattern): - td1 * scalar_td - with tm.assert_raises_regex(TypeError, pattern): - scalar_td * td1 - with tm.assert_raises_regex(TypeError, pattern): - scalar_td ** td1 - with tm.assert_raises_regex(TypeError, pattern): - td1 ** scalar_td - - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_timedelta_rfloordiv(self, scalar_td): - # GH#18831 - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - result = scalar_td // td1 - expected = Series([1, 1, np.nan]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_timedelta_rfloordiv_explicit(self, scalar_td): - # GH#18831 - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - # We can test __rfloordiv__ using this syntax, - # see `test_timedelta_rfloordiv` - result = td1.__rfloordiv__(scalar_td) - expected = Series([1, 1, np.nan]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('scalar_td', [ - timedelta(minutes=5, seconds=4), - Timedelta('5m4s'), - Timedelta('5m4s').to_timedelta64()]) - def test_timedelta_floordiv(self, scalar_td): - # GH#18831 - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - result = td1 // scalar_td - expected = Series([0, 0, np.nan]) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('names', [(None, None, None), - ('Egon', 'Venkman', None), - ('NCC1701D', 'NCC1701D', 'NCC1701D')]) - def test_td64_series_with_tdi(self, names): - # GH#17250 make sure result dtype is correct - # GH#19043 make sure names are propogated correctly - 
tdi = pd.TimedeltaIndex(['0 days', '1 day'], name=names[0]) - ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1]) - expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], - name=names[2]) - - result = tdi + ser - tm.assert_series_equal(result, expected) - assert result.dtype == 'timedelta64[ns]' - - result = ser + tdi - tm.assert_series_equal(result, expected) - assert result.dtype == 'timedelta64[ns]' - - expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)], - name=names[2]) - - result = tdi - ser - tm.assert_series_equal(result, expected) - assert result.dtype == 'timedelta64[ns]' - - result = ser - tdi - tm.assert_series_equal(result, -expected) - assert result.dtype == 'timedelta64[ns]' - - @pytest.mark.parametrize('names', [(None, None, None), - ('Egon', 'Venkman', None), - ('NCC1701D', 'NCC1701D', 'NCC1701D')]) - def test_tdi_mul_int_series(self, names): - # GH#19042 - tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], - name=names[0]) - ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1]) - - expected = Series(['0days', '1day', '4days', '9days', '16days'], - dtype='timedelta64[ns]', - name=names[2]) - - result = ser * tdi - tm.assert_series_equal(result, expected) - - # The direct operation tdi * ser still needs to be fixed. - result = ser.__rmul__(tdi) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize('names', [(None, None, None), - ('Egon', 'Venkman', None), - ('NCC1701D', 'NCC1701D', 'NCC1701D')]) - def test_float_series_rdiv_tdi(self, names): - # GH#19042 - # TODO: the direct operation TimedeltaIndex / Series still - # needs to be fixed. 
- tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], - name=names[0]) - ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) - - expected = Series([tdi[n] / ser[n] for n in range(len(ser))], - dtype='timedelta64[ns]', - name=names[2]) - - result = ser.__rdiv__(tdi) - tm.assert_series_equal(result, expected) - - -class TestDatetimeSeriesArithmetic(object): - @pytest.mark.parametrize( - 'box, assert_func', - [(Series, tm.assert_series_equal), - (pd.Index, tm.assert_index_equal)]) - def test_sub_datetime64_not_ns(self, box, assert_func): - # GH#7996 - dt64 = np.datetime64('2013-01-01') - assert dt64.dtype == 'datetime64[D]' - - obj = box(date_range('20130101', periods=3)) - res = obj - dt64 - expected = box([Timedelta(days=0), Timedelta(days=1), - Timedelta(days=2)]) - assert_func(res, expected) - - res = dt64 - obj - assert_func(res, -expected) - - def test_operators_datetimelike(self): - def run_ops(ops, get_ser, test_ser): - - # check that we are getting a TypeError - # with 'operate' (from core/ops.py) for the ops that are not - # defined - for op_str in ops: - op = getattr(get_ser, op_str, None) - with tm.assert_raises_regex(TypeError, 'operate|cannot'): - op(test_ser) - - # ## timedelta64 ### - td1 = Series([timedelta(minutes=5, seconds=3)] * 3) - td1.iloc[2] = np.nan - - # ## datetime64 ### - dt1 = Series([Timestamp('20111230'), Timestamp('20120101'), - Timestamp('20120103')]) - dt1.iloc[2] = np.nan - dt2 = Series([Timestamp('20111231'), Timestamp('20120102'), - Timestamp('20120104')]) - ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__', - '__pow__', '__radd__', '__rmul__', '__rfloordiv__', - '__rtruediv__', '__rdiv__', '__rpow__'] - run_ops(ops, dt1, dt2) - dt1 - dt2 - dt2 - dt1 - - # ## datetime64 with timetimedelta ### - ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', - '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', - '__rpow__'] - run_ops(ops, dt1, td1) - dt1 + td1 - td1 
+ dt1 - dt1 - td1 - # TODO: Decide if this ought to work. - # td1 - dt1 - - # ## timetimedelta with datetime64 ### - ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__', - '__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__', - '__rdiv__', '__rpow__'] - run_ops(ops, td1, dt1) - td1 + dt1 - dt1 + td1 - - # 8260, 10763 - # datetime64 with tz - ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__', - '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__', - '__rpow__'] - - tz = 'US/Eastern' - dt1 = Series(date_range('2000-01-01 09:00:00', periods=5, - tz=tz), name='foo') - dt2 = dt1.copy() - dt2.iloc[2] = np.nan - td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H')) - td2 = td1.copy() - td2.iloc[1] = np.nan - run_ops(ops, dt1, td1) - - result = dt1 + td1[0] - exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) - - result = dt2 + td2[0] - exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) - - # odd numpy behavior with scalar timedeltas - result = td1[0] + dt1 - exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) - - result = td2[0] + dt2 - exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) - - result = dt1 - td1[0] - exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) - pytest.raises(TypeError, lambda: td1[0] - dt1) - - result = dt2 - td2[0] - exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz) - assert_series_equal(result, exp) - pytest.raises(TypeError, lambda: td2[0] - dt2) - - result = dt1 + td1 - exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz) - assert_series_equal(result, exp) - - result = dt2 + td2 - exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz) - assert_series_equal(result, exp) - - result = dt1 - td1 - exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz) - 
assert_series_equal(result, exp) - - result = dt2 - td2 - exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz) - assert_series_equal(result, exp) - - pytest.raises(TypeError, lambda: td1 - dt1) - pytest.raises(TypeError, lambda: td2 - dt2) - - def test_sub_single_tz(self): - # GH12290 - s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')]) - s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')]) - result = s1 - s2 - expected = Series([Timedelta('2days')]) - assert_series_equal(result, expected) - result = s2 - s1 - expected = Series([Timedelta('-2days')]) - assert_series_equal(result, expected) - - def test_dt64tz_series_sub_dtitz(self): - # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series - # (with same tz) raises, fixed by #19024 - dti = pd.date_range('1999-09-30', periods=10, tz='US/Pacific') - ser = pd.Series(dti) - expected = pd.Series(pd.TimedeltaIndex(['0days'] * 10)) - - res = dti - ser - tm.assert_series_equal(res, expected) - res = ser - dti - tm.assert_series_equal(res, expected) - - def test_sub_datetime_compat(self): - # see gh-14088 - s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT]) - dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc) - exp = Series([Timedelta('1 days'), pd.NaT]) - assert_series_equal(s - dt, exp) - assert_series_equal(s - Timestamp(dt), exp) - - def test_dt64_series_with_timedelta(self): - # scalar timedeltas/np.timedelta64 objects - # operate with np.timedelta64 correctly - s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) - - result = s + np.timedelta64(1, 's') - result2 = np.timedelta64(1, 's') + s - expected = Series([Timestamp('20130101 9:01:01'), - Timestamp('20130101 9:02:01')]) - assert_series_equal(result, expected) - assert_series_equal(result2, expected) - - result = s + np.timedelta64(5, 'ms') - result2 = np.timedelta64(5, 'ms') + s - expected = Series([Timestamp('20130101 9:01:00.005'), - Timestamp('20130101 9:02:00.005')]) - 
assert_series_equal(result, expected) - assert_series_equal(result2, expected) - - def test_dt64_series_add_tick_DateOffset(self): - # GH 4532 - # operate with pd.offsets - ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) - expected = Series([Timestamp('20130101 9:01:05'), - Timestamp('20130101 9:02:05')]) - - result = ser + pd.offsets.Second(5) - assert_series_equal(result, expected) - - result2 = pd.offsets.Second(5) + ser - assert_series_equal(result2, expected) - - def test_dt64_series_sub_tick_DateOffset(self): - # GH 4532 - # operate with pd.offsets - ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) - expected = Series([Timestamp('20130101 9:00:55'), - Timestamp('20130101 9:01:55')]) - - result = ser - pd.offsets.Second(5) - assert_series_equal(result, expected) - - result2 = -pd.offsets.Second(5) + ser - assert_series_equal(result2, expected) - - with pytest.raises(TypeError): - pd.offsets.Second(5) - ser - - @pytest.mark.parametrize('cls_name', ['Day', 'Hour', 'Minute', 'Second', - 'Milli', 'Micro', 'Nano']) - def test_dt64_series_with_tick_DateOffset_smoke(self, cls_name): - # GH 4532 - # smoke tests for valid DateOffsets - ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) - - offset_cls = getattr(pd.offsets, cls_name) - ser + offset_cls(5) - offset_cls(5) + ser - - def test_dt64_series_add_mixed_tick_DateOffset(self): - # GH 4532 - # operate with pd.offsets - s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) - - result = s + pd.offsets.Milli(5) - result2 = pd.offsets.Milli(5) + s - expected = Series([Timestamp('20130101 9:01:00.005'), - Timestamp('20130101 9:02:00.005')]) - assert_series_equal(result, expected) - assert_series_equal(result2, expected) - - result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5) - expected = Series([Timestamp('20130101 9:06:00.005'), - Timestamp('20130101 9:07:00.005')]) - assert_series_equal(result, expected) - - def 
test_dt64_series_sub_NaT(self): - # GH#18808 - dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp('19900315')]) - ser = pd.Series(dti) - res = ser - pd.NaT - expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]') - tm.assert_series_equal(res, expected) - - dti_tz = dti.tz_localize('Asia/Tokyo') - ser_tz = pd.Series(dti_tz) - res = ser_tz - pd.NaT - expected = pd.Series([pd.NaT, pd.NaT], dtype='timedelta64[ns]') - tm.assert_series_equal(res, expected) - - def test_datetime64_ops_nat(self): - # GH 11349 - datetime_series = Series([NaT, Timestamp('19900315')]) - nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]') - single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]') - - # subtraction - assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp) - with pytest.raises(TypeError): - -single_nat_dtype_datetime + datetime_series - - assert_series_equal(-NaT + nat_series_dtype_timestamp, - nat_series_dtype_timestamp) - with pytest.raises(TypeError): - -single_nat_dtype_datetime + nat_series_dtype_timestamp - - # addition - assert_series_equal(nat_series_dtype_timestamp + NaT, - nat_series_dtype_timestamp) - assert_series_equal(NaT + nat_series_dtype_timestamp, - nat_series_dtype_timestamp) - - assert_series_equal(nat_series_dtype_timestamp + NaT, - nat_series_dtype_timestamp) - assert_series_equal(NaT + nat_series_dtype_timestamp, - nat_series_dtype_timestamp) - - @pytest.mark.parametrize('dt64_series', [ - Series([Timestamp('19900315'), Timestamp('19900315')]), - Series([NaT, Timestamp('19900315')]), - Series([NaT, NaT], dtype='datetime64[ns]')]) - @pytest.mark.parametrize('one', [1, 1.0, np.array(1)]) - def test_dt64_mul_div_numeric_invalid(self, one, dt64_series): - # multiplication - with pytest.raises(TypeError): - dt64_series * one - with pytest.raises(TypeError): - one * dt64_series - - # division - with pytest.raises(TypeError): - dt64_series / one - with pytest.raises(TypeError): - one / dt64_series - - def 
test_dt64_series_arith_overflow(self): - # GH#12534, fixed by #19024 - dt = pd.Timestamp('1700-01-31') - td = pd.Timedelta('20000 Days') - dti = pd.date_range('1949-09-30', freq='100Y', periods=4) - ser = pd.Series(dti) - with pytest.raises(OverflowError): - ser - dt - with pytest.raises(OverflowError): - dt - ser - with pytest.raises(OverflowError): - ser + td - with pytest.raises(OverflowError): - td + ser - - ser.iloc[-1] = pd.NaT - expected = pd.Series(['2004-10-03', '2104-10-04', '2204-10-04', 'NaT'], - dtype='datetime64[ns]') - res = ser + td - tm.assert_series_equal(res, expected) - res = td + ser - tm.assert_series_equal(res, expected) - - ser.iloc[1:] = pd.NaT - expected = pd.Series(['91279 Days', 'NaT', 'NaT', 'NaT'], - dtype='timedelta64[ns]') - res = ser - dt - tm.assert_series_equal(res, expected) - res = dt - ser - tm.assert_series_equal(res, -expected) - - class TestSeriesOperators(TestData): def test_op_method(self): def check(series, other, check_reverse=False): @@ -1645,15 +137,6 @@ def test_operators_empty_int_corner(self): s2 = Series({'x': 0.}) assert_series_equal(s1 * s2, Series([np.nan], index=['x'])) - def test_invalid_ops(self): - # invalid ops - pytest.raises(Exception, self.objSeries.__add__, 1) - pytest.raises(Exception, self.objSeries.__add__, - np.array(1, dtype=np.int64)) - pytest.raises(Exception, self.objSeries.__sub__, 1) - pytest.raises(Exception, self.objSeries.__sub__, - np.array(1, dtype=np.int64)) - @pytest.mark.parametrize("m", [1, 3, 10]) @pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns']) def test_timedelta64_conversions(self, m, unit): @@ -2074,57 +557,6 @@ def test_series_frame_radd_bug(self): with pytest.raises(TypeError): self.ts + datetime.now() - def test_series_radd_str(self): - ser = pd.Series(['x', np.nan, 'x']) - assert_series_equal('a' + ser, pd.Series(['ax', np.nan, 'ax'])) - assert_series_equal(ser + 'a', pd.Series(['xa', np.nan, 'xa'])) - - @pytest.mark.parametrize('dtype', [None, object]) 
- def test_series_with_dtype_radd_timedelta(self, dtype): - ser = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), - pd.Timedelta('3 days')], dtype=dtype) - expected = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), - pd.Timedelta('6 days')]) - - result = pd.Timedelta('3 days') + ser - assert_series_equal(result, expected) - - result = ser + pd.Timedelta('3 days') - assert_series_equal(result, expected) - - @pytest.mark.parametrize('dtype', [None, object]) - def test_series_with_dtype_radd_int(self, dtype): - ser = pd.Series([1, 2, 3], dtype=dtype) - expected = pd.Series([2, 3, 4], dtype=dtype) - - result = 1 + ser - assert_series_equal(result, expected) - - result = ser + 1 - assert_series_equal(result, expected) - - @pytest.mark.parametrize('dtype', [None, object]) - def test_series_with_dtype_radd_nan(self, dtype): - ser = pd.Series([1, 2, 3], dtype=dtype) - expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) - - result = np.nan + ser - assert_series_equal(result, expected) - - result = ser + np.nan - assert_series_equal(result, expected) - - @pytest.mark.parametrize('data', [ - [1, 2, 3], - [1.1, 2.2, 3.3], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT], - ['x', 'y', 1]]) - @pytest.mark.parametrize('dtype', [None, object]) - def test_series_radd_str_invalid(self, dtype, data): - ser = Series(data, dtype=dtype) - with pytest.raises(TypeError): - 'foo_' + ser - def test_operators_frame(self): # rpow does not work with DataFrame df = DataFrame({'A': self.ts})
Separate tests.series.test_operators into smaller test files specific to arithmetic for datetime-like, tiemdelta-like, numeric, and comparisons. Cleans up some leftover TODOs for index arithmetic tests. The actual contents of tests are unchanged, just split up and moved around.
https://api.github.com/repos/pandas-dev/pandas/pulls/19255
2018-01-15T20:33:09Z
2018-01-15T20:42:37Z
null
2018-02-11T21:59:12Z
Fix Index mul/div ops with Series, closes #19080, #19042
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 744f1e14533e7..7c54d44d9f25b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -423,6 +423,9 @@ Conversion - Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`) - Bug in localization of a naive, datetime string in a ``Series`` constructor with a ``datetime64[ns, tz]`` dtype (:issue:`174151`) - :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`) +- Bug in :class:`Index` multiplication and division methods where operating with a ``Series`` would return an ``Index`` object instead of a ``Series`` object (:issue:`19042`) + + - - - Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a5949c62ad913..d5330768059fd 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -14,7 +14,7 @@ from pandas.core.accessor import CachedAccessor from pandas.core.dtypes.generic import ( - ABCSeries, + ABCSeries, ABCDataFrame, ABCMultiIndex, ABCPeriodIndex, ABCDateOffset) @@ -4019,6 +4019,9 @@ def _add_numeric_methods_binary(cls): def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index): def _evaluate_numeric_binop(self, other): + if isinstance(other, (ABCSeries, ABCDataFrame)): + return NotImplemented + other = self._validate_for_numeric_binop(other, op, opstr) # handle time-based others diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 741dca6be0630..10a923c056be2 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -8,6 +8,7 @@ is_integer, is_scalar, is_int64_dtype) +from pandas.core.dtypes.generic import ABCSeries from pandas import compat from pandas.compat import lrange, range, 
get_range_parameters @@ -583,6 +584,8 @@ def _make_evaluate_binop(op, opstr, reversed=False, step=False): """ def _evaluate_numeric_binop(self, other): + if isinstance(other, ABCSeries): + return NotImplemented other = self._validate_for_numeric_binop(other, op, opstr) attrs = self._get_attributes_dict() diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index dcd592345b91c..1ce8ade50c071 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -68,12 +68,12 @@ def test_numeric_compat(self): tm.assert_index_equal(result, didx) result = idx * Series(np.arange(5, dtype=arr_dtype)) - tm.assert_index_equal(result, didx) + tm.assert_series_equal(result, Series(didx)) - result = idx * Series(np.arange(5, dtype='float64') + 0.1) - expected = Float64Index(np.arange(5, dtype='float64') * - (np.arange(5, dtype='float64') + 0.1)) - tm.assert_index_equal(result, expected) + rng5 = np.arange(5, dtype='float64') + result = idx * Series(rng5 + 0.1) + expected = Series(rng5 * (rng5 + 0.1)) + tm.assert_series_equal(result, expected) # invalid pytest.raises(TypeError, @@ -95,16 +95,6 @@ def test_numeric_compat(self): for r, e in zip(result, expected): tm.assert_index_equal(r, e) - result = divmod(idx, Series(full_like(idx.values, 2))) - with np.errstate(all='ignore'): - div, mod = divmod( - idx.values, - full_like(idx.values, 2), - ) - expected = Index(div), Index(mod) - for r, e in zip(result, expected): - tm.assert_index_equal(r, e) - # test power calculations both ways, GH 14973 expected = pd.Float64Index(2.0**idx.values) result = 2.0**idx @@ -114,6 +104,18 @@ def test_numeric_compat(self): result = idx**2.0 tm.assert_index_equal(result, expected) + @pytest.mark.xfail(reason='GH#19252 Series has no __rdivmod__') + def test_divmod_series(self): + idx = self.create_index() + + result = divmod(idx, Series(full_like(idx.values, 2))) + with np.errstate(all='ignore'): + div, mod = divmod(idx.values, 
full_like(idx.values, 2)) + expected = Series(div), Series(mod) + + for r, e in zip(result, expected): + tm.assert_series_equal(r, e) + def test_explicit_conversions(self): # GH 8608 diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 3ec918e391860..962de91ed0581 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -152,11 +152,12 @@ def test_numeric_compat(self): tm.assert_index_equal(result, didx) result = idx * Series(np.arange(5, dtype='int64')) - tm.assert_index_equal(result, didx) + tm.assert_series_equal(result, Series(didx)) - result = idx * Series(np.arange(5, dtype='float64') + 0.1) - tm.assert_index_equal(result, self._holder(np.arange( - 5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1))) + rng5 = np.arange(5, dtype='float64') + result = idx * Series(rng5 + 0.1) + tm.assert_series_equal(result, + Series(self._holder(rng5 * (rng5 + 0.1)))) # invalid pytest.raises(TypeError, lambda: idx * idx)
- [x] closes #19080, closes #19042 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19253
2018-01-15T19:36:22Z
2018-01-16T23:59:29Z
2018-01-16T23:59:28Z
2018-01-17T03:02:30Z
Fix pd.NaT - Series
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 314a5a3f37311..672ef8be1f72a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -416,10 +416,10 @@ Conversion - Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`) - Bug in localization of a naive, datetime string in a ``Series`` constructor with a ``datetime64[ns, tz]`` dtype (:issue:`174151`) - :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`) - - - +- +- - Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) +- Bug in subtracting :class:`Series` from ``NaT`` incorrectly returning ``NaT`` (:issue:`19158`) Indexing diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 683be4c9aa3a8..39f9437f0cecf 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -156,7 +156,7 @@ cdef class _NaT(datetime): neg_other = -other return self + neg_other - elif getattr(other, '_typ', None) in ['period', + elif getattr(other, '_typ', None) in ['period', 'series', 'periodindex', 'dateoffset']: return NotImplemented diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index 73e8c783ba882..a6b217a37bd0c 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -314,8 +314,7 @@ def test_nat_arithmetic_index(): @pytest.mark.parametrize('box, assert_func', [ (TimedeltaIndex, tm.assert_index_equal), - pytest.param(Series, tm.assert_series_equal, - marks=pytest.mark.xfail(reason='NaT - Series returns NaT')) + (Series, tm.assert_series_equal) ]) def test_nat_arithmetic_td64_vector(box, assert_func): # GH#19124
Tests will be added after #19139 is merged, since largely that will involve removing an "xfail" from tests that already exist there. - [x] closes #19158 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19251
2018-01-15T19:02:06Z
2018-01-16T11:17:41Z
2018-01-16T11:17:41Z
2018-02-11T21:59:11Z
CLN: Remove unused core.internals methods
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 2482148af9308..5c3481ed6d4ff 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -303,10 +303,6 @@ def getitem_block(self, slicer, new_mgr_locs=None): def shape(self): return self.values.shape - @property - def itemsize(self): - return self.values.itemsize - @property def dtype(self): return self.values.dtype @@ -327,21 +323,6 @@ def concat_same_type(self, to_concat, placement=None): return self.make_block_same_class( values, placement=placement or slice(0, len(values), 1)) - def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, - limit=None, mask_info=None): - """ - Reindex using pre-computed indexer information - """ - if axis < 1: - raise AssertionError( - 'axis must be at least 1, got {axis}'.format(axis=axis)) - if fill_value is None: - fill_value = self.fill_value - - new_values = algos.take_nd(self.values, indexer, axis, - fill_value=fill_value, mask_info=mask_info) - return self.make_block(new_values) - def iget(self, i): return self.values[i] @@ -936,11 +917,8 @@ def putmask(self, mask, new, align=True, inplace=False, axis=0, new_values = self.values if inplace else self.values.copy() - if hasattr(new, 'reindex_axis'): - new = new.values - - if hasattr(mask, 'reindex_axis'): - mask = mask.values + new = getattr(new, 'values', new) + mask = getattr(mask, 'values', mask) # if we are passed a scalar None, convert it here if not is_list_like(new) and isna(new) and not self.is_object: @@ -1297,8 +1275,7 @@ def eval(self, func, other, errors='raise', try_cast=False, mgr=None): orig_other = other values = self.values - if hasattr(other, 'reindex_axis'): - other = other.values + other = getattr(other, 'values', other) # make sure that we can broadcast is_transposed = False @@ -1446,11 +1423,8 @@ def where(self, other, cond, align=True, errors='raise', if transpose: values = values.T - if hasattr(other, 'reindex_axis'): - other = other.values - - if 
hasattr(cond, 'reindex_axis'): - cond = cond.values + other = getattr(other, 'values', other) + cond = getattr(cond, 'values', cond) # If the default broadcasting would go in the wrong direction, then # explicitly reshape other instead @@ -2630,9 +2604,8 @@ def external_values(self): def get_values(self, dtype=None): # return object dtype as Timestamps with the zones if is_object_dtype(dtype): - f = lambda x: lib.Timestamp(x, tz=self.values.tz) return lib.map_infer( - self.values.ravel(), f).reshape(self.values.shape) + self.values.ravel(), self._box_func).reshape(self.values.shape) return self.values def _slice(self, slicer): @@ -2760,10 +2733,6 @@ class SparseBlock(NonConsolidatableMixIn, Block): def shape(self): return (len(self.mgr_locs), self.sp_index.length) - @property - def itemsize(self): - return self.dtype.itemsize - @property def fill_value(self): # return np.nan @@ -2887,22 +2856,6 @@ def shift(self, periods, axis=0, mgr=None): return [self.make_block_same_class(new_values, placement=self.mgr_locs)] - def reindex_axis(self, indexer, method=None, axis=1, fill_value=None, - limit=None, mask_info=None): - """ - Reindex using pre-computed indexer information - """ - if axis < 1: - raise AssertionError( - 'axis must be at least 1, got {axis}'.format(axis=axis)) - - # taking on the 0th axis always here - if fill_value is None: - fill_value = self.fill_value - return self.make_block_same_class(self.values.take(indexer), - fill_value=fill_value, - placement=self.mgr_locs) - def sparse_reindex(self, new_index): """ sparse reindex and return a new block current reindex only works for float64 dtype! 
""" @@ -3324,7 +3277,7 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, aligned_args = dict((k, kwargs[k]) for k in align_keys - if hasattr(kwargs[k], 'reindex_axis')) + if hasattr(kwargs[k], 'values')) for b in self.blocks: if filter is not None: @@ -4552,10 +4505,6 @@ def asobject(self): """ return self._block.get_values(dtype=object) - @property - def itemsize(self): - return self._block.values.itemsize - @property def _can_hold_na(self): return self._block._can_hold_na
De-duplicate some bits of DatetimeBlock and DatetimeTZBlock. - [x] closes #19243 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19250
2018-01-15T18:48:20Z
2018-01-21T19:30:14Z
2018-01-21T19:30:14Z
2018-01-23T04:40:18Z
DEPR: change Panel DeprecationWarning -> FutureWarning
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 14949267fc37d..402b85ceb681a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -225,6 +225,34 @@ If installed, we now require: | openpyxl | 2.4.0 | | +-----------------+-----------------+----------+ +.. _whatsnew_0230.api_breaking.deprecate_panel: + +Deprecate Panel +^^^^^^^^^^^^^^^ + +``Panel`` was deprecated in the 0.20.x release, showing as a ``DeprecationWarning``. Using ``Panel`` will now show a ``FutureWarning``. The recommended way to represent 3-D data are +with a ``MultiIndex`` on a ``DataFrame`` via the :meth:`~Panel.to_frame` or with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas +provides a :meth:`~Panel.to_xarray` method to automate this conversion. For more details see :ref:`Deprecate Panel <dsintro.deprecate_panel>` documentation. (:issue:`13563`, :issue:`18324`). + +.. ipython:: python + :okwarning: + + p = tm.makePanel() + p + +Convert to a MultiIndex DataFrame + +.. ipython:: python + + p.to_frame() + +Convert to an xarray DataArray + +.. ipython:: python + :okwarning: + + p.to_xarray() + Build Changes ^^^^^^^^^^^^^ @@ -290,6 +318,7 @@ Deprecations - :func:`read_excel` has deprecated the ``skip_footer`` parameter. Use ``skipfooter`` instead (:issue:`18836`) - The ``is_copy`` attribute is deprecated and will be removed in a future version (:issue:`18801`). + .. 
_whatsnew_0230.prior_deprecations: Removal of prior version deprecations/changes diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 26e7c192ad0af..1df69576e6ff2 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -151,7 +151,7 @@ def __init__(self, data=None, items=None, major_axis=None, minor_axis=None, "http://xarray.pydata.org/en/stable/.\n" "Pandas provides a `.to_xarray()` method to help " "automate this conversion.\n", - DeprecationWarning, stacklevel=3) + FutureWarning, stacklevel=3) self._init_data(data=data, items=items, major_axis=major_axis, minor_axis=minor_axis, copy=copy, dtype=dtype) diff --git a/pandas/tests/generic/test_label_or_level_utils.py b/pandas/tests/generic/test_label_or_level_utils.py index 1ad1b06aaefa2..8b133e654a869 100644 --- a/pandas/tests/generic/test_label_or_level_utils.py +++ b/pandas/tests/generic/test_label_or_level_utils.py @@ -46,7 +46,7 @@ def df_duplabels(df): @pytest.fixture def panel(): - with tm.assert_produces_warning(DeprecationWarning, + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): return pd.Panel()
closes #18324
https://api.github.com/repos/pandas-dev/pandas/pulls/19247
2018-01-15T13:39:04Z
2018-01-16T00:14:37Z
2018-01-16T00:14:37Z
2018-01-16T00:15:01Z
CLN: Refactor Index._validate_names()
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f634d809560ee..c4f413cca7fab 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -785,7 +785,13 @@ def copy(self, name=None, deep=False, dtype=None, **kwargs): new_index = self._shallow_copy() names = kwargs.get('names') - names = self._validate_names(name=name, names=names, deep=deep) + + if name is None and names is None: + from copy import deepcopy + names = deepcopy(self.names) if deep else self.names + else: + names = self._validate_names(name=name, names=names) + new_index = new_index.set_names(names) if dtype: @@ -800,24 +806,21 @@ def __deepcopy__(self, memo=None): memo = {} return self.copy(deep=True) - def _validate_names(self, name=None, names=None, deep=False): + def _validate_names(self, name=None, names=None): """ - Handles the quirks of having a singular 'name' parameter for general - Index and plural 'names' parameter for MultiIndex. + Returns valid `names` considering both `names` and `name` + or raises TypeError in case of wrong arguments passed. 
""" - from copy import deepcopy if names is not None and name is not None: - raise TypeError("Can only provide one of `names` and `name`") - elif names is None and name is None: - return deepcopy(self.names) if deep else self.names - elif names is not None: - if not is_list_like(names): - raise TypeError("Must pass list-like as `names`.") - return names - else: - if not is_list_like(name): - return [name] - return name + raise TypeError("Can provide only one of names and name arguments") + + if names is not None and not is_list_like(names): + raise TypeError("names must be list-like") + + if name is not None: + return name if is_list_like(name) else [name] + + return names def __unicode__(self): """ diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 5739c8dfd8b53..cabe159be6715 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -427,11 +427,16 @@ def copy(self, names=None, dtype=None, levels=None, labels=None, ``deep``, but if ``deep`` is passed it will attempt to deepcopy. This could be potentially expensive on large MultiIndex objects. """ + from copy import deepcopy + name = kwargs.get('name') - names = self._validate_names(name=name, names=names, deep=deep) + + if name is None and names is None: + names = deepcopy(self.names) if deep else self.names + else: + names = self._validate_names(name=name, names=names) if deep: - from copy import deepcopy if levels is None: levels = deepcopy(self.levels) if labels is None:
- [x] closes #19171 - [x] tests passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/19246
2018-01-15T12:42:02Z
2018-01-24T10:27:33Z
null
2018-02-12T12:38:08Z
Doc: Adds example of categorical data for efficient storage and consistency across DataFrames
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 74b21c21252ec..6c8061f5cbdfd 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -152,7 +152,7 @@ functionality below. Set logic on the other axes ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When gluing together multiple DataFrames, you have a choice of how to handle +When gluing together multiple ``DataFrame``s, you have a choice of how to handle the other axes (other than the one being concatenated). This can be done in the following three ways: @@ -323,13 +323,6 @@ the name of the ``Series``. labels=['df1', 's1'], vertical=False); plt.close('all'); -.. note:: - - Since we're concatenating a ``Series`` to a ``DataFrame``, we could have - achieved the same result with :meth:`DataFrame.assign`. To concatenate an - arbitrary number of pandas objects (``DataFrame`` or ``Series``), use - ``concat``. - If unnamed ``Series`` are passed they will be numbered consecutively. .. ipython:: python @@ -583,7 +576,7 @@ and ``right`` is a subclass of DataFrame, the return type will still be ``merge`` is a function in the pandas namespace, and it is also available as a ``DataFrame`` instance method :meth:`~DataFrame.merge`, with the calling -``DataFrame`` being implicitly considered the left object in the join. +``DataFrame `` being implicitly considered the left object in the join. The related :meth:`~DataFrame.join` method, uses ``merge`` internally for the index-on-index (by default) and column(s)-on-index join. If you are joining on @@ -636,7 +629,7 @@ key combination: Here is a more complicated example with multiple join keys. Only the keys appearing in ``left`` and ``right`` are present (the intersection), since -``how='inner'`` by default. +``how='inner'```by default. .. 
ipython:: python @@ -721,7 +714,32 @@ either the left or right tables, the values in the joined table will be labels=['left', 'right'], vertical=False); plt.close('all'); -Here is another example with duplicate join keys in DataFrames: +To join a Series and a DataFrame, the Series has to be transformed into a DataFrame first: + +.. ipython:: python + + df = pd.DataFrame({"Let": ["A", "B", "C"], "Num": [1, 2, 3]}) + df + + # The series has a multi-index with levels corresponding to columns in the DataFrame we want to merge with + ser = pd.Series( + ['a', 'b', 'c', 'd', 'e', 'f'], + index=pd.MultiIndex.from_arrays([["A", "B", "C"]*2, [1, 2, 3, 4, 5, 6]]) + ) + ser + + # Name the row index levels + ser.index.names=['Let','Num'] + ser + + # reset_index turns the multi-level row index into columns, which requires a DataFrame + df2 = ser.reset_index() + type(df2) + + # Now we merge the DataFrames + pd.merge(df, df2, on=['Let','Num']) + +Here is another example with duplicate join keys in ``DataFrame``s: .. ipython:: python @@ -1202,7 +1220,7 @@ Overlapping value columns ~~~~~~~~~~~~~~~~~~~~~~~~~ The merge ``suffixes`` argument takes a tuple of list of strings to append to -overlapping column names in the input ``DataFrame``\ s to disambiguate the result +overlapping column names in the input ``DataFrame``s to disambiguate the result columns: .. ipython:: python
- [X] closes #12509
https://api.github.com/repos/pandas-dev/pandas/pulls/19245
2018-01-15T06:00:54Z
2018-11-01T01:37:51Z
null
2018-11-01T01:37:51Z
BUG: unsupported type Interval when writing dataframe to excel
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 246eab386b2ab..52d8cf5f0a66d 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -489,6 +489,8 @@ I/O - Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`) - Bug in :func:`read_json` where large numeric values were causing an ``OverflowError`` (:issue:`18842`) - Bug in :func:`DataFrame.to_parquet` where an exception was raised if the write destination is S3 (:issue:`19134`) +- :class:`Interval` now supported in :func:`DataFrame.to_excel` for all Excel file types (:issue:`19242`) +- :class:`Timedelta` now supported in :func:`DataFrame.to_excel` for xls file type (:issue:`19242`, :issue:`9155`) - Plotting diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 92b29c8da7e3f..b03987e933bff 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -4,7 +4,7 @@ # --------------------------------------------------------------------- # ExcelFile class -from datetime import datetime, date, time, MINYEAR +from datetime import datetime, date, time, MINYEAR, timedelta import os import abc @@ -21,7 +21,6 @@ from pandas.io.common import (_is_url, _urlopen, _validate_header_arg, get_filepath_or_buffer, _NA_VALUES, _stringify_path) -from pandas.core.indexes.period import Period import pandas._libs.json as json from pandas.compat import (map, zip, reduce, range, lrange, u, add_metaclass, string_types, OrderedDict) @@ -777,17 +776,30 @@ def _pop_header_name(row, index_col): def _conv_value(val): - # Convert numpy types to Python types for the Excel writers. + """ Convert numpy types to Python types for the Excel writers. + + Parameters + ---------- + val : object + Value to be written into cells + + Returns + ------- + If val is a numpy int, float, or bool, then the equivalent Python + types are returned. 
:obj:`datetime`, :obj:`date`, and :obj:`timedelta` + are passed and formatting must be handled in the writer. :obj:`str` + representation is returned for all other types. + """ if is_integer(val): val = int(val) elif is_float(val): val = float(val) elif is_bool(val): val = bool(val) - elif isinstance(val, Period): - val = "{val}".format(val=val) - elif is_list_like(val): - val = str(val) + elif isinstance(val, (datetime, date, timedelta)): + pass + else: + val = compat.to_str(val) return val @@ -1460,6 +1472,9 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0, num_format_str = self.datetime_format elif isinstance(cell.val, date): num_format_str = self.date_format + elif isinstance(cell.val, timedelta): + delta = cell.val + val = delta.total_seconds() / float(86400) stylekey = json.dumps(cell.style) if num_format_str: diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 3263f71dea3c3..efbabcfd8fc4c 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -2,7 +2,7 @@ import os import sys import warnings -from datetime import datetime, date, time +from datetime import datetime, date, time, timedelta from distutils.version import LooseVersion from functools import partial from warnings import catch_warnings @@ -1440,6 +1440,56 @@ def test_excel_date_datetime_format(self): # to use df_expected to check the result tm.assert_frame_equal(rs2, df_expected) + def test_to_excel_interval_no_labels(self): + # GH19242 - test writing Interval without labels + _skip_if_no_xlrd() + + with ensure_clean(self.ext) as path: + frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), + dtype=np.int64) + expected = frame.copy() + frame['new'] = pd.cut(frame[0], 10) + expected['new'] = pd.cut(expected[0], 10).astype(str) + frame.to_excel(path, 'test1') + reader = ExcelFile(path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(expected, recons) + + def test_to_excel_interval_labels(self): + # 
GH19242 - test writing Interval with labels + _skip_if_no_xlrd() + + with ensure_clean(self.ext) as path: + frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), + dtype=np.int64) + expected = frame.copy() + intervals = pd.cut(frame[0], 10, labels=['A', 'B', 'C', 'D', 'E', + 'F', 'G', 'H', 'I', 'J']) + frame['new'] = intervals + expected['new'] = pd.Series(list(intervals)) + frame.to_excel(path, 'test1') + reader = ExcelFile(path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(expected, recons) + + def test_to_excel_timedelta(self): + # GH 19242, GH9155 - test writing timedelta to xls + _skip_if_no_xlrd() + + with ensure_clean('.xls') as path: + frame = DataFrame(np.random.randint(-10, 10, size=(20, 1)), + columns=['A'], + dtype=np.int64 + ) + expected = frame.copy() + frame['new'] = frame['A'].apply(lambda x: timedelta(seconds=x)) + expected['new'] = expected['A'].apply( + lambda x: timedelta(seconds=x).total_seconds() / float(86400)) + frame.to_excel(path, 'test1') + reader = ExcelFile(path) + recons = read_excel(reader, 'test1') + tm.assert_frame_equal(expected, recons) + def test_to_excel_periodindex(self): _skip_if_no_xlrd()
- [x] closes #19242 closes #9155 - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19244
2018-01-15T04:08:46Z
2018-01-27T16:56:34Z
2018-01-27T16:56:33Z
2018-01-27T16:56:50Z
BUG: Fixes rounding error in Timestamp.floor()
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ca625f492b61f..7800a7c391940 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -551,6 +551,7 @@ Datetimelike - Bug in :func:`~DataFrame.pct_change` using ``periods`` and ``freq`` returned different length outputs (:issue:`7292`) - Bug in comparison of :class:`DatetimeIndex` against ``None`` or ``datetime.date`` objects raising ``TypeError`` for ``==`` and ``!=`` comparisons instead of all-``False`` and all-``True``, respectively (:issue:`19301`) - Bug in :class:`Timestamp` and :func:`to_datetime` where a string representing a barely out-of-bounds timestamp would be incorrectly rounded down instead of raising ``OutOfBoundsDatetime`` (:issue:`19382`) +- Bug in :func:`Timestamp.floor` :func:`DatetimeIndex.floor` where time stamps far in the future and past were not rounded correctly (:issue:`19206`) - Timezones diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index b9be9c16eb6c3..6d91a96e32db7 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -59,6 +59,46 @@ cdef inline object create_timestamp_from_ts(int64_t value, return ts_base +def round_ns(values, rounder, freq): + """ + Applies rounding function at given frequency + + Parameters + ---------- + values : int, :obj:`ndarray` + rounder : function + freq : str, obj + + Returns + ------- + int or :obj:`ndarray` + """ + from pandas.tseries.frequencies import to_offset + unit = to_offset(freq).nanos + if unit < 1000: + # for nano rounding, work with the last 6 digits separately + # due to float precision + buff = 1000000 + r = (buff * (values // buff) + unit * + (rounder((values % buff) * (1 / float(unit)))).astype('i8')) + else: + if unit % 1000 != 0: + msg = 'Precision will be lost using frequency: {}' + warnings.warn(msg.format(freq)) + + # GH19206 + # to deal with round-off when unit is large + if unit >= 1e9: + divisor 
= 10 ** int(np.log10(unit / 1e7)) + else: + divisor = 10 + + r = (unit * rounder((values * (divisor / float(unit))) / divisor) + .astype('i8')) + + return r + + # This is PITA. Because we inherit from datetime, which has very specific # construction requirements, we need to do object instantiation in python # (see Timestamp class above). This will serve as a C extension type that @@ -590,28 +630,12 @@ class Timestamp(_Timestamp): return create_timestamp_from_ts(ts.value, ts.dts, ts.tzinfo, freq) def _round(self, freq, rounder): - - cdef: - int64_t unit, r, value, buff = 1000000 - object result - - from pandas.tseries.frequencies import to_offset - unit = to_offset(freq).nanos if self.tz is not None: value = self.tz_localize(None).value else: value = self.value - if unit < 1000 and unit % 1000 != 0: - # for nano rounding, work with the last 6 digits separately - # due to float precision - r = (buff * (value // buff) + unit * - (rounder((value % buff) / float(unit))).astype('i8')) - elif unit >= 1000 and unit % 1000 != 0: - msg = 'Precision will be lost using frequency: {}' - warnings.warn(msg.format(freq)) - r = (unit * rounder(value / float(unit)).astype('i8')) - else: - r = (unit * rounder(value / float(unit)).astype('i8')) + + r = round_ns(value, rounder, freq) result = Timestamp(r, unit='ns') if self.tz is not None: result = result.tz_localize(self.tz) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 8e77c7a7fa48c..4a526955d9bf4 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -36,6 +36,7 @@ from pandas._libs import lib, iNaT, NaT from pandas._libs.tslibs.period import Period from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds +from pandas._libs.tslibs.timestamps import round_ns from pandas.core.indexes.base import Index, _index_shared_docs from pandas.util._decorators import Appender, cache_readonly @@ -90,23 +91,9 @@ class TimelikeOps(object): """) def 
_round(self, freq, rounder): - - from pandas.tseries.frequencies import to_offset - unit = to_offset(freq).nanos # round the local times values = _ensure_datetimelike_to_i8(self) - if unit < 1000 and unit % 1000 != 0: - # for nano rounding, work with the last 6 digits separately - # due to float precision - buff = 1000000 - result = (buff * (values // buff) + unit * - (rounder((values % buff) / float(unit))).astype('i8')) - elif unit >= 1000 and unit % 1000 != 0: - msg = 'Precision will be lost using frequency: {}' - warnings.warn(msg.format(freq)) - result = (unit * rounder(values / float(unit)).astype('i8')) - else: - result = (unit * rounder(values / float(unit)).astype('i8')) + result = round_ns(values, rounder, freq) result = self._maybe_mask_results(result, fill_value=NaT) attribs = self._get_attributes_dict() diff --git a/pandas/tests/indexes/datetimes/test_scalar_compat.py b/pandas/tests/indexes/datetimes/test_scalar_compat.py index 111f68ba14775..83e7a0cd68d63 100644 --- a/pandas/tests/indexes/datetimes/test_scalar_compat.py +++ b/pandas/tests/indexes/datetimes/test_scalar_compat.py @@ -126,6 +126,27 @@ def test_round(self, tz): ts = '2016-10-17 12:00:00.001501031' DatetimeIndex([ts]).round('1010ns') + @pytest.mark.parametrize('test_input, rounder, freq, expected', [ + (['2117-01-01 00:00:45'], 'floor', '15s', ['2117-01-01 00:00:45']), + (['2117-01-01 00:00:45'], 'ceil', '15s', ['2117-01-01 00:00:45']), + (['2117-01-01 00:00:45.000000012'], 'floor', '10ns', + ['2117-01-01 00:00:45.000000010']), + (['1823-01-01 00:00:01.000000012'], 'ceil', '10ns', + ['1823-01-01 00:00:01.000000020']), + (['1823-01-01 00:00:01'], 'floor', '1s', ['1823-01-01 00:00:01']), + (['1823-01-01 00:00:01'], 'ceil', '1s', ['1823-01-01 00:00:01']), + (('NaT', '1823-01-01 00:00:01'), 'floor', '1s', + ('NaT', '1823-01-01 00:00:01')), + (('NaT', '1823-01-01 00:00:01'), 'ceil', '1s', + ('NaT', '1823-01-01 00:00:01')) + ]) + def test_ceil_floor_edge(self, tz, test_input, rounder, freq, 
expected): + dt = DatetimeIndex(list(test_input)) + func = getattr(dt, rounder) + result = func(freq) + expected = DatetimeIndex(list(expected)) + assert expected.equals(result) + # ---------------------------------------------------------------- # DatetimeIndex.normalize diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py index 70c7308dd3991..8a6989c909cb2 100644 --- a/pandas/tests/scalar/timestamp/test_unary_ops.py +++ b/pandas/tests/scalar/timestamp/test_unary_ops.py @@ -10,7 +10,7 @@ from pandas.compat import PY3 from pandas._libs.tslibs.frequencies import _INVALID_FREQ_ERROR -from pandas import Timestamp +from pandas import Timestamp, NaT class TestTimestampUnaryOps(object): @@ -93,6 +93,29 @@ def test_round_frequencies(self, freq, expected): result = stamp.round(freq=freq) assert result == expected + @pytest.mark.parametrize('test_input, rounder, freq, expected', [ + ('2117-01-01 00:00:45', 'floor', '15s', '2117-01-01 00:00:45'), + ('2117-01-01 00:00:45', 'ceil', '15s', '2117-01-01 00:00:45'), + ('2117-01-01 00:00:45.000000012', 'floor', '10ns', + '2117-01-01 00:00:45.000000010'), + ('1823-01-01 00:00:01.000000012', 'ceil', '10ns', + '1823-01-01 00:00:01.000000020'), + ('1823-01-01 00:00:01', 'floor', '1s', '1823-01-01 00:00:01'), + ('1823-01-01 00:00:01', 'ceil', '1s', '1823-01-01 00:00:01'), + ('NaT', 'floor', '1s', 'NaT'), + ('NaT', 'ceil', '1s', 'NaT') + ]) + def test_ceil_floor_edge(self, test_input, rounder, freq, expected): + dt = Timestamp(test_input) + func = getattr(dt, rounder) + result = func(freq) + + if dt is NaT: + assert result is NaT + else: + expected = Timestamp(expected) + assert result == expected + def test_ceil(self): dt = Timestamp('20130101 09:10:11') result = dt.ceil('D')
- [x] closes #19206 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19240
2018-01-14T23:53:18Z
2018-02-07T15:25:39Z
2018-02-07T15:25:38Z
2018-02-07T15:30:33Z