| column     | type   | min length | max length |
|------------|--------|------------|------------|
| title      | string | 1          | 185        |
| diff       | string | 0          | 32.2M      |
| body       | string | 0          | 123k       |
| url        | string | 57         | 58         |
| created_at | string | 20         | 20         |
| closed_at  | string | 20         | 20         |
| merged_at  | string | 20         | 20         |
| updated_at | string | 20         | 20         |
Fix exception causes all over the code
diff --git a/doc/make.py b/doc/make.py index 024a748cd28ca..db729853e5834 100755 --- a/doc/make.py +++ b/doc/make.py @@ -83,8 +83,8 @@ def _process_single_doc(self, single_doc): obj = pandas # noqa: F821 for name in single_doc.split("."): obj = getattr(obj, name) - except AttributeError: - raise ImportError(f"Could not import {single_doc}") + except AttributeError as err: + raise ImportError(f"Could not import {single_doc}") from err else: return single_doc[len("pandas.") :] else: diff --git a/pandas/__init__.py b/pandas/__init__.py index 2d3d3f7d92a9c..2b9a461e0e95d 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -37,7 +37,7 @@ f"C extension: {module} not built. If you want to import " "pandas from the source directory, you may need to run " "'python setup.py build_ext --inplace --force' to build the C extensions first." - ) + ) from e from pandas._config import ( get_option, @@ -290,8 +290,8 @@ def __getattr__(self, item): try: return getattr(self.np, item) - except AttributeError: - raise AttributeError(f"module numpy has no attribute {item}") + except AttributeError as err: + raise AttributeError(f"module numpy has no attribute {item}") from err np = __numpy() @@ -306,8 +306,10 @@ def __getattr__(cls, item): try: return getattr(cls.datetime, item) - except AttributeError: - raise AttributeError(f"module datetime has no attribute {item}") + except AttributeError as err: + raise AttributeError( + f"module datetime has no attribute {item}" + ) from err def __instancecheck__(cls, other): return isinstance(other, cls.datetime) diff --git a/pandas/_config/config.py b/pandas/_config/config.py index f1959cd70ed3a..df706bf25097e 100644 --- a/pandas/_config/config.py +++ b/pandas/_config/config.py @@ -213,8 +213,8 @@ def __getattr__(self, key: str): prefix += key try: v = object.__getattribute__(self, "d")[key] - except KeyError: - raise OptionError("No such option") + except KeyError as err: + raise OptionError("No such option") from err if isinstance(v, dict): return DictWrapper(v, prefix) else: diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 02a979aea6c6b..7201629cb086e 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -686,8 +686,8 @@ def value_counts( values = Series(values) try: ii = cut(values, bins, include_lowest=True) - except TypeError: - raise TypeError("bins argument only works with numeric data.") + except TypeError as err: + raise TypeError("bins argument only works with numeric data.") from err # count, remove nulls (from the index), and but the bins result = ii.value_counts(dropna=dropna) diff --git a/pandas/core/arrays/_ranges.py b/pandas/core/arrays/_ranges.py index 20e4cf70eddcf..471bfa736d4b9 100644 --- a/pandas/core/arrays/_ranges.py +++ b/pandas/core/arrays/_ranges.py @@ -121,8 +121,8 @@ def _generate_range_overflow_safe( # we cannot salvage the operation by recursing, so raise try: addend = np.uint64(periods) * np.uint64(np.abs(stride)) - except FloatingPointError: - raise OutOfBoundsDatetime(msg) + except FloatingPointError as err: + raise OutOfBoundsDatetime(msg) from err if np.abs(addend) <= i64max: # relatively easy case without casting concerns diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index a5048e3aae899..4167c75eb5782 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -350,7 +350,7 @@ def __init__( if dtype.categories is None: try: codes, categories = factorize(values, sort=True) - except TypeError: + except TypeError as err: 
codes, categories = factorize(values, sort=False) if dtype.ordered: # raise, as we don't have a sortable data structure and so @@ -359,13 +359,13 @@ def __init__( "'values' is not ordered, please " "explicitly specify the categories order " "by passing in a categories argument." - ) - except ValueError: + ) from err + except ValueError as err: # FIXME raise NotImplementedError( "> 1 ndim Categorical are not supported at this time" - ) + ) from err # we're inferring from values dtype = CategoricalDtype(categories, dtype.ordered) diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index a75536e46e60d..56939cda6d21c 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -2080,11 +2080,11 @@ def _infer_tz_from_endpoints(start, end, tz): """ try: inferred_tz = timezones.infer_tzinfo(start, end) - except AssertionError: + except AssertionError as err: # infer_tzinfo raises AssertionError if passed mismatched timezones raise TypeError( "Start and end cannot both be tz-aware with different timezones" - ) + ) from err inferred_tz = timezones.maybe_get_tz(inferred_tz) tz = timezones.maybe_get_tz(tz) diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index f1e0882def13b..e2b66b1a006e4 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -154,7 +154,7 @@ def safe_cast(values, dtype, copy: bool): """ try: return values.astype(dtype, casting="safe", copy=copy) - except TypeError: + except TypeError as err: casted = values.astype(dtype, copy=copy) if (casted == values).all(): @@ -162,7 +162,7 @@ def safe_cast(values, dtype, copy: bool): raise TypeError( f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}" - ) + ) from err def coerce_to_array( @@ -199,8 +199,8 @@ def coerce_to_array( if not issubclass(type(dtype), _IntegerDtype): try: dtype = _dtypes[str(np.dtype(dtype))] - except KeyError: - raise ValueError(f"invalid dtype specified {dtype}") + except KeyError as err: + raise ValueError(f"invalid dtype specified {dtype}") from err if isinstance(values, IntegerArray): values, mask = values._data, values._mask diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index f5167f470b056..51c94d5059f8b 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -448,12 +448,12 @@ def from_tuples(cls, data, closed="right", copy=False, dtype=None): try: # need list of length 2 tuples, e.g. [(0, 1), (1, 2), ...] lhs, rhs = d - except ValueError: + except ValueError as err: msg = f"{name}.from_tuples requires tuples of length 2, got {d}" - raise ValueError(msg) - except TypeError: + raise ValueError(msg) from err + except TypeError as err: msg = f"{name}.from_tuples received an invalid item, {d}" - raise TypeError(msg) + raise TypeError(msg) from err left.append(lhs) right.append(rhs) @@ -538,10 +538,10 @@ def __setitem__(self, key, value): try: array = IntervalArray(value) value_left, value_right = array.left, array.right - except TypeError: + except TypeError as err: # wrong type: not interval or NA msg = f"'value' should be an interval type, got {type(value)} instead." 
- raise TypeError(msg) + raise TypeError(msg) from err key = check_array_indexer(self, key) # Need to ensure that left and right are updated atomically, so we're @@ -688,20 +688,20 @@ def astype(self, dtype, copy=True): try: new_left = self.left.astype(dtype.subtype) new_right = self.right.astype(dtype.subtype) - except TypeError: + except TypeError as err: msg = ( f"Cannot convert {self.dtype} to {dtype}; subtypes are incompatible" ) - raise TypeError(msg) + raise TypeError(msg) from err return self._shallow_copy(new_left, new_right) elif is_categorical_dtype(dtype): return Categorical(np.asarray(self)) # TODO: This try/except will be repeated. try: return np.asarray(self).astype(dtype, copy=copy) - except (TypeError, ValueError): + except (TypeError, ValueError) as err: msg = f"Cannot cast {type(self).__name__} to dtype {dtype}" - raise TypeError(msg) + raise TypeError(msg) from err @classmethod def _concat_same_type(cls, to_concat): @@ -1020,13 +1020,13 @@ def length(self): """ try: return self.right - self.left - except TypeError: + except TypeError as err: # length not defined for some types, e.g. string msg = ( "IntervalArray contains Intervals without defined length, " "e.g. Intervals with string endpoints" ) - raise TypeError(msg) + raise TypeError(msg) from err @property def mid(self): @@ -1100,11 +1100,11 @@ def __arrow_array__(self, type=None): try: subtype = pyarrow.from_numpy_dtype(self.dtype.subtype) - except TypeError: + except TypeError as err: raise TypeError( f"Conversion to arrow with subtype '{self.dtype.subtype}' " "is not supported" - ) + ) from err interval_type = ArrowIntervalType(subtype, self.closed) storage_array = pyarrow.StructArray.from_arrays( [ diff --git a/pandas/core/arrays/sparse/dtype.py b/pandas/core/arrays/sparse/dtype.py index 86869f50aab8e..135514e334920 100644 --- a/pandas/core/arrays/sparse/dtype.py +++ b/pandas/core/arrays/sparse/dtype.py @@ -217,8 +217,8 @@ def construct_from_string(cls, string: str) -> "SparseDtype": if string.startswith("Sparse"): try: sub_type, has_fill_value = cls._parse_subtype(string) - except ValueError: - raise TypeError(msg) + except ValueError as err: + raise TypeError(msg) from err else: result = SparseDtype(sub_type) msg = ( diff --git a/pandas/core/arrays/sparse/scipy_sparse.py b/pandas/core/arrays/sparse/scipy_sparse.py index e77256a5aaadd..eafd782dc9b9c 100644 --- a/pandas/core/arrays/sparse/scipy_sparse.py +++ b/pandas/core/arrays/sparse/scipy_sparse.py @@ -134,8 +134,10 @@ def _coo_to_sparse_series(A, dense_index: bool = False): try: s = Series(A.data, MultiIndex.from_arrays((A.row, A.col))) - except AttributeError: - raise TypeError(f"Expected coo_matrix. Got {type(A).__name__} instead.") + except AttributeError as err: + raise TypeError( + f"Expected coo_matrix. Got {type(A).__name__} instead." + ) from err s = s.sort_index() s = s.astype(SparseDtype(s.dtype)) if dense_index: diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index a7b16fd86468e..81fc934748d3e 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -451,10 +451,10 @@ def _addsub_object_array(self, other, op): # subclasses. 
Incompatible classes will raise AttributeError, # which we re-raise as TypeError return super()._addsub_object_array(other, op) - except AttributeError: + except AttributeError as err: raise TypeError( f"Cannot add/subtract non-tick DateOffset to {type(self).__name__}" - ) + ) from err def __mul__(self, other): other = lib.item_from_zerodim(other) diff --git a/pandas/core/base.py b/pandas/core/base.py index 85424e35fa0e0..3c6f24dbe363a 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -458,7 +458,7 @@ def is_any_frame() -> bool: # return a MI Series try: result = concat(result) - except TypeError: + except TypeError as err: # we want to give a nice error here if # we have non-same sized objects, so # we don't automatically broadcast @@ -467,7 +467,7 @@ def is_any_frame() -> bool: "cannot perform both aggregation " "and transformation operations " "simultaneously" - ) + ) from err return result, True @@ -553,7 +553,7 @@ def _aggregate_multiple_funcs(self, arg, _axis): try: return concat(results, keys=keys, axis=1, sort=False) - except TypeError: + except TypeError as err: # we are concatting non-NDFrame objects, # e.g. a list of scalars @@ -562,7 +562,9 @@ def _aggregate_multiple_funcs(self, arg, _axis): result = Series(results, index=keys, name=self.name) if is_nested_object(result): - raise ValueError("cannot combine transform and aggregation operations") + raise ValueError( + "cannot combine transform and aggregation operations" + ) from err return result def _get_cython_func(self, arg: str) -> Optional[str]: diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index f6947d5ec6233..fe3d3f49f16a7 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -362,8 +362,8 @@ def eval( if not inplace and first_expr: try: target = env.target.copy() - except AttributeError: - raise ValueError("Cannot return a copy of the target") + except AttributeError as err: + raise ValueError("Cannot return a copy of the target") from err else: target = env.target @@ -375,8 +375,8 @@ def eval( with warnings.catch_warnings(record=True): # TODO: Filter the warnings we actually care about here. 
target[assigner] = ret - except (TypeError, IndexError): - raise ValueError("Cannot assign expression output to target") + except (TypeError, IndexError) as err: + raise ValueError("Cannot assign expression output to target") from err if not resolvers: resolvers = ({assigner: ret},) diff --git a/pandas/core/computation/ops.py b/pandas/core/computation/ops.py index 7ed089b283903..bc9ff7c44b689 100644 --- a/pandas/core/computation/ops.py +++ b/pandas/core/computation/ops.py @@ -372,12 +372,12 @@ def __init__(self, op: str, lhs, rhs): try: self.func = _binary_ops_dict[op] - except KeyError: + except KeyError as err: # has to be made a list for python3 keys = list(_binary_ops_dict.keys()) raise ValueError( f"Invalid binary operator {repr(op)}, valid operators are {keys}" - ) + ) from err def __call__(self, env): """ @@ -550,11 +550,11 @@ def __init__(self, op: str, operand): try: self.func = _unary_ops_dict[op] - except KeyError: + except KeyError as err: raise ValueError( f"Invalid unary operator {repr(op)}, " f"valid operators are {_unary_ops_syms}" - ) + ) from err def __call__(self, env): operand = self.operand(env) diff --git a/pandas/core/computation/parsing.py b/pandas/core/computation/parsing.py index 92a2c20cd2a9e..418fc7d38d08f 100644 --- a/pandas/core/computation/parsing.py +++ b/pandas/core/computation/parsing.py @@ -185,7 +185,7 @@ def tokenize_string(source: str) -> Iterator[Tuple[int, str]]: yield tokenize_backtick_quoted_string( token_generator, source, string_start=start[1] + 1 ) - except Exception: - raise SyntaxError(f"Failed to parse backticks in '{source}'.") + except Exception as err: + raise SyntaxError(f"Failed to parse backticks in '{source}'.") from err else: yield toknum, tokval diff --git a/pandas/core/computation/pytables.py b/pandas/core/computation/pytables.py index 828ec11c2bd38..653d014775386 100644 --- a/pandas/core/computation/pytables.py +++ b/pandas/core/computation/pytables.py @@ -424,8 +424,10 @@ def visit_Subscript(self, node, **kwargs): try: return self.const_type(value[slobj], self.env) - except TypeError: - raise ValueError(f"cannot subscript {repr(value)} with {repr(slobj)}") + except TypeError as err: + raise ValueError( + f"cannot subscript {repr(value)} with {repr(slobj)}" + ) from err def visit_Attribute(self, node, **kwargs): attr = node.attr @@ -575,18 +577,18 @@ def evaluate(self): """ create and return the numexpr condition and filter """ try: self.condition = self.terms.prune(ConditionBinOp) - except AttributeError: + except AttributeError as err: raise ValueError( f"cannot process expression [{self.expr}], [{self}] " "is not a valid condition" - ) + ) from err try: self.filter = self.terms.prune(FilterBinOp) - except AttributeError: + except AttributeError as err: raise ValueError( f"cannot process expression [{self.expr}], [{self}] " "is not a valid filter" - ) + ) from err return self.condition, self.filter diff --git a/pandas/core/computation/scope.py b/pandas/core/computation/scope.py index 937c81fdeb8d6..83bf92ad737e4 100644 --- a/pandas/core/computation/scope.py +++ b/pandas/core/computation/scope.py @@ -197,11 +197,11 @@ def resolve(self, key: str, is_local: bool): # these are created when parsing indexing expressions # e.g., df[df > 0] return self.temps[key] - except KeyError: + except KeyError as err: # runtime import because ops imports from scope from pandas.core.computation.ops import UndefinedVariableError - raise UndefinedVariableError(key, is_local) + raise UndefinedVariableError(key, is_local) from err def swapkey(self, 
old_key: str, new_key: str, new_value=None): """ diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index c2b600b5d8c5b..c06bd8a1d6e36 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1573,11 +1573,11 @@ def maybe_cast_to_integer_array(arr, dtype, copy: bool = False): casted = np.array(arr, dtype=dtype, copy=copy) else: casted = arr.astype(dtype, copy=copy) - except OverflowError: + except OverflowError as err: raise OverflowError( "The elements provided in the data cannot all be " f"casted to the dtype {dtype}" - ) + ) from err if np.array_equal(arr, casted): return casted diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index c0420244f671e..df5bac1071985 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -198,8 +198,8 @@ def ensure_python_int(value: Union[int, np.integer]) -> int: try: new_value = int(value) assert new_value == value - except (TypeError, ValueError, AssertionError): - raise TypeError(f"Wrong type {type(value)} for value {value}") + except (TypeError, ValueError, AssertionError) as err: + raise TypeError(f"Wrong type {type(value)} for value {value}") from err return new_value @@ -1801,7 +1801,7 @@ def _validate_date_like_dtype(dtype) -> None: try: typ = np.datetime_data(dtype)[0] except ValueError as e: - raise TypeError(e) + raise TypeError(e) from e if typ != "generic" and typ != "ns": raise ValueError( f"{repr(dtype.name)} is too specific of a frequency, " @@ -1840,9 +1840,9 @@ def pandas_dtype(dtype) -> DtypeObj: # raise a consistent TypeError if failed try: npdtype = np.dtype(dtype) - except SyntaxError: + except SyntaxError as err: # np.dtype uses `eval` which can raise SyntaxError - raise TypeError(f"data type '{dtype}' not understood") + raise TypeError(f"data type '{dtype}' not understood") from err # Any invalid dtype (such as pd.Timestamp) should raise an error. # np.dtype(invalid_type).kind = 0 for such objects. However, this will diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 0730de934b56c..33daf6627721f 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -1040,8 +1040,8 @@ def __new__(cls, subtype=None): try: subtype = pandas_dtype(subtype) - except TypeError: - raise TypeError("could not construct IntervalDtype") + except TypeError as err: + raise TypeError("could not construct IntervalDtype") from err if is_categorical_dtype(subtype) or is_string_dtype(subtype): # GH 19016 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3fc10444ee064..990822913aecf 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2771,11 +2771,11 @@ def _ensure_valid_index(self, value): if not len(self.index) and is_list_like(value) and len(value): try: value = Series(value) - except (ValueError, NotImplementedError, TypeError): + except (ValueError, NotImplementedError, TypeError) as err: raise ValueError( "Cannot set a frame with no defined index " "and a value that cannot be converted to a Series" - ) + ) from err self._data = self._data.reindex_axis( value.index.copy(), axis=1, fill_value=np.nan @@ -3338,7 +3338,7 @@ def reindexer(value): # other raise TypeError( "incompatible index of inserted column with frame index" - ) + ) from err return value if isinstance(value, Series): @@ -4059,8 +4059,10 @@ def set_index( # everything else gets tried as a key; see GH 24969 try: found = col in self.columns - except TypeError: - raise TypeError(f"{err_msg}. 
Received column of type {type(col)}") + except TypeError as err: + raise TypeError( + f"{err_msg}. Received column of type {type(col)}" + ) from err else: if not found: missing.append(col) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ff7c481d550d4..25770c2c6470c 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -335,9 +335,11 @@ def _construct_axes_from_arguments( if a not in kwargs: try: kwargs[a] = args.pop(0) - except IndexError: + except IndexError as err: if require_all: - raise TypeError("not enough/duplicate arguments specified!") + raise TypeError( + "not enough/duplicate arguments specified!" + ) from err axes = {a: kwargs.pop(a, sentinel) for a in cls._AXIS_ORDERS} return axes, kwargs @@ -4792,10 +4794,10 @@ def sample( if axis == 0: try: weights = self[weights] - except KeyError: + except KeyError as err: raise KeyError( "String passed to weights not a valid column" - ) + ) from err else: raise ValueError( "Strings can only be passed to " @@ -7521,8 +7523,8 @@ def at_time( index = self._get_axis(axis) try: indexer = index.indexer_at_time(time, asof=asof) - except AttributeError: - raise TypeError("Index must be DatetimeIndex") + except AttributeError as err: + raise TypeError("Index must be DatetimeIndex") from err return self._take_with_is_copy(indexer, axis=axis) @@ -7609,8 +7611,8 @@ def between_time( include_start=include_start, include_end=include_end, ) - except AttributeError: - raise TypeError("Index must be DatetimeIndex") + except AttributeError as err: + raise TypeError("Index must be DatetimeIndex") from err return self._take_with_is_copy(indexer, axis=axis) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 1bb512aee39e2..fb935c9065b83 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -572,8 +572,8 @@ def true_and_notna(x, *args, **kwargs) -> bool: indices = [ self._get_index(name) for name, group in self if true_and_notna(group) ] - except (ValueError, TypeError): - raise TypeError("the filter must return a boolean result") + except (ValueError, TypeError) as err: + raise TypeError("the filter must return a boolean result") from err filtered = self._apply_filter(indices, dropna) return filtered @@ -1371,9 +1371,9 @@ def _transform_general(self, func, *args, **kwargs): path, res = self._choose_path(fast_path, slow_path, group) except TypeError: return self._transform_item_by_item(obj, fast_path) - except ValueError: + except ValueError as err: msg = "transform must return a scalar value for each group" - raise ValueError(msg) + raise ValueError(msg) from err else: res = path(group) diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index f946f0e63a583..48c00140461b5 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -482,13 +482,13 @@ def get_converter(s): try: # If the original grouper was a tuple return [self.indices[name] for name in names] - except KeyError: + except KeyError as err: # turns out it wasn't a tuple msg = ( "must supply a same-length tuple to get_group " "with multiple grouping keys" ) - raise ValueError(msg) + raise ValueError(msg) from err converters = [get_converter(s) for s in index_sample] names = (tuple(f(n) for f, n in zip(converters, name)) for name in names) diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py index 5e53b061dd1c8..3858e750326b4 100644 --- a/pandas/core/indexers.py +++ b/pandas/core/indexers.py @@ -437,10 +437,10 @@ def check_array_indexer(array: 
AnyArrayLike, indexer: Any) -> Any: elif is_integer_dtype(dtype): try: indexer = np.asarray(indexer, dtype=np.intp) - except ValueError: + except ValueError as err: raise ValueError( "Cannot index with an integer indexer containing NA values" - ) + ) from err else: raise IndexError("arrays used as indices must be of integer or boolean type") diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index c215fdb475ed8..935339c62e218 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -670,8 +670,10 @@ def astype(self, dtype, copy=True): try: casted = self.values.astype(dtype, copy=copy) - except (TypeError, ValueError): - raise TypeError(f"Cannot cast {type(self).__name__} to dtype {dtype}") + except (TypeError, ValueError) as err: + raise TypeError( + f"Cannot cast {type(self).__name__} to dtype {dtype}" + ) from err return Index(casted, name=self.name, dtype=dtype) _index_shared_docs[ @@ -2856,8 +2858,8 @@ def get_loc(self, key, method=None, tolerance=None): casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) - except KeyError: - raise KeyError(key) + except KeyError as err: + raise KeyError(key) from err if tolerance is not None: tolerance = self._convert_tolerance(tolerance, np.asarray(key)) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 72a2aba2d8a88..b86d409d1f59b 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -967,11 +967,11 @@ def insert(self, loc, item): ) arr = type(self._data)._simple_new(new_i8s, dtype=self.dtype, freq=freq) return type(self)._simple_new(arr, name=self.name) - except (AttributeError, TypeError): + except (AttributeError, TypeError) as err: # fall back to object index if isinstance(item, str): return self.astype(object).insert(loc, item) raise TypeError( f"cannot insert {type(self).__name__} with incompatible label" - ) + ) from err diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index e303e487b1a7d..c9fefd46e55c7 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -552,8 +552,8 @@ def get_loc(self, key, method=None, tolerance=None): try: key = self._maybe_cast_for_get_loc(key) - except ValueError: - raise KeyError(key) + except ValueError as err: + raise KeyError(key) from err elif isinstance(key, timedelta): # GH#20464 @@ -574,8 +574,8 @@ def get_loc(self, key, method=None, tolerance=None): try: return Index.get_loc(self, key, method, tolerance) - except KeyError: - raise KeyError(orig_key) + except KeyError as err: + raise KeyError(orig_key) from err def _maybe_cast_for_get_loc(self, key) -> Timestamp: # needed to localize naive datetimes @@ -1040,9 +1040,9 @@ def bdate_range( try: weekmask = weekmask or "Mon Tue Wed Thu Fri" freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask) - except (KeyError, TypeError): + except (KeyError, TypeError) as err: msg = f"invalid custom frequency string: {freq}" - raise ValueError(msg) + raise ValueError(msg) from err elif holidays or weekmask: msg = ( "a custom frequency string is required when holidays or " diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index a7bb4237eab69..d396d1c76f357 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -724,9 +724,9 @@ def get_loc( op_right = le if self.closed_right else lt try: mask = op_left(self.left, key) & op_right(key, self.right) - except TypeError: + except TypeError as 
err: # scalar is not comparable to II subtype --> invalid label - raise KeyError(key) + raise KeyError(key) from err matches = mask.sum() if matches == 0: @@ -805,9 +805,9 @@ def get_indexer( loc = self.get_loc(key) except KeyError: loc = -1 - except InvalidIndexError: + except InvalidIndexError as err: # i.e. non-scalar key - raise TypeError(key) + raise TypeError(key) from err indexer.append(loc) return ensure_platform_int(indexer) @@ -1279,10 +1279,10 @@ def interval_range( if freq is not None and not is_number(freq): try: freq = to_offset(freq) - except ValueError: + except ValueError as err: raise ValueError( f"freq must be numeric or convertible to DateOffset, got {freq}" - ) + ) from err # verify type compatibility if not all( diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4bd462e83a5bc..f70975e19b9a4 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1373,9 +1373,9 @@ def _get_level_number(self, level) -> int: ) try: level = self.names.index(level) - except ValueError: + except ValueError as err: if not is_integer(level): - raise KeyError(f"Level {level} not found") + raise KeyError(f"Level {level} not found") from err elif level < 0: level += self.nlevels if level < 0: @@ -1383,13 +1383,13 @@ def _get_level_number(self, level) -> int: raise IndexError( f"Too many levels: Index has only {self.nlevels} levels, " f"{orig_level} is not a valid level number" - ) + ) from err # Note: levels are zero-based elif level >= self.nlevels: raise IndexError( f"Too many levels: Index has only {self.nlevels} levels, " f"not {level + 1}" - ) + ) from err return level @property @@ -3370,8 +3370,8 @@ def _convert_can_do_setop(self, other): msg = "other must be a MultiIndex or a list of tuples" try: other = MultiIndex.from_tuples(other) - except TypeError: - raise TypeError(msg) + except TypeError as err: + raise TypeError(msg) from err else: result_names = self.names if self.names == other.names else None return other, result_names diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 35a5d99abf4e6..9eeb41f735015 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -515,9 +515,9 @@ def get_loc(self, key, method=None, tolerance=None): try: asdt, reso = parse_time_string(key, self.freq) - except DateParseError: + except DateParseError as err: # A string with invalid format - raise KeyError(f"Cannot interpret '{key}' as period") + raise KeyError(f"Cannot interpret '{key}' as period") from err grp = resolution.Resolution.get_freq_group(reso) freqn = resolution.get_freq_group(self.freq) @@ -540,14 +540,14 @@ def get_loc(self, key, method=None, tolerance=None): try: key = Period(key, freq=self.freq) - except ValueError: + except ValueError as err: # we cannot construct the Period - raise KeyError(orig_key) + raise KeyError(orig_key) from err try: return Index.get_loc(self, key, method, tolerance) - except KeyError: - raise KeyError(orig_key) + except KeyError as err: + raise KeyError(orig_key) from err def _maybe_cast_slice_bound(self, label, side: str, kind: str): """ @@ -578,10 +578,10 @@ def _maybe_cast_slice_bound(self, label, side: str, kind: str): parsed, reso = parse_time_string(label, self.freq) bounds = self._parsed_string_to_bounds(reso, parsed) return bounds[0 if side == "left" else 1] - except ValueError: + except ValueError as err: # string cannot be parsed as datetime-like # TODO: we need tests for this case - raise KeyError(label) + raise KeyError(label) from err elif 
is_integer(label) or is_float(label): self._invalid_indexer("slice", label) @@ -611,8 +611,8 @@ def _get_string_slice(self, key: str, use_lhs: bool = True, use_rhs: bool = True try: return self._partial_date_slice(reso, parsed, use_lhs, use_rhs) - except KeyError: - raise KeyError(key) + except KeyError as err: + raise KeyError(key) from err def insert(self, loc, item): if not isinstance(item, Period) or self.freq != item.freq: diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index 71cc62e6a110b..f621a3c153adf 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -349,8 +349,8 @@ def get_loc(self, key, method=None, tolerance=None): new_key = int(key) try: return self._range.index(new_key) - except ValueError: - raise KeyError(key) + except ValueError as err: + raise KeyError(key) from err raise KeyError(key) return super().get_loc(key, method=method, tolerance=tolerance) @@ -695,10 +695,10 @@ def __getitem__(self, key): new_key = int(key) try: return self._range[new_key] - except IndexError: + except IndexError as err: raise IndexError( f"index {key} is out of bounds for axis 0 with size {len(self)}" - ) + ) from err elif is_scalar(key): raise IndexError( "only integers, slices (`:`), " diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index b3b2bc46f6659..5e4a8e83bd95b 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -232,8 +232,8 @@ def get_loc(self, key, method=None, tolerance=None): elif isinstance(key, str): try: key = Timedelta(key) - except ValueError: - raise KeyError(key) + except ValueError as err: + raise KeyError(key) from err elif isinstance(key, self._data._recognized_scalars) or key is NaT: key = Timedelta(key) diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 5adc65b488399..29cb62a4c591f 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -610,7 +610,7 @@ def _get_setitem_indexer(self, key): # invalid indexer type vs 'other' indexing errors if "cannot do" in str(e): raise - raise IndexingError(key) + raise IndexingError(key) from e def __setitem__(self, key, value): if isinstance(key, tuple): @@ -654,11 +654,11 @@ def _has_valid_tuple(self, key: Tuple): raise IndexingError("Too many indexers") try: self._validate_key(k, i) - except ValueError: + except ValueError as err: raise ValueError( "Location based indexing can only have " f"[{self._valid_types}] types" - ) + ) from err def _is_nested_tuple_indexer(self, tup: Tuple) -> bool: """ @@ -1455,9 +1455,9 @@ def _get_list_axis(self, key, axis: int): """ try: return self.obj._take_with_is_copy(key, axis=axis) - except IndexError: + except IndexError as err: # re-raise with different error message - raise IndexError("positional indexers are out-of-bounds") + raise IndexError("positional indexers are out-of-bounds") from err def _getitem_axis(self, key, axis: int): if isinstance(key, slice): diff --git a/pandas/core/missing.py b/pandas/core/missing.py index b30a7a24f3495..c46aed999f45a 100644 --- a/pandas/core/missing.py +++ b/pandas/core/missing.py @@ -343,10 +343,10 @@ def _interpolate_scipy_wrapper( if method == "pchip": try: alt_methods["pchip"] = interpolate.pchip_interpolate - except AttributeError: + except AttributeError as err: raise ImportError( "Your version of Scipy does not support PCHIP interpolation." 
- ) + ) from err elif method == "akima": alt_methods["akima"] = _akima_interpolate diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a5c609473760d..4398a1569ac56 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -73,7 +73,7 @@ def _f(*args, **kwargs): # e.g. this is normally a disallowed function on # object arrays that contain strings if is_object_dtype(args[0]): - raise TypeError(e) + raise TypeError(e) from e raise return _f @@ -607,9 +607,9 @@ def get_median(x): if not is_float_dtype(values.dtype): try: values = values.astype("f8") - except ValueError: + except ValueError as err: # e.g. "could not convert string to float: 'a'" - raise TypeError + raise TypeError from err if mask is not None: values[mask] = np.nan @@ -1361,9 +1361,9 @@ def _ensure_numeric(x): except (TypeError, ValueError): try: x = x.astype(np.float64) - except ValueError: + except ValueError as err: # GH#29941 we get here with object arrays containing strs - raise TypeError(f"Could not convert {x} to numeric") + raise TypeError(f"Could not convert {x} to numeric") from err else: if not np.any(np.imag(x)): x = x.real @@ -1374,9 +1374,9 @@ def _ensure_numeric(x): # e.g. "1+1j" or "foo" try: x = complex(x) - except ValueError: + except ValueError as err: # e.g. "foo" - raise TypeError(f"Could not convert {x} to numeric") + raise TypeError(f"Could not convert {x} to numeric") from err return x diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index b216a927f65b3..2c9105c52cf9b 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -295,12 +295,12 @@ def na_logical_op(x: np.ndarray, y, op): AttributeError, OverflowError, NotImplementedError, - ): + ) as err: typ = type(y).__name__ raise TypeError( f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array " f"and scalar of type [{typ}]" - ) + ) from err return result.reshape(x.shape) diff --git a/pandas/core/reshape/concat.py b/pandas/core/reshape/concat.py index d9f21f0b274ac..06a180d4a096e 100644 --- a/pandas/core/reshape/concat.py +++ b/pandas/core/reshape/concat.py @@ -620,8 +620,8 @@ def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiInde for key, index in zip(hlevel, indexes): try: i = level.get_loc(key) - except KeyError: - raise ValueError(f"Key {key} not in level {level}") + except KeyError as err: + raise ValueError(f"Key {key} not in level {level}") from err to_concat.append(np.repeat(i, len(index))) codes_list.append(np.concatenate(to_concat)) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index b04e4e1ac4d48..61aa34f724307 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -631,8 +631,8 @@ def _normalize(table, normalize, margins: bool, margins_name="All"): axis_subs = {0: "index", 1: "columns"} try: normalize = axis_subs[normalize] - except KeyError: - raise ValueError("Not a valid normalize argument") + except KeyError as err: + raise ValueError("Not a valid normalize argument") from err if margins is False: @@ -647,8 +647,8 @@ def _normalize(table, normalize, margins: bool, margins_name="All"): try: f = normalizers[normalize] - except KeyError: - raise ValueError("Not a valid normalize argument") + except KeyError as err: + raise ValueError("Not a valid normalize argument") from err table = f(table) table = table.fillna(0) diff --git a/pandas/core/series.py b/pandas/core/series.py index 3ded02598963c..d984225f8fd89 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -992,11 +992,11 
@@ def __setitem__(self, key, value): except TypeError as e: if isinstance(key, tuple) and not isinstance(self.index, MultiIndex): - raise ValueError("Can only tuple-index with a MultiIndex") + raise ValueError("Can only tuple-index with a MultiIndex") from e # python 3 type errors should be raised if _is_unorderable_exception(e): - raise IndexError(key) + raise IndexError(key) from e if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 4b0fc3e47356c..b0c5d6a48d99a 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2425,12 +2425,12 @@ def cat(self, others=None, sep=None, na_rep=None, join="left"): try: # turn anything in "others" into lists of Series others = self._get_series_list(others) - except ValueError: # do not catch TypeError raised by _get_series_list + except ValueError as err: # do not catch TypeError raised by _get_series_list raise ValueError( "If `others` contains arrays or lists (or other " "list-likes without an index), these must all be " "of the same length as the calling Series/Index." - ) + ) from err # align if required if any(not data.index.equals(x.index) for x in others): diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index b10b736b9134e..5580146b37d25 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -391,8 +391,10 @@ def _convert_listlike_datetimes( # datetime64[ns] orig_arg = ensure_object(orig_arg) result = _attempt_YYYYMMDD(orig_arg, errors=errors) - except (ValueError, TypeError, tslibs.OutOfBoundsDatetime): - raise ValueError("cannot convert the input to '%Y%m%d' date format") + except (ValueError, TypeError, tslibs.OutOfBoundsDatetime) as err: + raise ValueError( + "cannot convert the input to '%Y%m%d' date format" + ) from err # fallback if result is None: @@ -484,8 +486,10 @@ def _adjust_to_origin(arg, origin, unit): raise ValueError("unit must be 'D' for origin='julian'") try: arg = arg - j0 - except TypeError: - raise ValueError("incompatible 'arg' type for given 'origin'='julian'") + except TypeError as err: + raise ValueError( + "incompatible 'arg' type for given 'origin'='julian'" + ) from err # preemptively check this for a nice range j_max = Timestamp.max.to_julian_date() - j0 @@ -508,10 +512,14 @@ def _adjust_to_origin(arg, origin, unit): # we are going to offset back to unix / epoch time try: offset = Timestamp(origin) - except tslibs.OutOfBoundsDatetime: - raise tslibs.OutOfBoundsDatetime(f"origin {origin} is Out of Bounds") - except ValueError: - raise ValueError(f"origin {origin} cannot be converted to a Timestamp") + except tslibs.OutOfBoundsDatetime as err: + raise tslibs.OutOfBoundsDatetime( + f"origin {origin} is Out of Bounds" + ) from err + except ValueError as err: + raise ValueError( + f"origin {origin} cannot be converted to a Timestamp" + ) from err if offset.tz is not None: raise ValueError(f"origin offset {offset} must be tz-naive") @@ -861,7 +869,7 @@ def coerce(values): try: values = to_datetime(values, format="%Y%m%d", errors=errors, utc=tz) except (TypeError, ValueError) as err: - raise ValueError(f"cannot assemble the datetimes: {err}") + raise ValueError(f"cannot assemble the datetimes: {err}") from err for u in ["h", "m", "s", "ms", "us", "ns"]: value = unit_rev.get(u) @@ -869,7 +877,9 @@ def coerce(values): try: values += to_timedelta(coerce(arg[value]), unit=u, errors=errors) except (TypeError, ValueError) as err: - raise ValueError(f"cannot assemble the 
datetimes [{value}]: {err}") + raise ValueError( + f"cannot assemble the datetimes [{value}]: {err}" + ) from err return values @@ -1001,13 +1011,13 @@ def _convert_listlike(arg, format): for element in arg: try: times.append(datetime.strptime(element, format).time()) - except (ValueError, TypeError): + except (ValueError, TypeError) as err: if errors == "raise": msg = ( f"Cannot convert {element} to a time with given " f"format {format}" ) - raise ValueError(msg) + raise ValueError(msg) from err elif errors == "ignore": return arg else: diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index e045d1c2211d7..c6096c24ecbc9 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -219,13 +219,13 @@ def _apply(self, func, **kwargs): try: values = self._prep_values(b.values) - except (TypeError, NotImplementedError): + except (TypeError, NotImplementedError) as err: if isinstance(obj, ABCDataFrame): exclude.extend(b.columns) del block_list[i] continue else: - raise DataError("No numeric types to aggregate") + raise DataError("No numeric types to aggregate") from err if values.size == 0: results.append(values.copy()) diff --git a/pandas/core/window/rolling.py b/pandas/core/window/rolling.py index 65ac064a1322e..3784989de10ab 100644 --- a/pandas/core/window/rolling.py +++ b/pandas/core/window/rolling.py @@ -267,8 +267,8 @@ def _prep_values(self, values: Optional[np.ndarray] = None) -> np.ndarray: else: try: values = ensure_float64(values) - except (ValueError, TypeError): - raise TypeError(f"cannot handle this type -> {values.dtype}") + except (ValueError, TypeError) as err: + raise TypeError(f"cannot handle this type -> {values.dtype}") from err # Convert inf to nan for C funcs inf = np.isinf(values) @@ -449,13 +449,13 @@ def _apply( try: values = self._prep_values(b.values) - except (TypeError, NotImplementedError): + except (TypeError, NotImplementedError) as err: if isinstance(obj, ABCDataFrame): exclude.extend(b.columns) del block_list[i] continue else: - raise DataError("No numeric types to aggregate") + raise DataError("No numeric types to aggregate") from err if values.size == 0: results.append(values.copy()) @@ -1875,11 +1875,11 @@ def _validate_freq(self): try: return to_offset(self.window) - except (TypeError, ValueError): + except (TypeError, ValueError) as err: raise ValueError( f"passed window {self.window} is not " "compatible with a datetimelike index" - ) + ) from err _agg_see_also_doc = dedent( """ diff --git a/pandas/io/common.py b/pandas/io/common.py index c52583eed27ec..0fce8f5382686 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -265,8 +265,8 @@ def get_compression_method( compression_args = dict(compression) try: compression = compression_args.pop("method") - except KeyError: - raise ValueError("If mapping, compression must have key 'method'") + except KeyError as err: + raise ValueError("If mapping, compression must have key 'method'") from err else: compression_args = {} return compression, compression_args diff --git a/pandas/io/excel/_base.py b/pandas/io/excel/_base.py index 97959bd125113..d2f9dd285582f 100644 --- a/pandas/io/excel/_base.py +++ b/pandas/io/excel/_base.py @@ -628,8 +628,8 @@ def __new__(cls, path, engine=None, **kwargs): engine = config.get_option(f"io.excel.{ext}.writer") if engine == "auto": engine = _get_default_writer(ext) - except KeyError: - raise ValueError(f"No engine for filetype: '{ext}'") + except KeyError as err: + raise ValueError(f"No engine for filetype: '{ext}'") from err cls = 
get_writer(engine) return object.__new__(cls) diff --git a/pandas/io/excel/_util.py b/pandas/io/excel/_util.py index c8d40d7141fc8..7c8e1abb497bc 100644 --- a/pandas/io/excel/_util.py +++ b/pandas/io/excel/_util.py @@ -47,8 +47,8 @@ def _get_default_writer(ext): def get_writer(engine_name): try: return _writers[engine_name] - except KeyError: - raise ValueError(f"No Excel writer '{engine_name}'") + except KeyError as err: + raise ValueError(f"No Excel writer '{engine_name}'") from err def _excel2num(x): diff --git a/pandas/io/html.py b/pandas/io/html.py index 561570f466b68..9efdacadce83e 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -904,7 +904,7 @@ def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs): "Since you passed a non-rewindable file " "object, we can't rewind it to try " "another parser. Try read_html() with a different flavor." - ) + ) from caught retained = caught else: diff --git a/pandas/io/json/_normalize.py b/pandas/io/json/_normalize.py index f158ad6cd89e3..4b153d3cb69bf 100644 --- a/pandas/io/json/_normalize.py +++ b/pandas/io/json/_normalize.py @@ -315,7 +315,7 @@ def _recursive_extract(data, path, seen_meta, level=0): raise KeyError( "Try running with errors='ignore' as key " f"{e} is not always present" - ) + ) from e meta_vals[key].append(meta_val) records.extend(recs) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 8a3ad6cb45b57..9d1687e20a949 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -814,8 +814,10 @@ def __init__(self, f, engine=None, **kwds): ): try: dialect_val = getattr(dialect, param) - except AttributeError: - raise ValueError(f"Invalid dialect {kwds['dialect']} provided") + except AttributeError as err: + raise ValueError( + f"Invalid dialect {kwds['dialect']} provided" + ) from err parser_default = _parser_defaults[param] provided = kwds.get(param, parser_default) @@ -1816,19 +1818,19 @@ def _cast_types(self, values, cast_type, column): array_type = cast_type.construct_array_type() try: return array_type._from_sequence_of_strings(values, dtype=cast_type) - except NotImplementedError: + except NotImplementedError as err: raise NotImplementedError( f"Extension Array: {array_type} must implement " "_from_sequence_of_strings in order to be used in parser methods" - ) + ) from err else: try: values = astype_nansafe(values, cast_type, copy=True, skipna=True) - except ValueError: + except ValueError as err: raise ValueError( f"Unable to convert column {column} to type {cast_type}" - ) + ) from err return values def _do_date_conversions(self, names, data): @@ -2552,12 +2554,12 @@ def _infer_columns(self): while self.line_pos <= hr: line = self._next_line() - except StopIteration: + except StopIteration as err: if self.line_pos < hr: raise ValueError( f"Passed header={hr} but only {self.line_pos + 1} lines in " "file" - ) + ) from err # We have an empty file, so check # if columns are provided. 
That will @@ -2569,7 +2571,7 @@ def _infer_columns(self): return columns, num_original_columns, unnamed_cols if not self.names: - raise EmptyDataError("No columns to parse from file") + raise EmptyDataError("No columns to parse from file") from err line = self.names[:] @@ -2650,9 +2652,9 @@ def _infer_columns(self): try: line = self._buffered_line() - except StopIteration: + except StopIteration as err: if not names: - raise EmptyDataError("No columns to parse from file") + raise EmptyDataError("No columns to parse from file") from err line = names[:] diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 048aa8b1915d1..168666ea21f45 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -682,7 +682,7 @@ def open(self, mode: str = "a", **kwargs): # trying to read from a non-existent file causes an error which # is not part of IOError, make it one if self._mode == "r" and "Unable to open/create file" in str(err): - raise IOError(str(err)) + raise IOError(str(err)) from err raise def close(self): @@ -1069,14 +1069,14 @@ def remove(self, key: str, where=None, start=None, stop=None): except AssertionError: # surface any assertion errors for e.g. debugging raise - except Exception: + except Exception as err: # In tests we get here with ClosedFileError, TypeError, and # _table_mod.NoSuchNodeError. TODO: Catch only these? if where is not None: raise ValueError( "trying to remove a node with a non-None where clause!" - ) + ) from err # we are actually trying to remove a node (with children) node = self.get_node(key) @@ -1521,8 +1521,8 @@ def _validate_format(self, format: str) -> str: # validate try: format = _FORMAT_MAP[format.lower()] - except KeyError: - raise TypeError(f"invalid HDFStore format specified [{format}]") + except KeyError as err: + raise TypeError(f"invalid HDFStore format specified [{format}]") from err return format @@ -1579,8 +1579,8 @@ def error(t): _STORER_MAP = {"series": SeriesFixed, "frame": FrameFixed} try: cls = _STORER_MAP[pt] - except KeyError: - raise error("_STORER_MAP") + except KeyError as err: + raise error("_STORER_MAP") from err return cls(self, group, encoding=encoding, errors=errors) # existing node (and must be a table) @@ -1614,8 +1614,8 @@ def error(t): } try: cls = _TABLE_MAP[tt] - except KeyError: - raise error("_TABLE_MAP") + except KeyError as err: + raise error("_TABLE_MAP") from err return cls(self, group, encoding=encoding, errors=errors) @@ -3233,10 +3233,10 @@ def validate_multiindex(self, obj): ] try: return obj.reset_index(), levels - except ValueError: + except ValueError as err: raise ValueError( "duplicate names/columns in the multi-index when storing as a table" - ) + ) from err @property def nrows_expected(self) -> int: @@ -3784,11 +3784,11 @@ def get_blk_items(mgr, blocks): if table_exists and validate: try: existing_col = self.values_axes[i] - except (IndexError, KeyError): + except (IndexError, KeyError) as err: raise ValueError( f"Incompatible appended table [{blocks}]" f"with existing table [{self.values_axes}]" - ) + ) from err else: existing_col = None @@ -3899,12 +3899,12 @@ def get_blk_items(mgr, blocks): b, b_items = by_items.pop(items) new_blocks.append(b) new_blk_items.append(b_items) - except (IndexError, KeyError): + except (IndexError, KeyError) as err: jitems = ",".join(pprint_thing(item) for item in items) raise ValueError( f"cannot match existing table structure for [{jitems}] " "on appending data" - ) + ) from err blocks = new_blocks blk_items = new_blk_items @@ -5061,7 +5061,7 @@ def generate(self, 
where): q = self.table.queryables() try: return PyTablesExpr(where, queryables=q, encoding=self.table.encoding) - except NameError: + except NameError as err: # raise a nice message, suggesting that the user should use # data_columns qkeys = ",".join(q.keys()) @@ -5073,7 +5073,7 @@ def generate(self, where): " an axis (e.g. 'index' or 'columns'), or a " "data_column\n" f" The currently defined references are: {qkeys}\n" - ) + ) from err def select(self): """
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
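The PR touches dozens of call sites, but the pattern is uniform: wherever an exception handler raises a new exception, bind the original with `as err` and chain it with `raise ... from err` (PEP 3134). The original exception is stored on `__cause__`, and the traceback then reads "The above exception was the direct cause of the following exception" instead of the misleading "During handling of the above exception, another exception occurred". A minimal sketch of the before/after pattern, using a stand-in `OptionError` modeled on the `pandas/_config/config.py` hunk above:

```python
class OptionError(AttributeError, KeyError):
    """Stand-in for pandas._config.config.OptionError."""


def get_option(options: dict, key: str):
    try:
        return options[key]
    except KeyError as err:
        # Explicit chaining: the KeyError becomes __cause__ of the
        # OptionError, so the traceback reports it as the direct cause
        # rather than as an unrelated error raised during handling.
        raise OptionError("No such option") from err


try:
    get_option({}, "display.width")
except OptionError as exc:
    assert isinstance(exc.__cause__, KeyError)
```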
https://api.github.com/repos/pandas-dev/pandas/pulls/32322
2020-02-28T08:37:25Z
2020-02-28T10:22:52Z
2020-02-28T10:22:52Z
2020-06-24T08:34:18Z
BUG: parse_dates may have columns not in dataframe
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 035d8da74d1c1..84f8bd8e49fce 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -305,6 +305,7 @@ I/O timestamps with ``version="2.0"`` (:issue:`31652`). - Bug in :meth:`read_csv` was raising `TypeError` when `sep=None` was used in combination with `comment` keyword (:issue:`31396`) - Bug in :class:`HDFStore` that caused it to set to ``int64`` the dtype of a ``datetime64`` column when reading a DataFrame in Python 3 from fixed format written in Python 2 (:issue:`31750`) +- :func:`read_csv` will raise a ``ValueError`` when the column names passed in `parse_dates` are missing in the Dataframe (:issue:`31251`) - Bug in :meth:`read_excel` where a UTF-8 string with a high surrogate would cause a segmentation violation (:issue:`23809`) diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index 50b5db0274aa5..648c986460560 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -6,10 +6,11 @@ import csv import datetime from io import StringIO, TextIOWrapper +import itertools import re import sys from textwrap import fill -from typing import Any, Dict, Set +from typing import Any, Dict, Iterable, List, Set import warnings import numpy as np @@ -34,6 +35,7 @@ ensure_str, is_bool_dtype, is_categorical_dtype, + is_dict_like, is_dtype_equal, is_extension_array_dtype, is_file_like, @@ -1421,6 +1423,54 @@ def __init__(self, kwds): # keep references to file handles opened by the parser itself self.handles = [] + def _validate_parse_dates_presence(self, columns: List[str]) -> None: + """ + Check if parse_dates are in columns. + + If user has provided names for parse_dates, check if those columns + are available. + + Parameters + ---------- + columns : list + List of names of the dataframe. + + Raises + ------ + ValueError + If column to parse_date is not in dataframe. 
+ + """ + cols_needed: Iterable + if is_dict_like(self.parse_dates): + cols_needed = itertools.chain(*self.parse_dates.values()) + elif is_list_like(self.parse_dates): + # a column in parse_dates could be represented + # ColReference = Union[int, str] + # DateGroups = List[ColReference] + # ParseDates = Union[DateGroups, List[DateGroups], + # Dict[ColReference, DateGroups]] + cols_needed = itertools.chain.from_iterable( + col if is_list_like(col) else [col] for col in self.parse_dates + ) + else: + cols_needed = [] + + # get only columns that are references using names (str), not by index + missing_cols = ", ".join( + sorted( + { + col + for col in cols_needed + if isinstance(col, str) and col not in columns + } + ) + ) + if missing_cols: + raise ValueError( + f"Missing column provided to 'parse_dates': '{missing_cols}'" + ) + def close(self): for f in self.handles: f.close() @@ -1940,6 +1990,7 @@ def __init__(self, src, **kwds): if len(self.names) < len(usecols): _validate_usecols_names(usecols, self.names) + self._validate_parse_dates_presence(self.names) self._set_noconvert_columns() self.orig_names = self.names @@ -2310,6 +2361,7 @@ def __init__(self, f, **kwds): if self.index_names is None: self.index_names = index_names + self._validate_parse_dates_presence(self.columns) if self.parse_dates: self._no_thousands_columns = self._set_no_thousands_columns() else: diff --git a/pandas/tests/io/parser/test_parse_dates.py b/pandas/tests/io/parser/test_parse_dates.py index 31573e4e6ecce..2fcac6fa57cf8 100644 --- a/pandas/tests/io/parser/test_parse_dates.py +++ b/pandas/tests/io/parser/test_parse_dates.py @@ -1516,3 +1516,35 @@ def test_hypothesis_delimited_date(date_format, dayfirst, delimiter, test_dateti assert except_out_dateutil == except_in_dateutil assert result == expected + + +@pytest.mark.parametrize( + "names, usecols, parse_dates, missing_cols", + [ + (None, ["val"], ["date", "time"], "date, time"), + (None, ["val"], [0, "time"], "time"), + (None, ["val"], [["date", "time"]], "date, time"), + (None, ["val"], [[0, "time"]], "time"), + (None, ["val"], {"date": [0, "time"]}, "time"), + (None, ["val"], {"date": ["date", "time"]}, "date, time"), + (None, ["val"], [["date", "time"], "date"], "date, time"), + (["date1", "time1", "temperature"], None, ["date", "time"], "date, time"), + ( + ["date1", "time1", "temperature"], + ["date1", "temperature"], + ["date1", "time"], + "time", + ), + ], +) +def test_missing_parse_dates_column_raises( + all_parsers, names, usecols, parse_dates, missing_cols +): + # gh-31251 column names provided in parse_dates could be missing. + parser = all_parsers + content = StringIO("date,time,val\n2020-01-31,04:20:32,32\n") + msg = f"Missing column provided to 'parse_dates': '{missing_cols}'" + with pytest.raises(ValueError, match=msg): + parser.read_csv( + content, sep=",", names=names, usecols=usecols, parse_dates=parse_dates, + )
read_csv will raise a ValueError when columns used for parse_dates are not found in the dataframe.

- [x] closes #31251
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
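Based on the test cases added in this PR, the new validation surfaces as a `ValueError` naming the missing columns. A small sketch of the behavior (assumes a pandas build that includes this change, i.e. 1.1.0+):

```python
from io import StringIO

import pandas as pd

csv = StringIO("date,time,val\n2020-01-31,04:20:32,32\n")

# usecols drops "date" and "time", so parse_dates now references
# columns that are absent from the resulting frame.
try:
    pd.read_csv(csv, usecols=["val"], parse_dates=["date", "time"])
except ValueError as err:
    print(err)  # Missing column provided to 'parse_dates': 'date, time'
```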
https://api.github.com/repos/pandas-dev/pandas/pulls/32320
2020-02-28T03:28:41Z
2020-03-17T00:03:12Z
2020-03-17T00:03:12Z
2020-03-17T03:49:49Z
CLN: remove _igetitem_cache
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 6f5aef4884ccd..b87cbdc532761 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2741,7 +2741,7 @@ def _set_value(self, index, col, value, takeable: bool = False): """ try: if takeable is True: - series = self._iget_item_cache(col) + series = self._ixs(col, axis=1) series._set_value(index, value, takeable=True) return diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ff7c481d550d4..1be6429d33470 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3459,15 +3459,6 @@ def _get_item_cache(self, item): res._is_copy = self._is_copy return res - def _iget_item_cache(self, item: int): - """Return the cached item, item represents a positional indexer.""" - ax = self._info_axis - if ax.is_unique: - lower = self._get_item_cache(ax[item]) - else: - return self._ixs(item, axis=1) - return lower - def _box_item_values(self, key, values): raise AbstractMethodError(self)
Only used once (recent bugfixes got rid of the other uses, IIRC), and it's just an extra layer that isn't needed.
https://api.github.com/repos/pandas-dev/pandas/pulls/32319
2020-02-28T02:53:44Z
2020-03-03T01:34:31Z
2020-03-03T01:34:31Z
2020-03-03T01:52:52Z
REF: test_first_valid_index
diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 5956f73bb11f0..d8eeed69d66e2 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -1,8 +1,7 @@ import numpy as np -import pytest import pandas as pd -from pandas import DataFrame, Series, date_range, to_datetime +from pandas import DataFrame, date_range, to_datetime import pandas._testing as tm @@ -60,65 +59,6 @@ def test_frame_append_datetime64_col_other_units(self): assert (tmp["dates"].values == ex_vals).all() - @pytest.mark.parametrize( - "data,idx,expected_first,expected_last", - [ - ({"A": [1, 2, 3]}, [1, 1, 2], 1, 2), - ({"A": [1, 2, 3]}, [1, 2, 2], 1, 2), - ({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"), - ({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2), - ({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2), - ({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2), - ], - ) - def test_first_last_valid( - self, float_frame, data, idx, expected_first, expected_last - ): - N = len(float_frame.index) - mat = np.random.randn(N) - mat[:5] = np.nan - mat[-5:] = np.nan - - frame = DataFrame({"foo": mat}, index=float_frame.index) - index = frame.first_valid_index() - - assert index == frame.index[5] - - index = frame.last_valid_index() - assert index == frame.index[-6] - - # GH12800 - empty = DataFrame() - assert empty.last_valid_index() is None - assert empty.first_valid_index() is None - - # GH17400: no valid entries - frame[:] = np.nan - assert frame.last_valid_index() is None - assert frame.first_valid_index() is None - - # GH20499: its preserves freq with holes - frame.index = date_range("20110101", periods=N, freq="B") - frame.iloc[1] = 1 - frame.iloc[-2] = 1 - assert frame.first_valid_index() == frame.index[1] - assert frame.last_valid_index() == frame.index[-2] - assert frame.first_valid_index().freq == frame.index.freq - assert frame.last_valid_index().freq == frame.index.freq - - # GH 21441 - df = DataFrame(data, index=idx) - assert expected_first == df.first_valid_index() - assert expected_last == df.last_valid_index() - - @pytest.mark.parametrize("klass", [Series, DataFrame]) - def test_first_valid_index_all_nan(self, klass): - # GH#9752 Series/DataFrame should both return None, not raise - obj = klass([np.nan]) - - assert obj.first_valid_index() is None - assert obj.iloc[:0].first_valid_index() is None - def test_operation_on_NaT(self): # Both NaT and Timestamp are in DataFrame. df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]}) diff --git a/pandas/tests/generic/methods/__init__.py b/pandas/tests/generic/methods/__init__.py new file mode 100644 index 0000000000000..5d18f97b8a55e --- /dev/null +++ b/pandas/tests/generic/methods/__init__.py @@ -0,0 +1,3 @@ +""" +Tests for methods shared by DataFrame and Series. +""" diff --git a/pandas/tests/generic/methods/test_first_valid_index.py b/pandas/tests/generic/methods/test_first_valid_index.py new file mode 100644 index 0000000000000..bca3452c3c458 --- /dev/null +++ b/pandas/tests/generic/methods/test_first_valid_index.py @@ -0,0 +1,90 @@ +""" +Includes test for last_valid_index. 
+""" +import numpy as np +import pytest + +from pandas import DataFrame, Series, date_range +import pandas._testing as tm + + +class TestFirstValidIndex: + @pytest.mark.parametrize("klass", [Series, DataFrame]) + def test_first_valid_index_single_nan(self, klass): + # GH#9752 Series/DataFrame should both return None, not raise + obj = klass([np.nan]) + + assert obj.first_valid_index() is None + assert obj.iloc[:0].first_valid_index() is None + + @pytest.mark.parametrize( + "empty", [DataFrame(), Series(dtype=object), Series([], index=[], dtype=object)] + ) + def test_first_valid_index_empty(self, empty): + # GH#12800 + assert empty.last_valid_index() is None + assert empty.first_valid_index() is None + + @pytest.mark.parametrize( + "data,idx,expected_first,expected_last", + [ + ({"A": [1, 2, 3]}, [1, 1, 2], 1, 2), + ({"A": [1, 2, 3]}, [1, 2, 2], 1, 2), + ({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"), + ({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2), + ({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2), + ({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2), + ], + ) + def test_first_last_valid_frame(self, data, idx, expected_first, expected_last): + # GH#21441 + df = DataFrame(data, index=idx) + assert expected_first == df.first_valid_index() + assert expected_last == df.last_valid_index() + + @pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex]) + def test_first_last_valid(self, index_func): + N = 30 + index = index_func(N) + mat = np.random.randn(N) + mat[:5] = np.nan + mat[-5:] = np.nan + + frame = DataFrame({"foo": mat}, index=index) + assert frame.first_valid_index() == frame.index[5] + assert frame.last_valid_index() == frame.index[-6] + + ser = frame["foo"] + assert ser.first_valid_index() == frame.index[5] + assert ser.last_valid_index() == frame.index[-6] + + @pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex]) + def test_first_last_valid_all_nan(self, index_func): + # GH#17400: no valid entries + index = index_func(30) + frame = DataFrame(np.nan, columns=["foo"], index=index) + + assert frame.last_valid_index() is None + assert frame.first_valid_index() is None + + ser = frame["foo"] + assert ser.first_valid_index() is None + assert ser.last_valid_index() is None + + def test_first_last_valid_preserves_freq(self): + # GH#20499: its preserves freq with holes + index = date_range("20110101", periods=30, freq="B") + frame = DataFrame(np.nan, columns=["foo"], index=index) + + frame.iloc[1] = 1 + frame.iloc[-2] = 1 + assert frame.first_valid_index() == frame.index[1] + assert frame.last_valid_index() == frame.index[-2] + assert frame.first_valid_index().freq == frame.index.freq + assert frame.last_valid_index().freq == frame.index.freq + + ts = frame["foo"] + assert ts.first_valid_index() == ts.index[1] + assert ts.last_valid_index() == ts.index[-2] + assert ts.first_valid_index().freq == ts.index.freq + assert ts.last_valid_index().freq == ts.index.freq diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index c4b2e2edd845a..9796a32532b99 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -48,39 +48,6 @@ def test_autocorr(self, datetime_series): else: assert corr1 == corr2 - def test_first_last_valid(self, datetime_series): - ts = datetime_series.copy() - ts[:5] = np.NaN - - index = ts.first_valid_index() - assert index == ts.index[5] - - ts[-5:] = np.NaN - index = ts.last_valid_index() - assert index == ts.index[-6] - - ts[:] = np.nan - assert 
ts.last_valid_index() is None - assert ts.first_valid_index() is None - - ser = Series([], index=[], dtype=object) - assert ser.last_valid_index() is None - assert ser.first_valid_index() is None - - # GH12800 - empty = Series(dtype=object) - assert empty.last_valid_index() is None - assert empty.first_valid_index() is None - - # GH20499: its preserves freq with holes - ts.index = date_range("20110101", periods=len(ts), freq="B") - ts.iloc[1] = 1 - ts.iloc[-2] = 1 - assert ts.first_valid_index() == ts.index[1] - assert ts.last_valid_index() == ts.index[-2] - assert ts.first_valid_index().freq == ts.index.freq - assert ts.last_valid_index().freq == ts.index.freq - def test_mpl_compat_hack(self, datetime_series): # This is currently failing because the test was relying on
For this one I went ahead and combined the two files, split up the tests, and parametrized them. We end up covering a few more cases than the status quo (Series with an index of strings, DataFrame with a DatetimeIndex).
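For readers skimming the diff, a short sketch of the methods under test (standard pandas behavior; the data is illustrative):

```python
import numpy as np

import pandas as pd

ser = pd.Series([np.nan, np.nan, 1.0, 2.0, np.nan])
print(ser.first_valid_index())  # 2 -- label of the first non-NA entry
print(ser.last_valid_index())   # 3 -- label of the last non-NA entry

# All-NaN and empty objects return None instead of raising (GH#9752, GH#12800)
print(pd.Series([np.nan]).first_valid_index())  # None
```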
https://api.github.com/repos/pandas-dev/pandas/pulls/32317
2020-02-28T01:33:24Z
2020-02-28T10:55:10Z
2020-02-28T10:55:10Z
2020-02-28T15:47:56Z
REGR: SeriesGroupby transform unique
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index 1b6098e6b6ac1..ed1660cdbd859 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -24,7 +24,8 @@ Fixed regressions - Fixed regression in :class:`DataFrame` arithmetic operations with mis-matched columns (:issue:`31623`) - Fixed regression in :meth:`GroupBy.agg` calling a user-provided function an extra time on an empty input (:issue:`31760`) - Joining on :class:`DatetimeIndex` or :class:`TimedeltaIndex` will preserve ``freq`` in simple cases (:issue:`32166`) -- +- Fixed regression in :meth:`Groupby.transform` so that ``unique`` can now be passed as argument again (:issue:`31849`) + .. --------------------------------------------------------------------------- diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 700d8d503d086..ffdf9f0d8f28c 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -186,3 +186,6 @@ def _gotitem(self, key, ndim, subset=None): # NOTE: do NOT edit this directly. New additions should be inserted # into the appropriate list above. transform_kernel_whitelist = reduction_kernels | transformation_kernels + +series_transform_whitelist = transform_kernel_whitelist | frozenset(["unique"]) +dataframe_transform_whitelist = transform_kernel_whitelist diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 1bb512aee39e2..af7940625869f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -467,7 +467,7 @@ def transform(self, func, *args, **kwargs): if not isinstance(func, str): return self._transform_general(func, *args, **kwargs) - elif func not in base.transform_kernel_whitelist: + elif func not in base.series_transform_whitelist: msg = f"'{func}' is not a valid function name for transform(name)" raise ValueError(msg) elif func in base.cythonized_kernels: @@ -1417,7 +1417,7 @@ def transform(self, func, *args, **kwargs): if not isinstance(func, str): return self._transform_general(func, *args, **kwargs) - elif func not in base.transform_kernel_whitelist: + elif func not in base.dataframe_transform_whitelist: msg = f"'{func}' is not a valid function name for transform(name)" raise ValueError(msg) elif func in base.cythonized_kernels: diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 740103eec185a..154d905a7d05f 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -1196,3 +1196,16 @@ def test_transform_lambda_indexing(): ), ) tm.assert_frame_equal(result, expected) + + +def test_unique_whitelisted(): + # GH 31849 + df = pd.DataFrame( + {"title": ["a", "b", "c", "c", "a"], "asset_id": ["a1", "b1", "c1", "c2", "c1"]} + ) + result = df.groupby("title").asset_id.transform("unique") + expected = pd.Series( + [["a1", "c1"], ["b1"], ["c1", "c2"], ["c1", "c2"], ["a1", "c1"]], + name="asset_id", + ) + tm.assert_series_equal(result, expected)
- [x] closes #31849 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
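A usage sketch of the restored behavior, mirroring the test added in the diff:

```python
import pandas as pd

df = pd.DataFrame(
    {"title": ["a", "b", "c", "c", "a"], "asset_id": ["a1", "b1", "c1", "c2", "c1"]}
)

# With this fix, "unique" is accepted as a string kernel again; each row
# gets the array of unique asset_ids within its "title" group.
result = df.groupby("title").asset_id.transform("unique")
print(result)
```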
https://api.github.com/repos/pandas-dev/pandas/pulls/32315
2020-02-27T22:46:27Z
2020-03-03T03:15:08Z
null
2020-10-10T14:14:56Z
REF/TST: misplaced MultiIndex tests
diff --git a/pandas/tests/indexes/multi/test_constructors.py b/pandas/tests/indexes/multi/test_constructors.py index 2c4b3ce04f96d..1157c7f8bb962 100644 --- a/pandas/tests/indexes/multi/test_constructors.py +++ b/pandas/tests/indexes/multi/test_constructors.py @@ -1,3 +1,6 @@ +from datetime import date, datetime +import itertools + import numpy as np import pytest @@ -6,7 +9,7 @@ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike import pandas as pd -from pandas import Index, MultiIndex, date_range +from pandas import Index, MultiIndex, Series, date_range import pandas._testing as tm @@ -723,3 +726,73 @@ def test_index_equal_empty_iterable(): a = MultiIndex(levels=[[], []], codes=[[], []], names=["a", "b"]) b = MultiIndex.from_arrays(arrays=[[], []], names=["a", "b"]) tm.assert_index_equal(a, b) + + +def test_raise_invalid_sortorder(): + # Test that the MultiIndex constructor raise when a incorrect sortorder is given + # GH#28518 + + levels = [[0, 1], [0, 1, 2]] + + # Correct sortorder + MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2 + ) + + with pytest.raises(ValueError, match=r".* sortorder 2 with lexsort_depth 1.*"): + MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=2, + ) + + with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"): + MultiIndex( + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=1, + ) + + +def test_datetimeindex(): + idx1 = pd.DatetimeIndex( + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, tz="Asia/Tokyo", + ) + idx2 = pd.date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern") + idx = MultiIndex.from_arrays([idx1, idx2]) + + expected1 = pd.DatetimeIndex( + ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"], tz="Asia/Tokyo" + ) + + tm.assert_index_equal(idx.levels[0], expected1) + tm.assert_index_equal(idx.levels[1], idx2) + + # from datetime combos + # GH 7888 + date1 = date.today() + date2 = datetime.today() + date3 = Timestamp.today() + + for d1, d2 in itertools.product([date1, date2, date3], [date1, date2, date3]): + index = MultiIndex.from_product([[d1], [d2]]) + assert isinstance(index.levels[0], pd.DatetimeIndex) + assert isinstance(index.levels[1], pd.DatetimeIndex) + + +def test_constructor_with_tz(): + + index = pd.DatetimeIndex( + ["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific" + ) + columns = pd.DatetimeIndex( + ["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo" + ) + + result = MultiIndex.from_arrays([index, columns]) + + assert result.names == ["dt1", "dt2"] + tm.assert_index_equal(result.levels[0], index) + tm.assert_index_equal(result.levels[1], columns) + + result = MultiIndex.from_arrays([Series(index), Series(columns)]) + + assert result.names == ["dt1", "dt2"] + tm.assert_index_equal(result.levels[0], index) + tm.assert_index_equal(result.levels[1], columns) diff --git a/pandas/tests/indexes/multi/test_duplicates.py b/pandas/tests/indexes/multi/test_duplicates.py index 93e1de535835f..5e17a19335c7e 100644 --- a/pandas/tests/indexes/multi/test_duplicates.py +++ b/pandas/tests/indexes/multi/test_duplicates.py @@ -274,3 +274,29 @@ def test_duplicated2(): tm.assert_numpy_array_equal( mi.duplicated(), np.zeros(len(mi), dtype="bool") ) + + +def test_duplicated_drop_duplicates(): + # GH#4060 + idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2])) + + expected = np.array([False, False, False, True, False, False], 
dtype=bool) + duplicated = idx.duplicated() + tm.assert_numpy_array_equal(duplicated, expected) + assert duplicated.dtype == bool + expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2])) + tm.assert_index_equal(idx.drop_duplicates(), expected) + + expected = np.array([True, False, False, False, False, False]) + duplicated = idx.duplicated(keep="last") + tm.assert_numpy_array_equal(duplicated, expected) + assert duplicated.dtype == bool + expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2])) + tm.assert_index_equal(idx.drop_duplicates(keep="last"), expected) + + expected = np.array([True, False, False, True, False, False]) + duplicated = idx.duplicated(keep=False) + tm.assert_numpy_array_equal(duplicated, expected) + assert duplicated.dtype == bool + expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2])) + tm.assert_index_equal(idx.drop_duplicates(keep=False), expected) diff --git a/pandas/tests/indexes/multi/test_format.py b/pandas/tests/indexes/multi/test_format.py index 75f23fb2f32ba..75499bd79cca0 100644 --- a/pandas/tests/indexes/multi/test_format.py +++ b/pandas/tests/indexes/multi/test_format.py @@ -1,9 +1,10 @@ import warnings +import numpy as np import pytest import pandas as pd -from pandas import MultiIndex +from pandas import Index, MultiIndex import pandas._testing as tm @@ -76,6 +77,17 @@ def test_repr_max_seq_item_setting(idx): class TestRepr: + def test_unicode_repr_issues(self): + levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])] + codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)] + index = MultiIndex(levels=levels, codes=codes) + + repr(index.levels) + + # FIXME: dont leave commented-out + # NumPy bug + # repr(index.get_level_values(1)) + def test_repr(self, idx): result = idx[:1].__repr__() expected = """\ diff --git a/pandas/tests/indexes/multi/test_indexing.py b/pandas/tests/indexes/multi/test_indexing.py index 39049006edb7c..b7d7b3b459aff 100644 --- a/pandas/tests/indexes/multi/test_indexing.py +++ b/pandas/tests/indexes/multi/test_indexing.py @@ -498,3 +498,41 @@ def test_slice_indexer_with_missing_value(index_arr, expected, start_idx, end_id idx = MultiIndex.from_arrays(index_arr) result = idx.slice_indexer(start=start_idx, end=end_idx) assert result == expected + + +def test_pyint_engine(): + # GH#18519 : when combinations of codes cannot be represented in 64 + # bits, the index underlying the MultiIndex engine works with Python + # integers, rather than uint64. + N = 5 + keys = [ + tuple(l) + for l in [ + [0] * 10 * N, + [1] * 10 * N, + [2] * 10 * N, + [np.nan] * N + [2] * 9 * N, + [0] * N + [2] * 9 * N, + [np.nan] * N + [2] * 8 * N + [0] * N, + ] + ] + # Each level contains 4 elements (including NaN), so it is represented + # in 2 bits, for a total of 2*N*10 = 100 > 64 bits. If we were using a + # 64 bit engine and truncating the first levels, the fourth and fifth + # keys would collide; if truncating the last levels, the fifth and + # sixth; if rotating bits rather than shifting, the third and fifth. 
+ + for idx in range(len(keys)): + index = MultiIndex.from_tuples(keys) + assert index.get_loc(keys[idx]) == idx + + expected = np.arange(idx + 1, dtype=np.intp) + result = index.get_indexer([keys[i] for i in expected]) + tm.assert_numpy_array_equal(result, expected) + + # With missing key: + idces = range(len(keys)) + expected = np.array([-1] + list(idces), dtype=np.intp) + missing = tuple([0, 1] * 5 * N) + result = index.get_indexer([missing] + [keys[i] for i in idces]) + tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/indexes/multi/test_lexsort.py b/pandas/tests/indexes/multi/test_lexsort.py new file mode 100644 index 0000000000000..1d2ad8e02697e --- /dev/null +++ b/pandas/tests/indexes/multi/test_lexsort.py @@ -0,0 +1,46 @@ +from pandas import MultiIndex + + +class TestIsLexsorted: + def test_is_lexsorted(self): + levels = [[0, 1], [0, 1, 2]] + + index = MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] + ) + assert index.is_lexsorted() + + index = MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]] + ) + assert not index.is_lexsorted() + + index = MultiIndex( + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]] + ) + assert not index.is_lexsorted() + assert index.lexsort_depth == 0 + + +class TestLexsortDepth: + def test_lexsort_depth(self): + # Test that lexsort_depth return the correct sortorder + # when it was given to the MultiIndex const. + # GH#28518 + + levels = [[0, 1], [0, 1, 2]] + + index = MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2 + ) + assert index.lexsort_depth == 2 + + index = MultiIndex( + levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=1 + ) + assert index.lexsort_depth == 1 + + index = MultiIndex( + levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=0 + ) + assert index.lexsort_depth == 0 diff --git a/pandas/tests/indexes/multi/test_missing.py b/pandas/tests/indexes/multi/test_missing.py index a17e1e9928bff..54ffec2e03fd3 100644 --- a/pandas/tests/indexes/multi/test_missing.py +++ b/pandas/tests/indexes/multi/test_missing.py @@ -141,3 +141,13 @@ def test_nan_stays_float(): assert pd.isna(df0.index.get_level_values(1)).all() # the following failed in 0.14.1 assert pd.isna(dfm.index.get_level_values(1)[:-1]).all() + + +def test_tuples_have_na(): + index = MultiIndex( + levels=[[1, 0], [0, 1, 2, 3]], + codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]], + ) + + assert pd.isna(index[4][0]) + assert pd.isna(index.values[4][0]) diff --git a/pandas/tests/indexes/multi/test_reshape.py b/pandas/tests/indexes/multi/test_reshape.py index 2e39c714ca7af..de32bd94be491 100644 --- a/pandas/tests/indexes/multi/test_reshape.py +++ b/pandas/tests/indexes/multi/test_reshape.py @@ -1,5 +1,8 @@ +from datetime import datetime + import numpy as np import pytest +import pytz import pandas as pd from pandas import Index, MultiIndex @@ -95,6 +98,53 @@ def test_append(idx): assert result.equals(idx) +def test_append_index(): + idx1 = Index([1.1, 1.2, 1.3]) + idx2 = pd.date_range("2011-01-01", freq="D", periods=3, tz="Asia/Tokyo") + idx3 = Index(["A", "B", "C"]) + + midx_lv2 = MultiIndex.from_arrays([idx1, idx2]) + midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3]) + + result = idx1.append(midx_lv2) + + # see gh-7112 + tz = pytz.timezone("Asia/Tokyo") + expected_tuples = [ + (1.1, tz.localize(datetime(2011, 1, 1))), + (1.2, tz.localize(datetime(2011, 1, 2))), + (1.3, tz.localize(datetime(2011, 1, 3))), + ] + 
expected = Index([1.1, 1.2, 1.3] + expected_tuples) + tm.assert_index_equal(result, expected) + + result = midx_lv2.append(idx1) + expected = Index(expected_tuples + [1.1, 1.2, 1.3]) + tm.assert_index_equal(result, expected) + + result = midx_lv2.append(midx_lv2) + expected = MultiIndex.from_arrays([idx1.append(idx1), idx2.append(idx2)]) + tm.assert_index_equal(result, expected) + + result = midx_lv2.append(midx_lv3) + tm.assert_index_equal(result, expected) + + result = midx_lv3.append(midx_lv2) + expected = Index._simple_new( + np.array( + [ + (1.1, tz.localize(datetime(2011, 1, 1)), "A"), + (1.2, tz.localize(datetime(2011, 1, 2)), "B"), + (1.3, tz.localize(datetime(2011, 1, 3)), "C"), + ] + + expected_tuples, + dtype=object, + ), + None, + ) + tm.assert_index_equal(result, expected) + + def test_repeat(): reps = 2 numbers = [1, 2, 3] diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index efaedfad1e093..e3cf46b466ae4 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -6,12 +6,11 @@ import numpy as np from numpy.random import randn import pytest -import pytz from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype import pandas as pd -from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, isna +from pandas import DataFrame, Index, MultiIndex, Series, Timestamp import pandas._testing as tm AGG_FUNCTIONS = [ @@ -80,52 +79,6 @@ def test_append(self): result = a["A"].append(b["A"]) tm.assert_series_equal(result, self.frame["A"]) - def test_append_index(self): - idx1 = Index([1.1, 1.2, 1.3]) - idx2 = pd.date_range("2011-01-01", freq="D", periods=3, tz="Asia/Tokyo") - idx3 = Index(["A", "B", "C"]) - - midx_lv2 = MultiIndex.from_arrays([idx1, idx2]) - midx_lv3 = MultiIndex.from_arrays([idx1, idx2, idx3]) - - result = idx1.append(midx_lv2) - - # see gh-7112 - tz = pytz.timezone("Asia/Tokyo") - expected_tuples = [ - (1.1, tz.localize(datetime.datetime(2011, 1, 1))), - (1.2, tz.localize(datetime.datetime(2011, 1, 2))), - (1.3, tz.localize(datetime.datetime(2011, 1, 3))), - ] - expected = Index([1.1, 1.2, 1.3] + expected_tuples) - tm.assert_index_equal(result, expected) - - result = midx_lv2.append(idx1) - expected = Index(expected_tuples + [1.1, 1.2, 1.3]) - tm.assert_index_equal(result, expected) - - result = midx_lv2.append(midx_lv2) - expected = MultiIndex.from_arrays([idx1.append(idx1), idx2.append(idx2)]) - tm.assert_index_equal(result, expected) - - result = midx_lv2.append(midx_lv3) - tm.assert_index_equal(result, expected) - - result = midx_lv3.append(midx_lv2) - expected = Index._simple_new( - np.array( - [ - (1.1, tz.localize(datetime.datetime(2011, 1, 1)), "A"), - (1.2, tz.localize(datetime.datetime(2011, 1, 2)), "B"), - (1.3, tz.localize(datetime.datetime(2011, 1, 3)), "C"), - ] - + expected_tuples, - dtype=object, - ), - None, - ) - tm.assert_index_equal(result, expected) - def test_dataframe_constructor(self): multi = DataFrame( np.random.randn(4, 4), @@ -1265,43 +1218,6 @@ def test_unstack_group_index_overflow(self): result = s.unstack(4) assert result.shape == (500, 2) - def test_pyint_engine(self): - # GH 18519 : when combinations of codes cannot be represented in 64 - # bits, the index underlying the MultiIndex engine works with Python - # integers, rather than uint64. 
- N = 5 - keys = [ - tuple(l) - for l in [ - [0] * 10 * N, - [1] * 10 * N, - [2] * 10 * N, - [np.nan] * N + [2] * 9 * N, - [0] * N + [2] * 9 * N, - [np.nan] * N + [2] * 8 * N + [0] * N, - ] - ] - # Each level contains 4 elements (including NaN), so it is represented - # in 2 bits, for a total of 2*N*10 = 100 > 64 bits. If we were using a - # 64 bit engine and truncating the first levels, the fourth and fifth - # keys would collide; if truncating the last levels, the fifth and - # sixth; if rotating bits rather than shifting, the third and fifth. - - for idx in range(len(keys)): - index = MultiIndex.from_tuples(keys) - assert index.get_loc(keys[idx]) == idx - - expected = np.arange(idx + 1, dtype=np.intp) - result = index.get_indexer([keys[i] for i in expected]) - tm.assert_numpy_array_equal(result, expected) - - # With missing key: - idces = range(len(keys)) - expected = np.array([-1] + list(idces), dtype=np.intp) - missing = tuple([0, 1] * 5 * N) - result = index.get_indexer([missing] + [keys[i] for i in idces]) - tm.assert_numpy_array_equal(result, expected) - def test_to_html(self): self.ymd.columns.name = "foo" self.ymd.to_html() @@ -1545,16 +1461,6 @@ def test_drop_preserve_names(self): result = df.drop([(0, 2)]) assert result.index.names == ("one", "two") - def test_unicode_repr_issues(self): - levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])] - codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)] - index = MultiIndex(levels=levels, codes=codes) - - repr(index.levels) - - # NumPy bug - # repr(index.get_level_values(1)) - def test_unicode_repr_level_names(self): index = MultiIndex.from_tuples([(0, 0), (1, 1)], names=["\u0394", "i1"]) @@ -1631,15 +1537,6 @@ def test_assign_index_sequences(self): df.index = index repr(df) - def test_tuples_have_na(self): - index = MultiIndex( - levels=[[1, 0], [0, 1, 2, 3]], - codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0, 1, 2, 3]], - ) - - assert isna(index[4][0]) - assert isna(index.values[4][0]) - def test_duplicate_groupby_issues(self): idx_tp = [ ("600809", "20061231"), @@ -1677,31 +1574,6 @@ def test_duplicate_mi(self): result = df.loc[("foo", "bar")] tm.assert_frame_equal(result, expected) - def test_duplicated_drop_duplicates(self): - # GH 4060 - idx = MultiIndex.from_arrays(([1, 2, 3, 1, 2, 3], [1, 1, 1, 1, 2, 2])) - - expected = np.array([False, False, False, True, False, False], dtype=bool) - duplicated = idx.duplicated() - tm.assert_numpy_array_equal(duplicated, expected) - assert duplicated.dtype == bool - expected = MultiIndex.from_arrays(([1, 2, 3, 2, 3], [1, 1, 1, 2, 2])) - tm.assert_index_equal(idx.drop_duplicates(), expected) - - expected = np.array([True, False, False, False, False, False]) - duplicated = idx.duplicated(keep="last") - tm.assert_numpy_array_equal(duplicated, expected) - assert duplicated.dtype == bool - expected = MultiIndex.from_arrays(([2, 3, 1, 2, 3], [1, 1, 1, 2, 2])) - tm.assert_index_equal(idx.drop_duplicates(keep="last"), expected) - - expected = np.array([True, False, False, True, False, False]) - duplicated = idx.duplicated(keep=False) - tm.assert_numpy_array_equal(duplicated, expected) - assert duplicated.dtype == bool - expected = MultiIndex.from_arrays(([2, 3, 2, 3], [1, 1, 2, 2])) - tm.assert_index_equal(idx.drop_duplicates(keep=False), expected) - def test_multiindex_set_index(self): # segfault in #3308 d = {"t1": [2, 2.5, 3], "t2": [4, 5, 6]} @@ -1713,53 +1585,6 @@ def test_multiindex_set_index(self): # it works! 
df.set_index(index) - def test_datetimeindex(self): - idx1 = pd.DatetimeIndex( - ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"] * 2, - tz="Asia/Tokyo", - ) - idx2 = pd.date_range("2010/01/01", periods=6, freq="M", tz="US/Eastern") - idx = MultiIndex.from_arrays([idx1, idx2]) - - expected1 = pd.DatetimeIndex( - ["2013-04-01 9:00", "2013-04-02 9:00", "2013-04-03 9:00"], tz="Asia/Tokyo" - ) - - tm.assert_index_equal(idx.levels[0], expected1) - tm.assert_index_equal(idx.levels[1], idx2) - - # from datetime combos - # GH 7888 - date1 = datetime.date.today() - date2 = datetime.datetime.today() - date3 = Timestamp.today() - - for d1, d2 in itertools.product([date1, date2, date3], [date1, date2, date3]): - index = MultiIndex.from_product([[d1], [d2]]) - assert isinstance(index.levels[0], pd.DatetimeIndex) - assert isinstance(index.levels[1], pd.DatetimeIndex) - - def test_constructor_with_tz(self): - - index = pd.DatetimeIndex( - ["2013/01/01 09:00", "2013/01/02 09:00"], name="dt1", tz="US/Pacific" - ) - columns = pd.DatetimeIndex( - ["2014/01/01 09:00", "2014/01/02 09:00"], name="dt2", tz="Asia/Tokyo" - ) - - result = MultiIndex.from_arrays([index, columns]) - - assert result.names == ["dt1", "dt2"] - tm.assert_index_equal(result.levels[0], index) - tm.assert_index_equal(result.levels[1], columns) - - result = MultiIndex.from_arrays([Series(index), Series(columns)]) - - assert result.names == ["dt1", "dt2"] - tm.assert_index_equal(result.levels[0], index) - tm.assert_index_equal(result.levels[1], columns) - def test_set_index_datetime(self): # GH 3950 df = DataFrame( @@ -2210,72 +2035,6 @@ def test_sort_index_categorical_multiindex(self): ) tm.assert_frame_equal(result, expected) - def test_is_lexsorted(self): - levels = [[0, 1], [0, 1, 2]] - - index = MultiIndex( - levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] - ) - assert index.is_lexsorted() - - index = MultiIndex( - levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]] - ) - assert not index.is_lexsorted() - - index = MultiIndex( - levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]] - ) - assert not index.is_lexsorted() - assert index.lexsort_depth == 0 - - def test_raise_invalid_sortorder(self): - # Test that the MultiIndex constructor raise when a incorrect sortorder is given - # Issue #28518 - - levels = [[0, 1], [0, 1, 2]] - - # Correct sortorder - MultiIndex( - levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2 - ) - - with pytest.raises(ValueError, match=r".* sortorder 2 with lexsort_depth 1.*"): - MultiIndex( - levels=levels, - codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], - sortorder=2, - ) - - with pytest.raises(ValueError, match=r".* sortorder 1 with lexsort_depth 0.*"): - MultiIndex( - levels=levels, - codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], - sortorder=1, - ) - - def test_lexsort_depth(self): - # Test that lexsort_depth return the correct sortorder - # when it was given to the MultiIndex const. - # Issue #28518 - - levels = [[0, 1], [0, 1, 2]] - - index = MultiIndex( - levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]], sortorder=2 - ) - assert index.lexsort_depth == 2 - - index = MultiIndex( - levels=levels, codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 2, 1]], sortorder=1 - ) - assert index.lexsort_depth == 1 - - index = MultiIndex( - levels=levels, codes=[[0, 0, 1, 0, 1, 1], [0, 1, 0, 2, 2, 1]], sortorder=0 - ) - assert index.lexsort_depth == 0 - def test_sort_index_and_reconstruction(self): # 15622
AFAICT this gets all of the tests currently in test_multilevel that are only testing MultiIndex.
https://api.github.com/repos/pandas-dev/pandas/pulls/32314
2020-02-27T22:45:40Z
2020-02-28T11:47:13Z
2020-02-28T11:47:13Z
2020-02-28T16:23:08Z
TST: Using more fixtures in of tests/base/test_ops.py
diff --git a/pandas/conftest.py b/pandas/conftest.py index be44e6c2b36da..834015371bb49 100644 --- a/pandas/conftest.py +++ b/pandas/conftest.py @@ -1047,6 +1047,16 @@ def series_with_simple_index(indices): for dtype in _narrow_dtypes } + +@pytest.fixture(params=_narrow_series.keys()) +def narrow_series(request): + """ + Fixture for Series with low precision data types + """ + # copy to avoid mutation, e.g. setting .name + return _narrow_series[request.param].copy() + + _index_or_series_objs = {**indices_dict, **_series, **_narrow_series} diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py index f85d823cb2fac..c1fa03d7396dd 100644 --- a/pandas/tests/base/test_ops.py +++ b/pandas/tests/base/test_ops.py @@ -11,6 +11,7 @@ from pandas.compat.numpy import np_array_datetime64_compat from pandas.core.dtypes.common import ( + is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_object_dtype, @@ -797,62 +798,83 @@ def test_fillna(self): assert o is not result @pytest.mark.skipif(PYPY, reason="not relevant for PyPy") - def test_memory_usage(self): - for o in self.objs: - res = o.memory_usage() - res_deep = o.memory_usage(deep=True) - - if is_object_dtype(o) or ( - isinstance(o, Series) and is_object_dtype(o.index) - ): - # if there are objects, only deep will pick them up - assert res_deep > res - else: - assert res == res_deep - - if isinstance(o, Series): - assert ( - o.memory_usage(index=False) + o.index.memory_usage() - ) == o.memory_usage(index=True) + def test_memory_usage(self, index_or_series_obj): + obj = index_or_series_obj + res = obj.memory_usage() + res_deep = obj.memory_usage(deep=True) - # sys.getsizeof will call the .memory_usage with - # deep=True, and add on some GC overhead - diff = res_deep - sys.getsizeof(o) - assert abs(diff) < 100 + is_object = is_object_dtype(obj) or ( + isinstance(obj, Series) and is_object_dtype(obj.index) + ) + is_categorical = is_categorical_dtype(obj) or ( + isinstance(obj, Series) and is_categorical_dtype(obj.index) + ) - def test_searchsorted(self): - # See gh-12238 - for o in self.objs: - index = np.searchsorted(o, max(o)) - assert 0 <= index <= len(o) + if len(obj) == 0: + assert res_deep == res == 0 + elif is_object or is_categorical: + # only deep will pick them up + assert res_deep > res + else: + assert res == res_deep - index = np.searchsorted(o, max(o), sorter=range(len(o))) - assert 0 <= index <= len(o) + # sys.getsizeof will call the .memory_usage with + # deep=True, and add on some GC overhead + diff = res_deep - sys.getsizeof(obj) + assert abs(diff) < 100 - def test_validate_bool_args(self): - invalid_values = [1, "True", [1, 2, 3], 5.0] + def test_memory_usage_components_series(self, series_with_simple_index): + series = series_with_simple_index + total_usage = series.memory_usage(index=True) + non_index_usage = series.memory_usage(index=False) + index_usage = series.index.memory_usage() + assert total_usage == non_index_usage + index_usage + + def test_memory_usage_components_narrow_series(self, narrow_series): + series = narrow_series + total_usage = series.memory_usage(index=True) + non_index_usage = series.memory_usage(index=False) + index_usage = series.index.memory_usage() + assert total_usage == non_index_usage + index_usage + + def test_searchsorted(self, index_or_series_obj): + # numpy.searchsorted calls obj.searchsorted under the hood. 
+ # See gh-12238 + obj = index_or_series_obj - for value in invalid_values: - msg = "expected type bool" - with pytest.raises(ValueError, match=msg): - self.int_series.drop_duplicates(inplace=value) + if isinstance(obj, pd.MultiIndex): + # See gh-14833 + pytest.skip("np.searchsorted doesn't work on pd.MultiIndex") - def test_getitem(self): - for i in self.indexes: - s = pd.Series(i) + max_obj = max(obj, default=0) + index = np.searchsorted(obj, max_obj) + assert 0 <= index <= len(obj) - assert i[0] == s.iloc[0] - assert i[5] == s.iloc[5] - assert i[-1] == s.iloc[-1] + index = np.searchsorted(obj, max_obj, sorter=range(len(obj))) + assert 0 <= index <= len(obj) - assert i[-1] == i[9] + def test_access_by_position(self, indices): + index = indices - msg = "index 20 is out of bounds for axis 0 with size 10" - with pytest.raises(IndexError, match=msg): - i[20] - msg = "single positional indexer is out-of-bounds" - with pytest.raises(IndexError, match=msg): - s.iloc[20] + if len(index) == 0: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(index, pd.MultiIndex): + pytest.skip("Can't instantiate Series from MultiIndex") + + series = pd.Series(index) + assert index[0] == series.iloc[0] + assert index[5] == series.iloc[5] + assert index[-1] == series.iloc[-1] + + size = len(index) + assert index[-1] == index[size - 1] + + msg = f"index {size} is out of bounds for axis 0 with size {size}" + with pytest.raises(IndexError, match=msg): + index[size] + msg = "single positional indexer is out-of-bounds" + with pytest.raises(IndexError, match=msg): + series.iloc[size] @pytest.mark.parametrize("indexer_klass", [list, pd.Index]) @pytest.mark.parametrize( diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py index 511d24ca7fa29..e2f050650b298 100644 --- a/pandas/tests/series/test_validate.py +++ b/pandas/tests/series/test_validate.py @@ -3,7 +3,15 @@ @pytest.mark.parametrize( "func", - ["reset_index", "_set_name", "sort_values", "sort_index", "rename", "dropna"], + [ + "reset_index", + "_set_name", + "sort_values", + "sort_index", + "rename", + "dropna", + "drop_duplicates", + ], ) @pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0]) def test_validate_bool_args(string_series, func, inplace):
part of #23877 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/32313
2020-02-27T22:29:52Z
2020-03-03T02:48:38Z
2020-03-03T02:48:38Z
2020-03-03T02:48:43Z
DOC: Add examples to the method MultiIndex.is_lexsorted()
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 4bd462e83a5bc..d6187819c2a2e 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1625,6 +1625,30 @@ def is_lexsorted(self) -> bool: Returns ------- bool + + Examples + -------- + In the below examples, the first level of the MultiIndex is sorted because + a<b<c, so there is no need to look at the next level. + + >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'e', 'f']]).is_lexsorted() + True + >>> pd.MultiIndex.from_arrays([['a', 'b', 'c'], ['d', 'f', 'e']]).is_lexsorted() + True + + In case there is a tie, the lexicographical sorting looks + at the next level of the MultiIndex. + + >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'b', 'c']]).is_lexsorted() + True + >>> pd.MultiIndex.from_arrays([[0, 1, 1], ['a', 'c', 'b']]).is_lexsorted() + False + >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], + ... ['aa', 'bb', 'aa', 'bb']]).is_lexsorted() + True + >>> pd.MultiIndex.from_arrays([['a', 'a', 'b', 'b'], + ... ['bb', 'aa', 'aa', 'bb']]).is_lexsorted() + False """ return self.lexsort_depth == self.nlevels
- [ ] xref #32179 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/32312
2020-02-27T22:26:43Z
2020-04-02T00:13:26Z
2020-04-02T00:13:26Z
2020-04-02T00:13:33Z
TST: refactored test_factorize
diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py index 8f48d0a3e8378..7ec617acc198e 100644 --- a/pandas/tests/base/test_ops.py +++ b/pandas/tests/base/test_ops.py @@ -533,66 +533,27 @@ def test_value_counts_datetime64(self, index_or_series): result2 = td2.value_counts() tm.assert_series_equal(result2, expected_s) - def test_factorize(self): - for orig in self.objs: - o = orig.copy() - - if isinstance(o, Index) and o.is_boolean(): - exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp) - exp_uniques = o - exp_uniques = Index([False, True]) - else: - exp_arr = np.array(range(len(o)), dtype=np.intp) - exp_uniques = o - codes, uniques = o.factorize() - - tm.assert_numpy_array_equal(codes, exp_arr) - if isinstance(o, Series): - tm.assert_index_equal(uniques, Index(orig), check_names=False) - else: - # factorize explicitly resets name - tm.assert_index_equal(uniques, exp_uniques, check_names=False) - - def test_factorize_repeated(self): - for orig in self.objs: - o = orig.copy() + @pytest.mark.parametrize("sort", [True, False]) + def test_factorize(self, index_or_series_obj, sort): + obj = index_or_series_obj + result_codes, result_uniques = obj.factorize(sort=sort) - # don't test boolean - if isinstance(o, Index) and o.is_boolean(): - continue + constructor = pd.Index + if isinstance(obj, pd.MultiIndex): + constructor = pd.MultiIndex.from_tuples + expected_uniques = constructor(obj.unique()) - # sort by value, and create duplicates - if isinstance(o, Series): - o = o.sort_values() - n = o.iloc[5:].append(o) - else: - indexer = o.argsort() - o = o.take(indexer) - n = o[5:].append(o) - - exp_arr = np.array( - [5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp - ) - codes, uniques = n.factorize(sort=True) - - tm.assert_numpy_array_equal(codes, exp_arr) - if isinstance(o, Series): - tm.assert_index_equal( - uniques, Index(orig).sort_values(), check_names=False - ) - else: - tm.assert_index_equal(uniques, o, check_names=False) + if sort: + expected_uniques = expected_uniques.sort_values() - exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp) - codes, uniques = n.factorize(sort=False) - tm.assert_numpy_array_equal(codes, exp_arr) + # construct an integer ndarray so that + # `expected_uniques.take(expected_codes)` is equal to `obj` + expected_uniques_list = list(expected_uniques) + expected_codes = [expected_uniques_list.index(val) for val in obj] + expected_codes = np.asarray(expected_codes, dtype=np.intp) - if isinstance(o, Series): - expected = Index(o.iloc[5:10].append(o.iloc[:5])) - tm.assert_index_equal(uniques, expected, check_names=False) - else: - expected = o[5:10].append(o[:5]) - tm.assert_index_equal(uniques, expected, check_names=False) + tm.assert_numpy_array_equal(result_codes, expected_codes) + tm.assert_index_equal(result_uniques, expected_uniques) def test_duplicated_drop_duplicates_index(self): # GH 4060
part of #23877 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/32311
2020-02-27T21:22:58Z
2020-03-07T18:35:21Z
2020-03-07T18:35:21Z
2020-03-07T18:35:35Z
BUG: PeriodIndex.asof_locs
diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 35a5d99abf4e6..02d81b1b1396d 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -388,27 +388,26 @@ def __array_wrap__(self, result, context=None): # cannot pass _simple_new as it is return type(self)(result, freq=self.freq, name=self.name) - def asof_locs(self, where, mask): + def asof_locs(self, where, mask: np.ndarray) -> np.ndarray: """ where : array of timestamps mask : array of booleans where data is not NA - """ where_idx = where if isinstance(where_idx, DatetimeIndex): where_idx = PeriodIndex(where_idx.values, freq=self.freq) + elif not isinstance(where_idx, PeriodIndex): + raise TypeError("asof_locs `where` must be DatetimeIndex or PeriodIndex") + elif where_idx.freq != self.freq: + raise raise_on_incompatible(self, where_idx) - locs = self._ndarray_values[mask].searchsorted( - where_idx._ndarray_values, side="right" - ) + locs = self.asi8[mask].searchsorted(where_idx.asi8, side="right") locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self))[mask].take(locs) first = mask.argmax() - result[ - (locs == 0) & (where_idx._ndarray_values < self._ndarray_values[first]) - ] = -1 + result[(locs == 0) & (where_idx.asi8 < self.asi8[first])] = -1 return result diff --git a/pandas/plotting/_matplotlib/converter.py b/pandas/plotting/_matplotlib/converter.py index c399e5b9b7017..8260684c02ea6 100644 --- a/pandas/plotting/_matplotlib/converter.py +++ b/pandas/plotting/_matplotlib/converter.py @@ -218,13 +218,13 @@ def _convert_1d(values, units, axis): if isinstance(values, valid_types) or is_integer(values) or is_float(values): return get_datevalue(values, axis.freq) elif isinstance(values, PeriodIndex): - return values.asfreq(axis.freq)._ndarray_values + return values.asfreq(axis.freq).asi8 elif isinstance(values, Index): return values.map(lambda x: get_datevalue(x, axis.freq)) elif lib.infer_dtype(values, skipna=False) == "period": # https://github.com/pandas-dev/pandas/issues/24304 # convert ndarray[period] -> PeriodIndex - return PeriodIndex(values, freq=axis.freq)._ndarray_values + return PeriodIndex(values, freq=axis.freq).asi8 elif isinstance(values, (list, tuple, np.ndarray, Index)): return [get_datevalue(x, axis.freq) for x in values] return values @@ -607,7 +607,7 @@ def _daily_finder(vmin, vmax, freq): info = np.zeros( span, dtype=[("val", np.int64), ("maj", bool), ("min", bool), ("fmt", "|S20")] ) - info["val"][:] = dates_._ndarray_values + info["val"][:] = dates_.asi8 info["fmt"][:] = "" info["maj"][[0, -1]] = True # .. 
and set some shortcuts diff --git a/pandas/tests/frame/methods/test_asof.py b/pandas/tests/frame/methods/test_asof.py index e2b417972638e..c68a547a3755a 100644 --- a/pandas/tests/frame/methods/test_asof.py +++ b/pandas/tests/frame/methods/test_asof.py @@ -1,7 +1,17 @@ import numpy as np import pytest -from pandas import DataFrame, Period, Series, Timestamp, date_range, to_datetime +from pandas._libs.tslibs import IncompatibleFrequency + +from pandas import ( + DataFrame, + Period, + Series, + Timestamp, + date_range, + period_range, + to_datetime, +) import pandas._testing as tm @@ -156,3 +166,13 @@ def test_is_copy(self, date_range_frame): with tm.assert_produces_warning(None): result["C"] = 1 + + def test_asof_periodindex_mismatched_freq(self): + N = 50 + rng = period_range("1/1/1990", periods=N, freq="H") + df = DataFrame(np.random.randn(N), index=rng) + + # Mismatched freq + msg = "Input has different freq" + with pytest.raises(IncompatibleFrequency, match=msg): + df.asof(rng.asfreq("D")) diff --git a/pandas/tests/indexes/period/test_indexing.py b/pandas/tests/indexes/period/test_indexing.py index 077fa2a0b1c56..a4c6764d065c9 100644 --- a/pandas/tests/indexes/period/test_indexing.py +++ b/pandas/tests/indexes/period/test_indexing.py @@ -795,3 +795,27 @@ def test_period_index_indexer(self): tm.assert_frame_equal(df, df.loc[list(idx)]) tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]]) tm.assert_frame_equal(df, df.loc[list(idx)]) + + +class TestAsOfLocs: + def test_asof_locs_mismatched_type(self): + dti = pd.date_range("2016-01-01", periods=3) + pi = dti.to_period("D") + pi2 = dti.to_period("H") + + mask = np.array([0, 1, 0], dtype=bool) + + msg = "must be DatetimeIndex or PeriodIndex" + with pytest.raises(TypeError, match=msg): + pi.asof_locs(pd.Int64Index(pi.asi8), mask) + + with pytest.raises(TypeError, match=msg): + pi.asof_locs(pd.Float64Index(pi.asi8), mask) + + with pytest.raises(TypeError, match=msg): + # TimedeltaIndex + pi.asof_locs(dti - dti, mask) + + msg = "Input has different freq=H" + with pytest.raises(libperiod.IncompatibleFrequency, match=msg): + pi.asof_locs(pi2, mask) diff --git a/pandas/tests/series/methods/test_asof.py b/pandas/tests/series/methods/test_asof.py index b121efd202744..d6462e13b1ce7 100644 --- a/pandas/tests/series/methods/test_asof.py +++ b/pandas/tests/series/methods/test_asof.py @@ -1,6 +1,8 @@ import numpy as np import pytest +from pandas._libs.tslibs import IncompatibleFrequency + from pandas import Series, Timestamp, date_range, isna, notna, offsets import pandas._testing as tm @@ -132,6 +134,11 @@ def test_periodindex(self): d = ts.index[0].to_timestamp() - offsets.BDay() assert isna(ts.asof(d)) + # Mismatched freq + msg = "Input has different freq" + with pytest.raises(IncompatibleFrequency, match=msg): + ts.asof(rng.asfreq("D")) + def test_errors(self): s = Series(
https://api.github.com/repos/pandas-dev/pandas/pulls/32310
2020-02-27T21:04:06Z
2020-03-08T15:39:07Z
2020-03-08T15:39:07Z
2020-03-08T16:04:31Z
DOC: Reorder 1.0 releases in whatsnew/index.rst
diff --git a/doc/source/whatsnew/index.rst b/doc/source/whatsnew/index.rst index 68aabfe76d8de..cbfeb0352c283 100644 --- a/doc/source/whatsnew/index.rst +++ b/doc/source/whatsnew/index.rst @@ -7,7 +7,7 @@ Release Notes ************* This is the list of changes to pandas between each release. For full details, -see the commit logs at https://github.com/pandas-dev/pandas. For install and +see the `commit logs <https://github.com/pandas-dev/pandas/commits/>`_. For install and upgrade instructions, see :ref:`install`. Version 1.1 @@ -24,9 +24,9 @@ Version 1.0 .. toctree:: :maxdepth: 2 - v1.0.0 - v1.0.1 v1.0.2 + v1.0.1 + v1.0.0 Version 0.25 ------------
- Reorders the 1.x releases in descending chronological order (consistent with the other version sections) - Hyperlinks `commit logs`
https://api.github.com/repos/pandas-dev/pandas/pulls/32309
2020-02-27T20:11:29Z
2020-02-28T00:09:16Z
2020-02-28T00:09:16Z
2020-02-28T00:09:20Z
BUG: set df.plot.area linewidth to 0
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 2b64b85863def..ee11c69e5b414 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -233,6 +233,7 @@ Plotting - :func:`.plot` for line/bar now accepts color by dictonary (:issue:`8193`). - - Bug in :meth:`DataFrame.boxplot` and :meth:`DataFrame.plot.boxplot` lost color attributes of ``medianprops``, ``whiskerprops``, ``capprops`` and ``medianprops`` (:issue:`30346`) +- Bug in :meth:`DataFrame.plot.area` set default `linewidth` parameter to a nonzero value, giving glitchy outputs using matplotlib backend. (:issue:`32106`) Groupby/resample/rolling diff --git a/pandas/plotting/_matplotlib/core.py b/pandas/plotting/_matplotlib/core.py index 63d0b8abe59d9..18bc96dc6f523 100644 --- a/pandas/plotting/_matplotlib/core.py +++ b/pandas/plotting/_matplotlib/core.py @@ -1241,6 +1241,7 @@ def _plot( **kwds, ): + kwds.setdefault("lw", 0) if column_num == 0: cls._initialize_stacker(ax, stacking_id, len(y)) y_values = cls._get_stacked_values(ax, stacking_id, y, kwds["label"])
- [x] closes #32106 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry (`1.1.0`)
https://api.github.com/repos/pandas-dev/pandas/pulls/32305
2020-02-27T18:39:27Z
2020-04-07T15:47:10Z
null
2020-04-07T15:47:11Z
BUG: Fix __ne__ comparison for Categorical
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 0f18a1fd81815..7e6c817910476 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -198,6 +198,7 @@ Categorical - Bug where :func:`merge` was unable to join on non-unique categorical indices (:issue:`28189`) - Bug when passing categorical data to :class:`Index` constructor along with ``dtype=object`` incorrectly returning a :class:`CategoricalIndex` instead of object-dtype :class:`Index` (:issue:`32167`) +- Bug where :class:`Categorical` comparison operator ``__ne__`` would incorrectly evaluate to ``False`` when either element was missing (:issue:`32276`) - Datetimelike diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index a5048e3aae899..6d1d29db10865 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -103,7 +103,10 @@ def func(self, other): mask = (self._codes == -1) | (other_codes == -1) if mask.any(): # In other series, the leads to False, so do that here too - ret[mask] = False + if opname == "__ne__": + ret[(self._codes == -1) & (other_codes == -1)] = True + else: + ret[mask] = False return ret if is_scalar(other): diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index 69a97f5c9fe02..059d3453995bd 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -282,6 +282,19 @@ def _compare_other(self, s, data, op_name, other): with pytest.raises(TypeError, match=msg): op(data, other) + @pytest.mark.parametrize( + "categories", + [["a", "b"], [0, 1], [pd.Timestamp("2019"), pd.Timestamp("2020")]], + ) + def test_not_equal_with_na(self, categories): + # https://github.com/pandas-dev/pandas/issues/32276 + c1 = Categorical.from_codes([-1, 0], categories=categories) + c2 = Categorical.from_codes([0, 1], categories=categories) + + result = c1 != c2 + + assert result.all() + class TestParsing(base.BaseParsingTests): pass
- [x] closes #32276 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/32304
2020-02-27T18:25:01Z
2020-03-03T03:28:43Z
2020-03-03T03:28:43Z
2020-09-09T01:01:00Z
TYP: Groupby sum|prod|min|max|first|last methods
diff --git a/pandas/core/groupby/groupby.py b/pandas/core/groupby/groupby.py index b9b403ffdc69a..20da8ca3ec2b7 100644 --- a/pandas/core/groupby/groupby.py +++ b/pandas/core/groupby/groupby.py @@ -36,7 +36,6 @@ class providing the base-class of operations. from pandas._libs import Timestamp import pandas._libs.groupby as libgroupby from pandas._typing import FrameOrSeries, Scalar -from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution, cache_readonly, doc @@ -192,6 +191,24 @@ class providing the base-class of operations. """, ) +_groupby_agg_method_template = """ +Compute {fname} of group values. + +Parameters +---------- +numeric_only : bool, default {no} + Include only float, int, boolean columns. If None, will attempt to use + everything, then use only numeric data. +min_count : int, default {mc} + The required number of valid values to perform the operation. If fewer + than ``min_count`` non-NA values are present the result will be NA. + +Returns +------- +Series or DataFrame + Computed {fname} of values within each group. +""" + _pipe_template = """ Apply a function `func` with arguments to this %(klass)s object and return the function's result. @@ -945,6 +962,37 @@ def _wrap_transformed_output(self, output: Mapping[base.OutputKey, np.ndarray]): def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False): raise AbstractMethodError(self) + def _agg_general( + self, + numeric_only: bool = True, + min_count: int = -1, + *, + alias: str, + npfunc: Callable, + ): + self._set_group_selection() + + # try a cython aggregation if we can + try: + return self._cython_agg_general( + how=alias, alt=npfunc, numeric_only=numeric_only, min_count=min_count, + ) + except DataError: + pass + except NotImplementedError as err: + if "function is not implemented for this dtype" in str( + err + ) or "category dtype not supported" in str(err): + # raised in _get_cython_function, in some cases can + # be trimmed by implementing cython funcs for more dtypes + pass + else: + raise + + # apply a non-cython aggregation + result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) + return result + def _cython_agg_general( self, how: str, alt=None, numeric_only: bool = True, min_count: int = -1 ): @@ -1117,6 +1165,27 @@ def _apply_filter(self, indices, dropna): OutputFrameOrSeries = TypeVar("OutputFrameOrSeries", bound=NDFrame) +def get_loc_notna(obj: "Series", *, loc: int): + """Find the value in position ``loc`` after filtering ``obj`` for nan values. + + if ``obj`` is empty or has only nan values, np.nan er returned. + + Examples + -------- + >>> ser = pd.Series([np.nan, np.nan, 1, 2, np.nan]) + >>> get_loc_notna(ser, loc=0) # get first non-na + 1.0 + >>> get_loc_notna(ser, loc=-1) # get last non-na + 2.0 + """ + x = obj.to_numpy() + x = x[notna(x)] + + if len(x) == 0: + return np.nan + return x[loc] + + class GroupBy(_GroupBy[FrameOrSeries]): """ Class for grouping and aggregating relational data. @@ -1438,105 +1507,63 @@ def size(self): result = self._obj_1d_constructor(result) return self._reindex_output(result, fill_value=0) - @classmethod - def _add_numeric_operations(cls): - """ - Add numeric operations to the GroupBy generically. 
- """ - - def groupby_function( - name: str, - alias: str, - npfunc, - numeric_only: bool = True, - min_count: int = -1, - ): + @doc(_groupby_agg_method_template, fname="sum", no=True, mc=0) + def sum(self, numeric_only: bool = True, min_count: int = 0): + return self._agg_general( + numeric_only=numeric_only, min_count=min_count, alias="add", npfunc=np.sum + ) - _local_template = """ - Compute %(f)s of group values. - - Parameters - ---------- - numeric_only : bool, default %(no)s - Include only float, int, boolean columns. If None, will attempt to use - everything, then use only numeric data. - min_count : int, default %(mc)s - The required number of valid values to perform the operation. If fewer - than ``min_count`` non-NA values are present the result will be NA. - - Returns - ------- - Series or DataFrame - Computed %(f)s of values within each group. - """ + @doc(_groupby_agg_method_template, fname="prod", no=True, mc=0) + def prod(self, numeric_only: bool = True, min_count: int = 0): + return self._agg_general( + numeric_only=numeric_only, min_count=min_count, alias="prod", npfunc=np.prod + ) - @Substitution(name="groupby", f=name, no=numeric_only, mc=min_count) - @Appender(_common_see_also) - @Appender(_local_template) - def func(self, numeric_only=numeric_only, min_count=min_count): - self._set_group_selection() + @doc(_groupby_agg_method_template, fname="min", no=False, mc=-1) + def min(self, numeric_only: bool = False, min_count: int = -1): + return self._agg_general( + numeric_only=numeric_only, min_count=min_count, alias="min", npfunc=np.min + ) - # try a cython aggregation if we can - try: - return self._cython_agg_general( - how=alias, - alt=npfunc, - numeric_only=numeric_only, - min_count=min_count, - ) - except DataError: - pass - except NotImplementedError as err: - if "function is not implemented for this dtype" in str( - err - ) or "category dtype not supported" in str(err): - # raised in _get_cython_function, in some cases can - # be trimmed by implementing cython funcs for more dtypes - pass - else: - raise - - # apply a non-cython aggregation - result = self.aggregate(lambda x: npfunc(x, axis=self.axis)) - return result - - set_function_name(func, name, cls) - - return func - - def first_compat(x, axis=0): - def first(x): - x = x.to_numpy() - - x = x[notna(x)] - if len(x) == 0: - return np.nan - return x[0] + @doc(_groupby_agg_method_template, fname="max", no=False, mc=-1) + def max(self, numeric_only: bool = False, min_count: int = -1): + return self._agg_general( + numeric_only=numeric_only, min_count=min_count, alias="max", npfunc=np.max + ) + @doc(_groupby_agg_method_template, fname="first", no=False, mc=-1) + def first(self, numeric_only: bool = False, min_count: int = -1): + def first_compat(x, axis: int = 0): + """Helper function for first item that isn't NA. + """ if isinstance(x, DataFrame): - return x.apply(first, axis=axis) + return x.apply(get_loc_notna, axis=axis, loc=0) else: - return first(x) + return get_loc_notna(x, loc=0) - def last_compat(x, axis=0): - def last(x): - x = x.to_numpy() - x = x[notna(x)] - if len(x) == 0: - return np.nan - return x[-1] + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="first", + npfunc=first_compat, + ) + @doc(_groupby_agg_method_template, fname="last", no=False, mc=-1) + def last(self, numeric_only: bool = False, min_count: int = -1): + def last_compat(x, axis: int = 0): + """Helper function for last item that isn't NA. 
+ """ if isinstance(x, DataFrame): - return x.apply(last, axis=axis) + return x.apply(get_loc_notna, axis=axis, loc=-1) else: - return last(x) + return get_loc_notna(x, loc=-1) - cls.sum = groupby_function("sum", "add", np.sum, min_count=0) - cls.prod = groupby_function("prod", "prod", np.prod, min_count=0) - cls.min = groupby_function("min", "min", np.min, numeric_only=False) - cls.max = groupby_function("max", "max", np.max, numeric_only=False) - cls.first = groupby_function("first", "first", first_compat, numeric_only=False) - cls.last = groupby_function("last", "last", last_compat, numeric_only=False) + return self._agg_general( + numeric_only=numeric_only, + min_count=min_count, + alias="last", + npfunc=last_compat, + ) @Substitution(name="groupby") @Appender(_common_see_also) @@ -2636,9 +2663,6 @@ def _reindex_output( return output.reset_index(drop=True) -GroupBy._add_numeric_operations() - - @doc(GroupBy) def get_groupby( obj: NDFrame, diff --git a/pandas/util/_decorators.py b/pandas/util/_decorators.py index 92bfce7ec9c83..0fac0ce4d9bd0 100644 --- a/pandas/util/_decorators.py +++ b/pandas/util/_decorators.py @@ -329,7 +329,7 @@ def wrapper(*args, **kwargs) -> Callable[..., Any]: return decorate -def doc(*args: Union[str, Callable], **kwargs: str) -> Callable[[F], F]: +def doc(*args: Union[str, Callable], **kwargs: Any) -> Callable[[F], F]: """ A decorator take docstring templates, concatenate them and perform string substitution on it. @@ -345,8 +345,8 @@ def doc(*args: Union[str, Callable], **kwargs: str) -> Callable[[F], F]: *args : str or callable The string / docstring / docstring template to be appended in order after default docstring under function. - **kwargs : str - The string which would be used to format docstring template. + **kwargs : Any + The objects which would be used to format docstring template. """ def decorator(func: F) -> F:
This does two things: * lets mypy know the signatures of the agg methods (``sum``, ``prod``, etc.) on GroupBy * gives the agg methods on GroupBy objects a return type This ensures that mypy and IDEs always know that the return type of e.g. ``df.groupby('a').sum()`` is a DataFrame, which is nice when chaining.
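To illustrate the chaining benefit, a minimal sketch (hypothetical snippet, not taken from the PR; assumes the refactored methods are in place):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 1, 2], "b": [10.0, 20.0, 30.0]})

# ``sum`` is now a concrete method with an annotated signature, so type
# checkers can see the grouped result is a DataFrame...
summed = df.groupby("a").sum()

# ...and further DataFrame methods chain with full type information
out = summed.rename(columns=str.upper)
```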
https://api.github.com/repos/pandas-dev/pandas/pulls/32302
2020-02-27T18:14:32Z
2020-05-15T21:20:59Z
null
2020-05-15T21:21:23Z
REF/TST: misplaced tests in test_timeseries, test_timezones
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 72fc0010cbbce..a42cfc6a214ad 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -7,6 +7,7 @@ import numpy.ma as ma import numpy.ma.mrecords as mrecords import pytest +import pytz from pandas.compat import is_platform_little_endian from pandas.compat.numpy import _is_numpy_dev @@ -2389,6 +2390,12 @@ def test_from_records_series_list_dict(self): result = DataFrame.from_records(data) tm.assert_frame_equal(result, expected) + def test_frame_from_records_utc(self): + rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)} + + # it works + DataFrame.from_records([rec], index="begin_time") + def test_to_frame_with_falsey_names(self): # GH 16114 result = Series(name=0, dtype=object).to_frame().dtypes @@ -2460,6 +2467,18 @@ def test_construct_with_two_categoricalindex_series(self): ) tm.assert_frame_equal(result, expected) + def test_from_M8_structured(self): + dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))] + arr = np.array(dates, dtype=[("Date", "M8[us]"), ("Forecasting", "M8[us]")]) + df = DataFrame(arr) + + assert df["Date"][0] == dates[0][0] + assert df["Forecasting"][0] == dates[0][1] + + s = Series(arr["Date"]) + assert isinstance(s[0], Timestamp) + assert s[0] == dates[0][0] + class TestDataFrameConstructorWithDatetimeTZ: def test_from_dict(self): diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index 00a253d4e5ad0..dfd4fb1855383 100644 --- a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -1,8 +1,6 @@ """ Tests for DataFrame timezone-related methods """ -from datetime import datetime - import numpy as np import pytest import pytz @@ -53,12 +51,6 @@ def test_frame_values_with_tz(self): result = df.values tm.assert_numpy_array_equal(result, expected) - def test_frame_from_records_utc(self): - rec = {"datum": 1.5, "begin_time": datetime(2006, 4, 27, tzinfo=pytz.utc)} - - # it works - DataFrame.from_records([rec], index="begin_time") - def test_frame_join_tzaware(self): test1 = DataFrame( np.zeros((6, 3)), diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index 1a72ef2bdf1aa..6217f225d496e 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -5,7 +5,7 @@ import pytest import pandas as pd -from pandas import DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets +from pandas import DataFrame, DatetimeIndex, Index, NaT, Timestamp, date_range, offsets import pandas._testing as tm randn = np.random.randn @@ -20,6 +20,24 @@ def test_roundtrip_pickle_with_tz(self): unpickled = tm.round_trip_pickle(index) tm.assert_index_equal(index, unpickled) + def test_pickle(self): + + # GH#4606 + p = tm.round_trip_pickle(NaT) + assert p is NaT + + idx = pd.to_datetime(["2013-01-01", NaT, "2014-01-06"]) + idx_p = tm.round_trip_pickle(idx) + assert idx_p[0] == idx[0] + assert idx_p[1] is NaT + assert idx_p[2] == idx[2] + + # GH#11002 + # don't infer freq + idx = date_range("1750-1-1", "2050-1-1", freq="7D") + idx_p = tm.round_trip_pickle(idx) + tm.assert_index_equal(idx, idx_p) + def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self): # GH7774 index = date_range("20130101", periods=3, tz="US/Eastern") diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py 
index d58ecbad4c1b3..ba069f5245de4 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -347,6 +347,41 @@ def test_datetimeindex_diff(self, sort): dti2 = date_range(freq="Q-JAN", start=datetime(1997, 12, 31), periods=98) assert len(dti1.difference(dti2, sort)) == 2 + @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Eastern"]) + def test_setops_preserve_freq(self, tz): + rng = date_range("1/1/2000", "1/1/2002", name="idx", tz=tz) + + result = rng[:50].union(rng[50:100]) + assert result.name == rng.name + assert result.freq == rng.freq + assert result.tz == rng.tz + + result = rng[:50].union(rng[30:100]) + assert result.name == rng.name + assert result.freq == rng.freq + assert result.tz == rng.tz + + result = rng[:50].union(rng[60:100]) + assert result.name == rng.name + assert result.freq is None + assert result.tz == rng.tz + + result = rng[:50].intersection(rng[25:75]) + assert result.name == rng.name + assert result.freqstr == "D" + assert result.tz == rng.tz + + nofreq = DatetimeIndex(list(rng[25:75]), name="other") + result = rng[:50].union(nofreq) + assert result.name is None + assert result.freq == rng.freq + assert result.tz == rng.tz + + result = rng[:50].intersection(nofreq) + assert result.name is None + assert result.freq == rng.freq + assert result.tz == rng.tz + class TestBusinessDatetimeIndex: def setup_method(self, method): diff --git a/pandas/tests/indexes/multi/test_get_level_values.py b/pandas/tests/indexes/multi/test_get_level_values.py new file mode 100644 index 0000000000000..6f0b23c1ef4a0 --- /dev/null +++ b/pandas/tests/indexes/multi/test_get_level_values.py @@ -0,0 +1,13 @@ +from pandas import MultiIndex, Timestamp, date_range + + +class TestGetLevelValues: + def test_get_level_values_box_datetime64(self): + + dates = date_range("1/1/2000", periods=4) + levels = [dates, [0, 1]] + codes = [[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]] + + index = MultiIndex(levels=levels, codes=codes) + + assert isinstance(index.get_level_values(0)[0], Timestamp) diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index b0d06793dbe13..1a794f8656abe 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -1421,3 +1421,10 @@ def test_constructor_tz_mixed_data(self): result = Series(dt_list) expected = Series(dt_list, dtype=object) tm.assert_series_equal(result, expected) + + def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture): + # GH#25843 + tz = tz_aware_fixture + result = Series([Timestamp("2019", tz=tz)], dtype="datetime64[ns]") + expected = Series([Timestamp("2019")]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index efaf5f806e935..c4b2e2edd845a 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -1,21 +1,11 @@ -from datetime import datetime from io import StringIO import numpy as np -import pytest from pandas._libs.tslib import iNaT import pandas as pd -from pandas import ( - DataFrame, - DatetimeIndex, - NaT, - Series, - Timestamp, - date_range, - timedelta_range, -) +from pandas import DataFrame, DatetimeIndex, Series, date_range, timedelta_range import pandas._testing as tm @@ -225,82 +215,6 @@ def test_asfreq_resample_set_correct_freq(self): # does .resample() set .freq correctly? 
assert df.resample("D").asfreq().index.freq == "D" - def test_pickle(self): - - # GH4606 - p = tm.round_trip_pickle(NaT) - assert p is NaT - - idx = pd.to_datetime(["2013-01-01", NaT, "2014-01-06"]) - idx_p = tm.round_trip_pickle(idx) - assert idx_p[0] == idx[0] - assert idx_p[1] is NaT - assert idx_p[2] == idx[2] - - # GH11002 - # don't infer freq - idx = date_range("1750-1-1", "2050-1-1", freq="7D") - idx_p = tm.round_trip_pickle(idx) - tm.assert_index_equal(idx, idx_p) - - @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Eastern"]) - def test_setops_preserve_freq(self, tz): - rng = date_range("1/1/2000", "1/1/2002", name="idx", tz=tz) - - result = rng[:50].union(rng[50:100]) - assert result.name == rng.name - assert result.freq == rng.freq - assert result.tz == rng.tz - - result = rng[:50].union(rng[30:100]) - assert result.name == rng.name - assert result.freq == rng.freq - assert result.tz == rng.tz - - result = rng[:50].union(rng[60:100]) - assert result.name == rng.name - assert result.freq is None - assert result.tz == rng.tz - - result = rng[:50].intersection(rng[25:75]) - assert result.name == rng.name - assert result.freqstr == "D" - assert result.tz == rng.tz - - nofreq = DatetimeIndex(list(rng[25:75]), name="other") - result = rng[:50].union(nofreq) - assert result.name is None - assert result.freq == rng.freq - assert result.tz == rng.tz - - result = rng[:50].intersection(nofreq) - assert result.name is None - assert result.freq == rng.freq - assert result.tz == rng.tz - - def test_from_M8_structured(self): - dates = [(datetime(2012, 9, 9, 0, 0), datetime(2012, 9, 8, 15, 10))] - arr = np.array(dates, dtype=[("Date", "M8[us]"), ("Forecasting", "M8[us]")]) - df = DataFrame(arr) - - assert df["Date"][0] == dates[0][0] - assert df["Forecasting"][0] == dates[0][1] - - s = Series(arr["Date"]) - assert isinstance(s[0], Timestamp) - assert s[0] == dates[0][0] - - def test_get_level_values_box(self): - from pandas import MultiIndex - - dates = date_range("1/1/2000", periods=4) - levels = [dates, [0, 1]] - codes = [[0, 0, 1, 1, 2, 2, 3, 3], [0, 1, 0, 1, 0, 1, 0, 1]] - - index = MultiIndex(levels=levels, codes=codes) - - assert isinstance(index.get_level_values(0)[0], Timestamp) - def test_view_tz(self): # GH#24024 ser = pd.Series(pd.date_range("2000", periods=4, tz="US/Central")) diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index a45c0bf8cf154..ae4fd12abdb88 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -76,10 +76,3 @@ def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz): np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz) ) tm.assert_series_equal(result, expected) - - def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture): - # GH 25843 - tz = tz_aware_fixture - result = Series([Timestamp("2019", tz=tz)], dtype="datetime64[ns]") - expected = Series([Timestamp("2019")]) - tm.assert_series_equal(result, expected)
https://api.github.com/repos/pandas-dev/pandas/pulls/32300
2020-02-27T17:59:40Z
2020-02-27T18:45:19Z
2020-02-27T18:45:19Z
2020-02-27T20:05:29Z
CLN: Using sum instead of looping
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 7a18429f21a18..b66d67fedee33 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -120,13 +120,9 @@ def memory_usage_of_objects(arr: object[:]) -> int64_t: """ i: Py_ssize_t n: Py_ssize_t - size: int64_t - size = 0 n = len(arr) - for i in range(n): - size += arr[i].__sizeof__() - return size + return sum(arr[i].__sizeof__() for i in range(n)) # ----------------------------------------------------------------------
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
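For context, the same cleanup expressed in plain Python — a sketch of the idiom only; the PR itself applies it inside the Cython function `memory_usage_of_objects`:

```python
objs = ["a", "bb", [1, 2, 3]]

# before: explicit accumulator loop
size = 0
for obj in objs:
    size += obj.__sizeof__()

# after: feed a generator expression straight to the built-in sum
assert size == sum(obj.__sizeof__() for obj in objs)
```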
https://api.github.com/repos/pandas-dev/pandas/pulls/32299
2020-02-27T17:32:41Z
2020-02-28T15:38:52Z
null
2020-02-29T10:28:28Z
fixed minor docstring typo
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b6b6a4fe74ed5..6f5aef4884ccd 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -832,7 +832,6 @@ def style(self) -> "Styler": Returns a Styler object. Contains methods for building a styled HTML representation of the DataFrame. - a styled HTML representation fo the DataFrame. See Also --------
- [X] passes `black pandas` - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/32298
2020-02-27T16:41:15Z
2020-02-27T17:53:35Z
2020-02-27T17:53:35Z
2020-02-27T17:53:49Z
TST: remove invalid internals tests
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 329bfdf543c62..e397167e4881f 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -711,16 +711,16 @@ def combine(self, blocks, copy=True): return type(self)(new_blocks, axes, do_integrity_check=False) def get_slice(self, slobj: slice, axis: int = 0): - if axis >= self.ndim: - raise IndexError("Requested axis not found in manager") if axis == 0: new_blocks = self._slice_take_blocks_ax0(slobj) - else: + elif axis == 1: _slicer = [slice(None)] * (axis + 1) _slicer[axis] = slobj slicer = tuple(_slicer) new_blocks = [blk.getitem_block(slicer) for blk in self.blocks] + else: + raise IndexError("Requested axis not found in manager") new_axes = list(self.axes) new_axes[axis] = new_axes[axis][slobj] diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 27b0500983afd..0b9d84d261708 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -748,11 +748,6 @@ class TestIndexing: create_mgr("a,b,c,d,e,f: i8", item_shape=(N,)), create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N,)), create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N,)), - # 3-dim - create_mgr("a,b,c,d,e,f: f8", item_shape=(N, N)), - create_mgr("a,b,c,d,e,f: i8", item_shape=(N, N)), - create_mgr("a,b: f8; c,d: i8; e,f: string", item_shape=(N, N)), - create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N, N)), ] @pytest.mark.parametrize("mgr", MANAGERS) @@ -775,6 +770,7 @@ def assert_slice_ok(mgr, axis, slobj): ) tm.assert_index_equal(mgr.axes[axis][slobj], sliced.axes[axis]) + assert mgr.ndim <= 2, mgr.ndim for ax in range(mgr.ndim): # slice assert_slice_ok(mgr, ax, slice(None))
Delaying the validation check in get_slice is also nice.
https://api.github.com/repos/pandas-dev/pandas/pulls/32297
2020-02-27T16:31:16Z
2020-02-28T11:49:24Z
2020-02-28T11:49:24Z
2020-02-28T16:16:32Z
ENH: Move corrwith from transformation to reduction kernels in groupby.base
diff --git a/pandas/core/groupby/base.py b/pandas/core/groupby/base.py index 700d8d503d086..363286704ba95 100644 --- a/pandas/core/groupby/base.py +++ b/pandas/core/groupby/base.py @@ -98,6 +98,7 @@ def _gotitem(self, key, ndim, subset=None): [ "all", "any", + "corrwith", "count", "first", "idxmax", @@ -132,7 +133,6 @@ def _gotitem(self, key, ndim, subset=None): [ "backfill", "bfill", - "corrwith", "cumcount", "cummax", "cummin", diff --git a/pandas/tests/groupby/test_categorical.py b/pandas/tests/groupby/test_categorical.py index 9b07269811d8e..9ea5252b91e13 100644 --- a/pandas/tests/groupby/test_categorical.py +++ b/pandas/tests/groupby/test_categorical.py @@ -1262,6 +1262,9 @@ def test_series_groupby_on_2_categoricals_unobserved( if reduction_func == "ngroup": pytest.skip("ngroup is not truly a reduction") + if reduction_func == "corrwith": # GH 32293 + pytest.xfail("TODO: implement SeriesGroupBy.corrwith") + df = pd.DataFrame( { "cat_1": pd.Categorical(list("AABB"), categories=list("ABCD")), diff --git a/pandas/tests/groupby/test_transform.py b/pandas/tests/groupby/test_transform.py index 740103eec185a..2295eb2297fa6 100644 --- a/pandas/tests/groupby/test_transform.py +++ b/pandas/tests/groupby/test_transform.py @@ -327,9 +327,9 @@ def test_transform_transformation_func(transformation_func): } ) - if transformation_func in ["pad", "backfill", "tshift", "corrwith", "cumcount"]: + if transformation_func in ["pad", "backfill", "tshift", "cumcount"]: # These transformation functions are not yet covered in this test - pytest.xfail("See GH 31269 and GH 31270") + pytest.xfail("See GH 31269") elif _is_numpy_dev and transformation_func in ["cummin"]: pytest.xfail("https://github.com/pandas-dev/pandas/issues/31992") elif transformation_func == "fillna": @@ -1093,8 +1093,10 @@ def test_transform_agg_by_name(reduction_func, obj): pytest.xfail("TODO: g.transform('ngroup') doesn't work") if func == "size": # GH#27469 pytest.xfail("TODO: g.transform('size') doesn't work") + if func == "corrwith" and isinstance(obj, Series): # GH#32293 + pytest.xfail("TODO: implement SeriesGroupBy.corrwith") - args = {"nth": [0], "quantile": [0.5]}.get(func, []) + args = {"nth": [0], "quantile": [0.5], "corrwith": [obj]}.get(func, []) result = g.transform(func, *args)
- [x] closes #31270 - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
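For intuition, a small hedged illustration (hypothetical data) of why `corrwith` sits with the reduction kernels — it collapses each group to a single row rather than returning one row per input row:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "g": ["a", "a", "a", "b", "b", "b"],
        "x": [1.0, 2.0, 3.0, 1.0, 2.0, 4.0],
        "y": [1.0, 2.1, 2.9, 4.0, 3.0, 1.0],
    }
)

# corrwith collapses each group to a single row of correlations,
# i.e. it reduces rather than transforms
result = df.groupby("g").corrwith(df["y"])
assert len(result) == df["g"].nunique()
```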
https://api.github.com/repos/pandas-dev/pandas/pulls/32294
2020-02-27T13:10:59Z
2020-03-12T04:47:13Z
2020-03-12T04:47:13Z
2020-05-29T10:47:56Z
TST: Use `sort` fixture in more places
diff --git a/pandas/tests/indexes/base_class/test_setops.py b/pandas/tests/indexes/base_class/test_setops.py index ec3ef8050967c..77b5e2780464d 100644 --- a/pandas/tests/indexes/base_class/test_setops.py +++ b/pandas/tests/indexes/base_class/test_setops.py @@ -91,7 +91,6 @@ def test_union_sort_other_incomparable_true(self): with pytest.raises(TypeError, match=".*"): idx.union(idx[:1], sort=True) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_base(self, sort): # (same results for py2 and py3 but sortedness not tested elsewhere) index = Index([0, "a", 1, "b", 2, "c"]) @@ -103,7 +102,6 @@ def test_intersection_base(self, sort): tm.assert_index_equal(result, expected) @pytest.mark.parametrize("klass", [np.array, Series, list]) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_different_type_base(self, klass, sort): # GH 10149 index = Index([0, "a", 1, "b", 2, "c"]) @@ -123,7 +121,6 @@ def test_intersection_equal_sort(self): tm.assert_index_equal(idx.intersection(idx, sort=False), idx) tm.assert_index_equal(idx.intersection(idx, sort=None), idx) - @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort): # (same results for py2 and py3 but sortedness not tested elsewhere) index = Index([0, "a", 1, "b", 2, "c"]) diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py index dca317a9eb03f..69451501fd7bd 100644 --- a/pandas/tests/indexes/common.py +++ b/pandas/tests/indexes/common.py @@ -512,7 +512,6 @@ def test_union_base(self, indices): with pytest.raises(TypeError, match=msg): first.union([1, 2, 3]) - @pytest.mark.parametrize("sort", [None, False]) def test_difference_base(self, sort, indices): first = indices[2:] second = indices[:4] diff --git a/pandas/tests/indexes/conftest.py b/pandas/tests/indexes/conftest.py new file mode 100644 index 0000000000000..a9fb228073ab4 --- /dev/null +++ b/pandas/tests/indexes/conftest.py @@ -0,0 +1,18 @@ +import pytest + + +@pytest.fixture(params=[None, False]) +def sort(request): + """ + Valid values for the 'sort' parameter used in the Index + setops methods (intersection, union, etc.) + + Caution: + Don't confuse this one with the "sort" fixture used + for DataFrame.append or concat. That one has + parameters [True, False]. + + We can't combine them as sort=True is not permitted + in the Index setops methods.
+ """ + return request.param diff --git a/pandas/tests/indexes/datetimes/test_join.py b/pandas/tests/indexes/datetimes/test_join.py index f2f88fd7dc90c..9a9c94fa19e6d 100644 --- a/pandas/tests/indexes/datetimes/test_join.py +++ b/pandas/tests/indexes/datetimes/test_join.py @@ -64,7 +64,6 @@ def test_join_utc_convert(self, join_type): assert isinstance(result, DatetimeIndex) assert result.tz.zone == "UTC" - @pytest.mark.parametrize("sort", [None, False]) def test_datetimeindex_union_join_empty(self, sort): dti = date_range(start="1/1/2001", end="2/1/2001", freq="D") empty = Index([]) diff --git a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index d58ecbad4c1b3..240da26cb19dc 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -33,7 +33,6 @@ class TestDatetimeIndexSetOps: ] # TODO: moved from test_datetimelike; dedup with version below - @pytest.mark.parametrize("sort", [None, False]) def test_union2(self, sort): everything = tm.makeDateIndex(10) first = everything[:5] @@ -42,7 +41,6 @@ def test_union2(self, sort): tm.assert_index_equal(union, everything) @pytest.mark.parametrize("box", [np.array, Series, list]) - @pytest.mark.parametrize("sort", [None, False]) def test_union3(self, sort, box): everything = tm.makeDateIndex(10) first = everything[:5] @@ -57,7 +55,6 @@ def test_union3(self, sort, box): tm.assert_index_equal(result, expected) @pytest.mark.parametrize("tz", tz) - @pytest.mark.parametrize("sort", [None, False]) def test_union(self, tz, sort): rng1 = pd.date_range("1/1/2000", freq="D", periods=5, tz=tz) other1 = pd.date_range("1/6/2000", freq="D", periods=5, tz=tz) @@ -89,7 +86,6 @@ def test_union(self, tz, sort): else: tm.assert_index_equal(result_union, exp_notsorted) - @pytest.mark.parametrize("sort", [None, False]) def test_union_coverage(self, sort): idx = DatetimeIndex(["2000-01-03", "2000-01-01", "2000-01-02"]) ordered = DatetimeIndex(idx.sort_values(), freq="infer") @@ -100,7 +96,6 @@ def test_union_coverage(self, sort): tm.assert_index_equal(result, ordered) assert result.freq == ordered.freq - @pytest.mark.parametrize("sort", [None, False]) def test_union_bug_1730(self, sort): rng_a = date_range("1/1/2012", periods=4, freq="3H") rng_b = date_range("1/1/2012", periods=4, freq="4H") @@ -113,7 +108,6 @@ def test_union_bug_1730(self, sort): exp = DatetimeIndex(exp) tm.assert_index_equal(result, exp) - @pytest.mark.parametrize("sort", [None, False]) def test_union_bug_1745(self, sort): left = DatetimeIndex(["2012-05-11 15:19:49.695000"]) right = DatetimeIndex( @@ -137,7 +131,6 @@ def test_union_bug_1745(self, sort): exp = exp.sort_values() tm.assert_index_equal(result, exp) - @pytest.mark.parametrize("sort", [None, False]) def test_union_bug_4564(self, sort): from pandas import DateOffset @@ -152,7 +145,6 @@ def test_union_bug_4564(self, sort): exp = DatetimeIndex(exp) tm.assert_index_equal(result, exp) - @pytest.mark.parametrize("sort", [None, False]) def test_union_freq_both_none(self, sort): # GH11086 expected = bdate_range("20150101", periods=10) @@ -188,7 +180,6 @@ def test_union_dataframe_index(self): exp = pd.date_range("1/1/1980", "1/1/2012", freq="MS") tm.assert_index_equal(df.index, exp) - @pytest.mark.parametrize("sort", [None, False]) def test_union_with_DatetimeIndex(self, sort): i1 = Int64Index(np.arange(0, 20, 2)) i2 = date_range(start="2012-01-03 00:00:00", periods=10, freq="D") @@ -218,7 +209,6 @@ def test_intersection2(self): 
@pytest.mark.parametrize( "tz", [None, "Asia/Tokyo", "US/Eastern", "dateutil/US/Pacific"] ) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, tz, sort): # GH 4690 (with tz) base = date_range("6/1/2000", "6/30/2000", freq="D", name="idx") @@ -298,7 +288,6 @@ def test_intersection_bug_1708(self): assert len(result) == 0 @pytest.mark.parametrize("tz", tz) - @pytest.mark.parametrize("sort", [None, False]) def test_difference(self, tz, sort): rng_dates = ["1/2/2000", "1/3/2000", "1/1/2000", "1/4/2000", "1/5/2000"] @@ -324,7 +313,6 @@ def test_difference(self, tz, sort): expected = expected.sort_values() tm.assert_index_equal(result_diff, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_difference_freq(self, sort): # GH14323: difference of DatetimeIndex should not preserve frequency @@ -341,7 +329,6 @@ def test_difference_freq(self, sort): tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal("freq", idx_diff, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_datetimeindex_diff(self, sort): dti1 = date_range(freq="Q-JAN", start=datetime(1997, 12, 31), periods=100) dti2 = date_range(freq="Q-JAN", start=datetime(1997, 12, 31), periods=98) @@ -352,7 +339,6 @@ class TestBusinessDatetimeIndex: def setup_method(self, method): self.rng = bdate_range(START, END) - @pytest.mark.parametrize("sort", [None, False]) def test_union(self, sort): # overlapping left = self.rng[:10] @@ -388,7 +374,6 @@ def test_union(self, sort): the_union = self.rng.union(rng, sort=sort) assert isinstance(the_union, DatetimeIndex) - @pytest.mark.parametrize("sort", [None, False]) def test_union_not_cacheable(self, sort): rng = date_range("1/1/2000", periods=50, freq=Minute()) rng1 = rng[10:] @@ -431,7 +416,6 @@ def test_intersection_bug(self): result = a.intersection(b) tm.assert_index_equal(result, b) - @pytest.mark.parametrize("sort", [None, False]) def test_month_range_union_tz_pytz(self, sort): from pytz import timezone @@ -449,7 +433,6 @@ def test_month_range_union_tz_pytz(self, sort): early_dr.union(late_dr, sort=sort) @td.skip_if_windows_python_3 - @pytest.mark.parametrize("sort", [None, False]) def test_month_range_union_tz_dateutil(self, sort): from pandas._libs.tslibs.timezones import dateutil_gettz @@ -471,7 +454,6 @@ class TestCustomDatetimeIndex: def setup_method(self, method): self.rng = bdate_range(START, END, freq="C") - @pytest.mark.parametrize("sort", [None, False]) def test_union(self, sort): # overlapping left = self.rng[:10] diff --git a/pandas/tests/indexes/interval/test_setops.py b/pandas/tests/indexes/interval/test_setops.py index d9359d717de1d..e3e5070064aff 100644 --- a/pandas/tests/indexes/interval/test_setops.py +++ b/pandas/tests/indexes/interval/test_setops.py @@ -10,11 +10,6 @@ def name(request): return request.param -@pytest.fixture(params=[None, False]) -def sort(request): - return request.param - - def monotonic_index(start, end, dtype="int64", closed="right"): return IntervalIndex.from_breaks(np.arange(start, end, dtype=dtype), closed=closed) @@ -153,7 +148,6 @@ def test_symmetric_difference(self, closed, sort): @pytest.mark.parametrize( "op_name", ["union", "intersection", "difference", "symmetric_difference"] ) - @pytest.mark.parametrize("sort", [None, False]) def test_set_incompatible_types(self, closed, op_name, sort): index = monotonic_index(0, 11, closed=closed) set_op = getattr(index, op_name) diff --git a/pandas/tests/indexes/multi/test_setops.py b/pandas/tests/indexes/multi/test_setops.py index 
627127f7b5b53..d7d0ff4c411aa 100644 --- a/pandas/tests/indexes/multi/test_setops.py +++ b/pandas/tests/indexes/multi/test_setops.py @@ -7,7 +7,6 @@ @pytest.mark.parametrize("case", [0.5, "xxx"]) -@pytest.mark.parametrize("sort", [None, False]) @pytest.mark.parametrize( "method", ["intersection", "union", "difference", "symmetric_difference"] ) @@ -18,7 +17,6 @@ def test_set_ops_error_cases(idx, case, sort, method): getattr(idx, method)(case, sort=sort) -@pytest.mark.parametrize("sort", [None, False]) @pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list]) def test_intersection_base(idx, sort, klass): first = idx[2::-1] # first 3 elements reversed @@ -39,7 +37,6 @@ def test_intersection_base(idx, sort, klass): first.intersection([1, 2, 3], sort=sort) -@pytest.mark.parametrize("sort", [None, False]) @pytest.mark.parametrize("klass", [MultiIndex, np.array, Series, list]) def test_union_base(idx, sort, klass): first = idx[::-1] @@ -60,7 +57,6 @@ def test_union_base(idx, sort, klass): first.union([1, 2, 3], sort=sort) -@pytest.mark.parametrize("sort", [None, False]) def test_difference_base(idx, sort): second = idx[4:] answer = idx[:4] @@ -83,7 +79,6 @@ def test_difference_base(idx, sort): idx.difference([1, 2, 3], sort=sort) -@pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference(idx, sort): first = idx[1:] second = idx[:-1] @@ -123,7 +118,6 @@ def test_empty(idx): assert idx[:0].empty -@pytest.mark.parametrize("sort", [None, False]) def test_difference(idx, sort): first = idx @@ -234,7 +228,6 @@ def test_difference_sort_incomparable_true(): idx.difference(other, sort=True) -@pytest.mark.parametrize("sort", [None, False]) def test_union(idx, sort): piece1 = idx[:5][::-1] piece2 = idx[3:] @@ -270,7 +263,6 @@ def test_union(idx, sort): # assert result.equals(result2) -@pytest.mark.parametrize("sort", [None, False]) def test_intersection(idx, sort): piece1 = idx[:5][::-1] piece2 = idx[3:] diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index 647d56d33f312..88f1687b8bb10 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -13,7 +13,6 @@ def _permute(obj): class TestPeriodIndex: - @pytest.mark.parametrize("sort", [None, False]) def test_union(self, sort): # union other1 = period_range("1/1/2000", freq="D", periods=5) @@ -134,7 +133,6 @@ def test_union(self, sort): expected = expected.sort_values() tm.assert_index_equal(result_union, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_union_misc(self, sort): index = period_range("1/1/2000", "1/20/2000", freq="D") @@ -165,7 +163,6 @@ def test_union_dataframe_index(self): exp = period_range("1/1/1980", "1/1/2012", freq="M") tm.assert_index_equal(df.index, exp) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, sort): index = period_range("1/1/2000", "1/20/2000", freq="D") @@ -190,7 +187,6 @@ def test_intersection(self, sort): with pytest.raises(IncompatibleFrequency): index.intersection(index3, sort=sort) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_cases(self, sort): base = period_range("6/1/2000", "6/30/2000", freq="D", name="idx") @@ -259,7 +255,6 @@ def test_intersection_cases(self, sort): result = rng.intersection(rng[0:0]) assert len(result) == 0 - @pytest.mark.parametrize("sort", [None, False]) def test_difference(self, sort): # diff period_rng = ["1/3/2000", "1/2/2000", "1/1/2000", "1/5/2000", "1/4/2000"] @@ -324,7 +319,6 @@ def 
test_difference(self, sort): expected = expected.sort_values() tm.assert_index_equal(result_difference, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_difference_freq(self, sort): # GH14323: difference of Period MUST preserve frequency # but the ability to union results must be preserved diff --git a/pandas/tests/indexes/ranges/test_setops.py b/pandas/tests/indexes/ranges/test_setops.py index 8e749e0752087..5b565310cfb9c 100644 --- a/pandas/tests/indexes/ranges/test_setops.py +++ b/pandas/tests/indexes/ranges/test_setops.py @@ -8,7 +8,6 @@ class TestRangeIndexSetOps: - @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, sort): # intersect with Int64Index index = RangeIndex(start=0, stop=20, step=2) @@ -79,7 +78,6 @@ def test_intersection(self, sort): expected = RangeIndex(0, 0, 1) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [False, None]) def test_union_noncomparable(self, sort): # corner case, non-Int64Index index = RangeIndex(start=0, stop=20, step=2) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 22f6af2af4aed..0c4a790646a81 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -681,7 +681,6 @@ def test_empty_fancy_raises(self, index): with pytest.raises(IndexError, match=msg): index[empty_farr] - @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, index, sort): first = index[:20] second = index[:10] @@ -702,7 +701,6 @@ def test_intersection(self, index, sort): (Index([3, 4, 5, 6, 7]), False), ], ) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_name_preservation(self, index2, keeps_name, sort): index1 = Index([1, 2, 3, 4, 5], name="index") expected = Index([3, 4, 5]) @@ -718,7 +716,6 @@ def test_intersection_name_preservation(self, index2, keeps_name, sort): "first_name,second_name,expected_name", [("A", "A", "A"), ("A", "B", None), (None, "B", None)], ) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_name_preservation2( self, index, first_name, second_name, expected_name, sort ): @@ -736,7 +733,6 @@ def test_intersection_name_preservation2( (Index([4, 7, 6, 5, 3], name="other"), False), ], ) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_monotonic(self, index2, keeps_name, sort): index1 = Index([5, 3, 2, 4, 1], name="index") expected = Index([5, 3, 4]) @@ -753,7 +749,6 @@ def test_intersection_monotonic(self, index2, keeps_name, sort): "index2,expected_arr", [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B", "A"])], ) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort): # non-monotonic non-unique index1 = Index(["A", "B", "A", "C"]) @@ -763,7 +758,6 @@ def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort) expected = expected.sort_values() tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_intersect_str_dates(self, sort): dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)] @@ -780,7 +774,6 @@ def test_intersection_equal_sort_true(self): sorted_ = pd.Index(["a", "b", "c"]) tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_) - @pytest.mark.parametrize("sort", [None, False]) def test_chained_union(self, sort): # Chained unions handles names correctly i1 = Index([1, 2], name="i1") @@ -797,7 +790,6 @@ def test_chained_union(self, sort): expected = j1.union(j2, 
sort=sort).union(j3, sort=sort) tm.assert_index_equal(union, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_union(self, index, sort): first = index[5:20] second = index[:10] @@ -835,7 +827,6 @@ def test_union_sort_special_true(self, slice_): tm.assert_index_equal(result, expected) @pytest.mark.parametrize("klass", [np.array, Series, list]) - @pytest.mark.parametrize("sort", [None, False]) def test_union_from_iterables(self, index, klass, sort): # GH 10149 first = index[5:20] @@ -848,7 +839,6 @@ def test_union_from_iterables(self, index, klass, sort): tm.assert_index_equal(result, everything.sort_values()) assert tm.equalContents(result, everything) - @pytest.mark.parametrize("sort", [None, False]) def test_union_identity(self, index, sort): first = index[5:20] @@ -870,7 +860,6 @@ def test_union_identity(self, index, sort): "first_name, second_name, expected_name", [("A", "B", None), (None, "B", None), ("A", None, None)], ) - @pytest.mark.parametrize("sort", [None, False]) def test_union_name_preservation( self, first_list, second_list, first_name, second_name, expected_name, sort ): @@ -887,7 +876,6 @@ def test_union_name_preservation( expected = Index(vals, name=expected_name) assert tm.equalContents(union, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_union_dt_as_obj(self, sort): # TODO: Replace with fixturesult index = self.create_index() @@ -1022,7 +1010,6 @@ def test_append_empty_preserve_name(self, name, expected): assert result.name == expected @pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")]) - @pytest.mark.parametrize("sort", [None, False]) def test_difference_name_preservation(self, index, second_name, expected, sort): first = index[5:20] second = index[:10] @@ -1039,7 +1026,6 @@ def test_difference_name_preservation(self, index, second_name, expected, sort): else: assert result.name == expected - @pytest.mark.parametrize("sort", [None, False]) def test_difference_empty_arg(self, index, sort): first = index[5:20] first.name == "name" @@ -1048,7 +1034,6 @@ def test_difference_empty_arg(self, index, sort): assert tm.equalContents(result, first) assert result.name == first.name - @pytest.mark.parametrize("sort", [None, False]) def test_difference_identity(self, index, sort): first = index[5:20] first.name == "name" @@ -1057,7 +1042,6 @@ def test_difference_identity(self, index, sort): assert len(result) == 0 assert result.name == first.name - @pytest.mark.parametrize("sort", [None, False]) def test_difference_sort(self, index, sort): first = index[5:20] second = index[:10] @@ -1070,7 +1054,6 @@ def test_difference_sort(self, index, sort): tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference(self, sort): # smoke index1 = Index([5, 2, 3, 4], name="index1") @@ -1118,7 +1101,6 @@ def test_difference_incomparable_true(self, opname): with pytest.raises(TypeError, match="Cannot compare"): op(a) - @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference_mi(self, sort): index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])) index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)]) @@ -1136,7 +1118,6 @@ def test_symmetric_difference_mi(self, sort): (Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0])), ], ) - @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference_missing(self, index2, expected, sort): # GH 13514 change: {nan} - {nan} == {} # (GH 6444, sorting of nans, is no longer an issue) @@ 
-1147,7 +1128,6 @@ def test_symmetric_difference_missing(self, index2, expected, sort): expected = expected.sort_values() tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_symmetric_difference_non_index(self, sort): index1 = Index([1, 2, 3, 4], name="index1") index2 = np.array([2, 3, 4, 5]) @@ -1160,7 +1140,6 @@ def test_symmetric_difference_non_index(self, sort): assert tm.equalContents(result, expected) assert result.name == "new_name" - @pytest.mark.parametrize("sort", [None, False]) def test_difference_type(self, indices, sort): # GH 20040 # If taking difference of a set and itself, it @@ -1171,7 +1150,6 @@ def test_difference_type(self, indices, sort): expected = indices.drop(indices) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_difference(self, indices, sort): # GH 20040 # Test that the intersection of an index with an diff --git a/pandas/tests/indexes/timedeltas/test_setops.py b/pandas/tests/indexes/timedeltas/test_setops.py index 0aa784cbb7710..4808950f17b52 100644 --- a/pandas/tests/indexes/timedeltas/test_setops.py +++ b/pandas/tests/indexes/timedeltas/test_setops.py @@ -107,7 +107,6 @@ def test_intersection_bug_1708(self): expected = timedelta_range("1 day 01:00:00", periods=3, freq="h") tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_equal(self, sort): # GH 24471 Test intersection outcome given the sort keyword # for equal indicies intersection should return the original index @@ -123,7 +122,6 @@ def test_intersection_equal(self, sort): assert inter is first @pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)]) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_zero_length(self, period_1, period_2, sort): # GH 24471 test for non overlap the intersection should be zero length index_1 = timedelta_range("1 day", periods=period_1, freq="h") @@ -132,7 +130,6 @@ def test_intersection_zero_length(self, period_1, period_2, sort): result = index_1.intersection(index_2, sort=sort) tm.assert_index_equal(result, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_zero_length_input_index(self, sort): # GH 24966 test for 0-len intersections are copied index_1 = timedelta_range("1 day", periods=0, freq="h") @@ -162,7 +159,6 @@ def test_zero_length_input_index(self, sort): ), ], ) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection(self, rng, expected, sort): # GH 4690 (with tz) base = timedelta_range("1 day", periods=4, freq="h", name="idx") @@ -195,7 +191,6 @@ def test_intersection(self, rng, expected, sort): ), ], ) - @pytest.mark.parametrize("sort", [None, False]) def test_intersection_non_monotonic(self, rng, expected, sort): # 24471 non-monotonic base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx") @@ -213,7 +208,6 @@ def test_intersection_non_monotonic(self, rng, expected, sort): class TestTimedeltaIndexDifference: - @pytest.mark.parametrize("sort", [None, False]) def test_difference_freq(self, sort): # GH14323: Difference of TimedeltaIndex should not preserve frequency @@ -231,7 +225,6 @@ def test_difference_freq(self, sort): tm.assert_index_equal(idx_diff, expected) tm.assert_attr_equal("freq", idx_diff, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_difference_sort(self, sort): index = pd.TimedeltaIndex(
- [x] closes #32183 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
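A condensed sketch of the pattern this PR applies: the shared fixture in `pandas/tests/indexes/conftest.py` (shown in the diff above) replaces the per-test `@pytest.mark.parametrize("sort", [None, False])` decorators. The test below is hypothetical, for illustration only:

```python
# conftest.py -- the shared fixture (condensed from the diff above)
import pytest

@pytest.fixture(params=[None, False])
def sort(request):
    return request.param

# a hypothetical test module: pytest now runs the test once per param,
# with no per-test @pytest.mark.parametrize("sort", [None, False]) needed
import pandas as pd

def test_union_values(sort):
    left = pd.Index([1, 3, 2])
    result = left.union(pd.Index([2, 4]), sort=sort)
    assert set(result) == {1, 2, 3, 4}
```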
https://api.github.com/repos/pandas-dev/pandas/pulls/32292
2020-02-27T12:55:54Z
2020-02-28T14:40:32Z
2020-02-28T14:40:31Z
2020-02-28T14:40:32Z
BUG: groupby with sort=False creates buggy MultiIndex
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 2b64b85863def..5709727f0db3d 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -215,6 +215,7 @@ MultiIndex # Common elements are now guaranteed to be ordered by the left side left.intersection(right, sort=False) +- Bug in :meth:`MultiIndex.is_lexsorted` was returning incorrect results when the levels of the multiindex weren't monotonic (:issue:`32259`) - I/O diff --git a/pandas/core/groupby/ops.py b/pandas/core/groupby/ops.py index 7259268ac3f2b..2071c6d31dbab 100644 --- a/pandas/core/groupby/ops.py +++ b/pandas/core/groupby/ops.py @@ -299,9 +299,14 @@ def reconstructed_codes(self) -> List[np.ndarray]: def result_index(self) -> Index: if not self.compressed and len(self.groupings) == 1: return self.groupings[0].result_index.rename(self.names[0]) - - codes = self.reconstructed_codes - levels = [ping.result_index for ping in self.groupings] + codes = [] + levels = [] + for code, ping in zip(self.reconstructed_codes, self.groupings): + levels.append(ping.result_index) + if ping.result_index.is_monotonic: + codes.append(code) + else: + codes.append(algorithms.factorize(ping.grouper, sort=True)[0]) result = MultiIndex( levels=levels, codes=codes, verify_integrity=False, names=self.names ) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 5662d41e19885..3aec2f531a8cb 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2015,6 +2015,30 @@ def test_dup_labels_output_shape(groupby_func, idx): tm.assert_index_equal(result.columns, idx) +def test_sort_false_multiindex_lexsorted(): + # GH 32259 + d = pd.to_datetime( + [ + "2020-11-02", + "2019-01-02", + "2020-01-02", + "2020-02-04", + "2020-11-03", + "2019-11-03", + "2019-11-13", + "2019-11-13", + ] + ) + a = np.arange(len(d)) + b = np.random.rand(len(d)) + df = pd.DataFrame({"d": d, "a": a, "b": b}) + t = df.groupby(["d", "a"], sort=False).mean() + assert not t.index.is_lexsorted() + + t = df.groupby(["d", "a"], sort=True).mean() + assert t.index.is_lexsorted() + + def test_groupby_crash_on_nunique(axis): # Fix following 30253 df = pd.DataFrame({("A", "B"): [1, 2], ("A", "C"): [1, 3], ("D", "B"): [0, 0]})
- [ ] closes #32259 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry EDIT ---- probably better/less invasive to make sure .groupby(, sort=False) returns an object where the index has codes which reflect the ordering
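For reference, a condensed reproduction distilled from the regression test added in the diff above; the assertions describe the intended post-fix behavior of `is_lexsorted` at the time:

```python
import numpy as np
import pandas as pd

d = pd.to_datetime(["2020-11-02", "2019-01-02", "2020-01-02"])
df = pd.DataFrame({"d": d, "a": np.arange(len(d)), "b": np.random.rand(len(d))})

# sort=False keeps groups in order of appearance, so the resulting
# MultiIndex is not lexsorted -- and its codes must reflect that (GH 32259)
unsorted_idx = df.groupby(["d", "a"], sort=False).mean().index
assert not unsorted_idx.is_lexsorted()

# sort=True yields a lexsorted MultiIndex, as before
sorted_idx = df.groupby(["d", "a"], sort=True).mean().index
assert sorted_idx.is_lexsorted()
```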
https://api.github.com/repos/pandas-dev/pandas/pulls/32291
2020-02-27T12:28:04Z
2020-02-27T15:51:21Z
null
2020-02-27T15:51:21Z
TST/CLN: Follow-up to #32158
diff --git a/pandas/tests/window/test_window.py b/pandas/tests/window/test_window.py index 41b9d9e84f27e..c7c45f0e5e0de 100644 --- a/pandas/tests/window/test_window.py +++ b/pandas/tests/window/test_window.py @@ -29,11 +29,10 @@ def test_constructor(self, which): c(win_type="boxcar", window=2, min_periods=1, center=False) # not valid - msg = "|".join(["min_periods must be an integer", "center must be a boolean"]) for w in [2.0, "foo", np.array([2])]: - with pytest.raises(ValueError, match=msg): + with pytest.raises(ValueError, match="min_periods must be an integer"): c(win_type="boxcar", window=2, min_periods=w) - with pytest.raises(ValueError, match=msg): + with pytest.raises(ValueError, match="center must be a boolean"): c(win_type="boxcar", window=2, min_periods=1, center=w) for wt in ["foobar", 1]:
xref #32158
https://api.github.com/repos/pandas-dev/pandas/pulls/32290
2020-02-27T12:23:25Z
2020-02-27T16:11:48Z
2020-02-27T16:11:47Z
2020-02-27T16:32:33Z
DOC: Fix errors in pandas.Series.argmin
diff --git a/pandas/core/base.py b/pandas/core/base.py index f55d9f905945d..508582540e169 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -927,16 +927,17 @@ def max(self, axis=None, skipna=True, *args, **kwargs): nv.validate_max(args, kwargs) return nanops.nanmax(self._values, skipna=skipna) + @doc(op="max", oppose="min", value="largest") def argmax(self, axis=None, skipna=True, *args, **kwargs): """ - Return int position of the largest value in the Series. + Return int position of the {value} value in the Series. - If the maximum is achieved in multiple locations, + If the {op}imum is achieved in multiple locations, the first row position is returned. Parameters ---------- - axis : {None} + axis : {{None}} Dummy argument for consistency with Series. skipna : bool, default True Exclude NA/null values when showing the result. @@ -946,12 +947,13 @@ def argmax(self, axis=None, skipna=True, *args, **kwargs): Returns ------- int - Row position of the maximum values. + Row position of the {op}imum value. See Also -------- - numpy.ndarray.argmax : Equivalent method for numpy arrays. - Series.argmin : Similar method, but returning the minimum. + Series.arg{op} : Return position of the {op}imum value. + Series.arg{oppose} : Return position of the {oppose}imum value. + numpy.ndarray.arg{op} : Equivalent method for numpy arrays. Series.idxmax : Return index label of the maximum values. Series.idxmin : Return index label of the minimum values. @@ -959,8 +961,8 @@ def argmax(self, axis=None, skipna=True, *args, **kwargs): -------- Consider dataset containing cereal calories - >>> s = pd.Series({'Corn Flakes': 100.0, 'Almond Delight': 110.0, - ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}) + >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0, + ... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}}) >>> s Corn Flakes 100.0 Almond Delight 110.0 @@ -970,8 +972,11 @@ def argmax(self, axis=None, skipna=True, *args, **kwargs): >>> s.argmax() 2 + >>> s.argmin() + 0 - The maximum cereal calories is in the third element, + The maximum cereal calories is the third element and + the minimum cereal calories is the first element, since series is zero-indexed. """ nv.validate_minmax_axis(axis) @@ -1019,25 +1024,8 @@ def min(self, axis=None, skipna=True, *args, **kwargs): nv.validate_min(args, kwargs) return nanops.nanmin(self._values, skipna=skipna) + @doc(argmax, op="min", oppose="max", value="smallest") def argmin(self, axis=None, skipna=True, *args, **kwargs): - """ - Return a ndarray of the minimum argument indexer. - - Parameters - ---------- - axis : {None} - Dummy argument for consistency with Series. - skipna : bool, default True - - Returns - ------- - numpy.ndarray - - See Also - -------- - numpy.ndarray.argmin : Return indices of the minimum values along - the given axis. - """ nv.validate_minmax_axis(axis) nv.validate_argmax_with_skipna(skipna, args, kwargs) return nanops.nanargmin(self._values, skipna=skipna)
- [x] closes https://github.com/pandanistas/pandanistas_sprint_jakarta2020/issues/19 - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry output of `python scripts/validate_docstrings.py pandas.Series.argmin`: ``` ################################################################################ ################################## Validation ################################## ################################################################################ ```
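For readers unfamiliar with the `@doc` templating used in this diff, a simplified stand-in (illustration only — not the real `pandas.util._decorators.doc`, which tracks docstring components differently; names like `_doc_template` are invented for this sketch):

```python
def doc(*templates, **params):
    """Simplified stand-in for pandas.util._decorators.doc."""
    def decorator(func):
        parts = []
        for tpl in templates:
            # a decorated function contributes its raw, unformatted template
            parts.append(getattr(tpl, "_doc_template", tpl.__doc__)
                         if callable(tpl) else tpl)
        if func.__doc__:
            parts.insert(0, func.__doc__)
        raw = "".join(p for p in parts if p)
        func._doc_template = raw  # keep the template so other functions can reuse it
        func.__doc__ = raw.format(**params)
        return func
    return decorator

@doc(op="max")
def argmax(values):
    """Return int position of the {op}imum value."""
    return max(range(len(values)), key=values.__getitem__)

@doc(argmax, op="min")
def argmin(values):
    return min(range(len(values)), key=values.__getitem__)

assert "maximum" in argmax.__doc__
assert "minimum" in argmin.__doc__
```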
https://api.github.com/repos/pandas-dev/pandas/pulls/32286
2020-02-27T07:53:20Z
2020-03-06T12:30:37Z
2020-03-06T12:30:36Z
2020-03-06T12:31:08Z
REF: move misplaced to_time tests
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index ecfecfb414326..a91c837c9d9a2 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -2,7 +2,7 @@ import calendar from collections import deque -from datetime import datetime, time, timedelta +from datetime import datetime, timedelta import locale from dateutil.parser import parse @@ -2112,52 +2112,6 @@ def test_parsers_timestring(self, cache): assert result4 == exp_now assert result5 == exp_now - @td.skip_if_has_locale - def test_parsers_time(self): - # GH11818 - strings = [ - "14:15", - "1415", - "2:15pm", - "0215pm", - "14:15:00", - "141500", - "2:15:00pm", - "021500pm", - time(14, 15), - ] - expected = time(14, 15) - - for time_string in strings: - assert tools.to_time(time_string) == expected - - new_string = "14.15" - msg = r"Cannot convert arg \['14\.15'\] to a time" - with pytest.raises(ValueError, match=msg): - tools.to_time(new_string) - assert tools.to_time(new_string, format="%H.%M") == expected - - arg = ["14:15", "20:20"] - expected_arr = [time(14, 15), time(20, 20)] - assert tools.to_time(arg) == expected_arr - assert tools.to_time(arg, format="%H:%M") == expected_arr - assert tools.to_time(arg, infer_time_format=True) == expected_arr - assert tools.to_time(arg, format="%I:%M%p", errors="coerce") == [None, None] - - res = tools.to_time(arg, format="%I:%M%p", errors="ignore") - tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_)) - - with pytest.raises(ValueError): - tools.to_time(arg, format="%I:%M%p", errors="raise") - - tm.assert_series_equal( - tools.to_time(Series(arg, name="test")), Series(expected_arr, name="test") - ) - - res = tools.to_time(np.array(arg)) - assert isinstance(res, list) - assert res == expected_arr - @pytest.mark.parametrize("cache", [True, False]) @pytest.mark.parametrize( "dt_string, tz, dt_string_repr", diff --git a/pandas/tests/tools/test_to_time.py b/pandas/tests/tools/test_to_time.py new file mode 100644 index 0000000000000..17ab492aca725 --- /dev/null +++ b/pandas/tests/tools/test_to_time.py @@ -0,0 +1,58 @@ +from datetime import time + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import Series +import pandas._testing as tm +from pandas.core.tools.datetimes import to_time + + +class TestToTime: + @td.skip_if_has_locale + def test_parsers_time(self): + # GH#11818 + strings = [ + "14:15", + "1415", + "2:15pm", + "0215pm", + "14:15:00", + "141500", + "2:15:00pm", + "021500pm", + time(14, 15), + ] + expected = time(14, 15) + + for time_string in strings: + assert to_time(time_string) == expected + + new_string = "14.15" + msg = r"Cannot convert arg \['14\.15'\] to a time" + with pytest.raises(ValueError, match=msg): + to_time(new_string) + assert to_time(new_string, format="%H.%M") == expected + + arg = ["14:15", "20:20"] + expected_arr = [time(14, 15), time(20, 20)] + assert to_time(arg) == expected_arr + assert to_time(arg, format="%H:%M") == expected_arr + assert to_time(arg, infer_time_format=True) == expected_arr + assert to_time(arg, format="%I:%M%p", errors="coerce") == [None, None] + + res = to_time(arg, format="%I:%M%p", errors="ignore") + tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_)) + + with pytest.raises(ValueError): + to_time(arg, format="%I:%M%p", errors="raise") + + tm.assert_series_equal( + to_time(Series(arg, name="test")), Series(expected_arr, name="test") + ) + + res = 
to_time(np.array(arg)) + assert isinstance(res, list) + assert res == expected_arr
https://api.github.com/repos/pandas-dev/pandas/pulls/32285
2020-02-27T02:45:33Z
2020-02-27T17:01:22Z
2020-02-27T17:01:22Z
2020-02-27T17:20:02Z
CI: nested DataFrames in npdev
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 14162bc433317..72fc0010cbbce 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -9,6 +9,7 @@ import pytest from pandas.compat import is_platform_little_endian +from pandas.compat.numpy import _is_numpy_dev from pandas.core.dtypes.common import is_integer_dtype @@ -144,6 +145,7 @@ def test_constructor_dtype_list_data(self): assert df.loc[1, 0] is None assert df.loc[0, 1] == "2" + @pytest.mark.xfail(_is_numpy_dev, reason="Interprets list of frame as 3D") def test_constructor_list_frames(self): # see gh-3243 result = DataFrame([DataFrame()]) @@ -503,6 +505,7 @@ def test_constructor_error_msgs(self): with pytest.raises(ValueError, match=msg): DataFrame({"a": False, "b": True}) + @pytest.mark.xfail(_is_numpy_dev, reason="Interprets embedded frame as 3D") def test_constructor_with_embedded_frames(self): # embedded data frames
xref #32289
https://api.github.com/repos/pandas-dev/pandas/pulls/32284
2020-02-27T02:10:08Z
2020-02-27T11:47:25Z
2020-02-27T11:47:24Z
2020-02-29T13:39:29Z
REF: make DatetimeIndex._simple_new actually simple
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 48a3886c20a3a..da59c635b5a18 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -114,7 +114,18 @@ def apply_index_wraps(func): # Note: normally we would use `@functools.wraps(func)`, but this does # not play nicely with cython class methods def wrapper(self, other): - result = func(self, other) + + is_index = getattr(other, "_typ", "") == "datetimeindex" + + # operate on DatetimeArray + arr = other._data if is_index else other + + result = func(self, arr) + + if is_index: + # Wrap DatetimeArray result back to DatetimeIndex + result = type(other)._simple_new(result, name=other.name) + if self.normalize: result = result.to_period('D').to_timestamp() return result diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index cef8a39d75a4c..23e68802eb126 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3281,13 +3281,11 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): target = _ensure_has_len(target) # target may be an iterator if not isinstance(target, Index) and len(target) == 0: - attrs = self._get_attributes_dict() - attrs.pop("freq", None) # don't preserve freq if isinstance(self, ABCRangeIndex): values = range(0) else: values = self._data[:0] # appropriately-dtyped empty array - target = self._simple_new(values, **attrs) + target = self._simple_new(values, name=self.name) else: target = ensure_index(target) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index bf35c85ac8ed5..054a64bf3f990 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -622,21 +622,11 @@ def _shallow_copy(self, values=None, name: Label = lib.no_default): if values is None: values = self._data - if isinstance(values, type(self)): - values = values._data if isinstance(values, np.ndarray): # TODO: We would rather not get here values = type(self._data)(values, dtype=self.dtype) - attributes = self._get_attributes_dict() - - if self.freq is not None: - if isinstance(values, (DatetimeArray, TimedeltaArray)): - if values.freq is None: - del attributes["freq"] - - attributes["name"] = name - result = self._simple_new(values, **attributes) + result = type(self)._simple_new(values, name=name) result._cache = cache return result @@ -780,7 +770,10 @@ def _fast_union(self, other, sort=None): loc = right.searchsorted(left_start, side="left") right_chunk = right.values[:loc] dates = concat_compat((left.values, right_chunk)) - return self._shallow_copy(dates) + result = self._shallow_copy(dates) + result._set_freq("infer") + # TODO: can we infer that it has self.freq? + return result else: left, right = other, self @@ -792,7 +785,10 @@ def _fast_union(self, other, sort=None): loc = right.searchsorted(left_end, side="right") right_chunk = right.values[loc:] dates = concat_compat((left.values, right_chunk)) - return self._shallow_copy(dates) + result = self._shallow_copy(dates) + result._set_freq("infer") + # TODO: can we infer that it has self.freq? 
+ return result else: return left diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index c8035a9de432b..e791133220dbf 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -7,17 +7,13 @@ from pandas._libs import NaT, Period, Timestamp, index as libindex, lib, tslib as libts from pandas._libs.tslibs import fields, parsing, timezones +from pandas._typing import Label from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import _NS_DTYPE, is_float, is_integer, is_scalar -from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import is_valid_nat_for_dtype -from pandas.core.arrays.datetimes import ( - DatetimeArray, - tz_to_dtype, - validate_tz_from_dtype, -) +from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype import pandas.core.common as com from pandas.core.indexes.base import Index, InvalidIndexError, maybe_extract_name from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin @@ -36,7 +32,20 @@ def _new_DatetimeIndex(cls, d): if "data" in d and not isinstance(d["data"], DatetimeIndex): # Avoid need to verify integrity by calling simple_new directly data = d.pop("data") - result = cls._simple_new(data, **d) + if not isinstance(data, DatetimeArray): + # For backward compat with older pickles, we may need to construct + # a DatetimeArray to adapt to the newer _simple_new signature + tz = d.pop("tz") + freq = d.pop("freq") + dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq) + else: + dta = data + for key in ["tz", "freq"]: + # These are already stored in our DatetimeArray; if they are + # also in the pickle and don't match, we have a problem. + if key in d: + assert d.pop(key) == getattr(dta, key) + result = cls._simple_new(dta, **d) else: with warnings.catch_warnings(): # TODO: If we knew what was going in to **d, we might be able to @@ -244,34 +253,16 @@ def __new__( return subarr @classmethod - def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): - """ - We require the we have a dtype compat for the values - if we are passed a non-dtype compat, then coerce using the constructor - """ - if isinstance(values, DatetimeArray): - if tz: - tz = validate_tz_from_dtype(dtype, tz) - dtype = DatetimeTZDtype(tz=tz) - elif dtype is None: - dtype = _NS_DTYPE - - values = DatetimeArray(values, freq=freq, dtype=dtype) - tz = values.tz - freq = values.freq - values = values._data - - dtype = tz_to_dtype(tz) - dtarr = DatetimeArray._simple_new(values, freq=freq, dtype=dtype) - assert isinstance(dtarr, DatetimeArray) + def _simple_new(cls, values: DatetimeArray, name: Label = None): + assert isinstance(values, DatetimeArray), type(values) result = object.__new__(cls) - result._data = dtarr + result._data = values result.name = name result._cache = {} result._no_setting_name = False # For groupby perf. 
See note in indexes/base about _index_data - result._index_data = dtarr._data + result._index_data = values._data result._reset_identity() return result diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index a6d9d4dfc330b..7a7670b0e7965 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,6 +1,7 @@ """ implement the TimedeltaIndex """ from pandas._libs import NaT, Timedelta, index as libindex +from pandas._typing import Label from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( @@ -154,7 +155,7 @@ def __new__( if isinstance(data, TimedeltaArray) and freq is None: if copy: data = data.copy() - return cls._simple_new(data, name=name, freq=freq) + return cls._simple_new(data, name=name) if isinstance(data, TimedeltaIndex) and freq is None and name is None: if copy: @@ -170,12 +171,8 @@ def __new__( return cls._simple_new(tdarr, name=name) @classmethod - def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE): - # `dtype` is passed by _shallow_copy in corner cases, should always - # be timedelta64[ns] if present - assert dtype == _TD_DTYPE, dtype + def _simple_new(cls, values: TimedeltaArray, name: Label = None): assert isinstance(values, TimedeltaArray) - assert freq is None or values.freq == freq result = object.__new__(cls) result._data = values diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index b8a70752330c5..e505917da1dc4 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -65,8 +65,8 @@ def test_compare_len1_raises(self): # to the case where one has length-1, which numpy would broadcast data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9 - idx = self.array_cls._simple_new(data, freq="D") - arr = self.index_cls(idx) + arr = self.array_cls._simple_new(data, freq="D") + idx = self.index_cls(arr) with pytest.raises(ValueError, match="Lengths must match"): arr == arr[:1] diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index 8ed98410ad9a4..a533d06a924e6 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -363,7 +363,7 @@ def test_equals(self): assert not idx.equals(pd.Series(idx2)) # same internal, different tz - idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz="US/Pacific") + idx3 = pd.DatetimeIndex(idx.asi8, tz="US/Pacific") tm.assert_numpy_array_equal(idx.asi8, idx3.asi8) assert not idx.equals(idx3) assert not idx.equals(idx3.copy()) diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index b6bbe008812cb..bc20d784c8dee 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -337,9 +337,6 @@ def apply_index(self, i): # integer addition on PeriodIndex is deprecated, # so we directly use _time_shift instead asper = i.to_period("W") - if not isinstance(asper._data, np.ndarray): - # unwrap PeriodIndex --> PeriodArray - asper = asper._data shifted = asper._time_shift(weeks) i = shifted.to_timestamp() + i.to_perioddelta("W") @@ -629,9 +626,6 @@ def apply_index(self, i): # to_period rolls forward to next BDay; track and # reduce n where it does when rolling forward asper = i.to_period("B") - if not isinstance(asper._data, np.ndarray): - # unwrap PeriodIndex --> PeriodArray - asper = asper._data if self.n > 0: shifted = (i.to_perioddelta("B") - time).asi8 != 0 @@ -1384,9 +1378,6 @@ def apply_index(self, i): # integer-array addition on 
PeriodIndex is deprecated, # so we use _addsub_int_array directly asper = i.to_period("M") - if not isinstance(asper._data, np.ndarray): - # unwrap PeriodIndex --> PeriodArray - asper = asper._data shifted = asper._addsub_int_array(roll // 2, operator.add) i = type(dti)(shifted.to_timestamp()) @@ -1582,9 +1573,6 @@ def apply_index(self, i): # integer addition on PeriodIndex is deprecated, # so we use _time_shift directly asper = i.to_period("W") - if not isinstance(asper._data, np.ndarray): - # unwrap PeriodIndex --> PeriodArray - asper = asper._data shifted = asper._time_shift(self.n) return shifted.to_timestamp() + i.to_perioddelta("W") @@ -1608,9 +1596,6 @@ def _end_apply_index(self, dtindex): base, mult = libfrequencies.get_freq_code(self.freqstr) base_period = dtindex.to_period(base) - if not isinstance(base_period._data, np.ndarray): - # unwrap PeriodIndex --> PeriodArray - base_period = base_period._data if self.n > 0: # when adding, dates on end roll to next
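After this refactor, `DatetimeIndex._simple_new` accepts only an already-built `DatetimeArray` plus a name. A rough sketch of the resulting contract; `_simple_new` is internal API, so the call below is for illustration only:

```python
import pandas as pd

dti = pd.date_range("2000-01-01", periods=3, name="idx")
dta = dti._data  # the backing DatetimeArray

# post-refactor signature: a DatetimeArray and a name, with no freq/tz/dtype
# re-validation (internal API, shown only to illustrate the contract)
idx = pd.DatetimeIndex._simple_new(dta, name="idx")
assert idx.equals(dti)
```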
https://api.github.com/repos/pandas-dev/pandas/pulls/32282
2020-02-27T00:37:35Z
2020-03-14T02:19:51Z
2020-03-14T02:19:51Z
2020-03-14T02:29:29Z
TST: Split and simplify test_value_counts_unique_nunique
diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py index 39dca1a9742df..8f48d0a3e8378 100644 --- a/pandas/tests/base/test_ops.py +++ b/pandas/tests/base/test_ops.py @@ -1,3 +1,4 @@ +import collections from datetime import datetime, timedelta from io import StringIO import sys @@ -15,7 +16,6 @@ is_datetime64_dtype, is_datetime64tz_dtype, is_object_dtype, - is_period_dtype, needs_i8_conversion, ) @@ -26,11 +26,9 @@ Index, Interval, IntervalIndex, - PeriodIndex, Series, Timedelta, TimedeltaIndex, - Timestamp, ) import pandas._testing as tm @@ -207,180 +205,152 @@ def test_ndarray_compat_properties(self, index_or_series_obj): assert Index([1]).item() == 1 assert Series([1]).item() == 1 - def test_value_counts_unique_nunique(self, index_or_series_obj): - orig = index_or_series_obj - obj = orig.copy() - klass = type(obj) - values = obj._values - - if orig.duplicated().any(): - pytest.xfail( - "The test implementation isn't flexible enough to deal " - "with duplicated values. This isn't a bug in the " - "application code, but in the test code." - ) + def test_unique(self, index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + result = obj.unique() - # create repeated values, 'n'th element is repeated by n+1 times - if isinstance(obj, Index): - expected_index = Index(obj[::-1]) - expected_index.name = None - obj = obj.repeat(range(1, len(obj) + 1)) + # dict.fromkeys preserves the order + unique_values = list(dict.fromkeys(obj.values)) + if isinstance(obj, pd.MultiIndex): + expected = pd.MultiIndex.from_tuples(unique_values) + expected.names = obj.names + tm.assert_index_equal(result, expected) + elif isinstance(obj, pd.Index): + expected = pd.Index(unique_values, dtype=obj.dtype) + if is_datetime64tz_dtype(obj): + expected = expected.normalize() + tm.assert_index_equal(result, expected) else: - expected_index = Index(values[::-1]) - idx = obj.index.repeat(range(1, len(obj) + 1)) - # take-based repeat - indices = np.repeat(np.arange(len(obj)), range(1, len(obj) + 1)) - rep = values.take(indices) - obj = klass(rep, index=idx) - - # check values has the same dtype as the original - assert obj.dtype == orig.dtype - - expected_s = Series( - range(len(orig), 0, -1), index=expected_index, dtype="int64" - ) + expected = np.array(unique_values) + tm.assert_numpy_array_equal(result, expected) - result = obj.value_counts() - tm.assert_series_equal(result, expected_s) - assert result.index.name is None + @pytest.mark.parametrize("null_obj", [np.nan, None]) + def test_unique_null(self, null_obj, index_or_series_obj): + obj = index_or_series_obj + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(obj, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj.values + if needs_i8_conversion(obj): + values[0:2] = iNaT + else: + values[0:2] = null_obj + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) result = obj.unique() - if isinstance(obj, Index): - assert isinstance(result, type(obj)) - tm.assert_index_equal(result, orig) - assert result.dtype == orig.dtype - elif is_datetime64tz_dtype(obj): - # datetimetz Series returns array of Timestamp - assert result[0] == orig[0] - for r in result: - assert isinstance(r, Timestamp) - - tm.assert_numpy_array_equal( - result.astype(object), orig._values.astype(object) - ) + 
+ unique_values_raw = dict.fromkeys(obj.values) + # because np.nan == np.nan is False, but None == None is True + # np.nan would be duplicated, whereas None wouldn't + unique_values_not_null = [ + val for val in unique_values_raw if not pd.isnull(val) + ] + unique_values = [null_obj] + unique_values_not_null + + if isinstance(obj, pd.Index): + expected = pd.Index(unique_values, dtype=obj.dtype) + if is_datetime64tz_dtype(obj): + result = result.normalize() + expected = expected.normalize() + elif isinstance(obj, pd.CategoricalIndex): + expected = expected.set_categories(unique_values_not_null) + tm.assert_index_equal(result, expected) else: - tm.assert_numpy_array_equal(result, orig.values) - assert result.dtype == orig.dtype + expected = np.array(unique_values, dtype=obj.dtype) + tm.assert_numpy_array_equal(result, expected) - # dropna=True would break for MultiIndex - assert obj.nunique(dropna=False) == len(np.unique(obj.values)) + def test_nunique(self, index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + expected = len(obj.unique()) + assert obj.nunique(dropna=False) == expected @pytest.mark.parametrize("null_obj", [np.nan, None]) - def test_value_counts_unique_nunique_null(self, null_obj, index_or_series_obj): - orig = index_or_series_obj - obj = orig.copy() - klass = type(obj) - values = obj._ndarray_values - num_values = len(orig) + def test_nunique_null(self, null_obj, index_or_series_obj): + obj = index_or_series_obj if not allow_na_ops(obj): pytest.skip("type doesn't allow for NA operations") - elif isinstance(orig, (pd.CategoricalIndex, pd.IntervalIndex)): - pytest.skip(f"values of {klass} cannot be changed") - elif isinstance(orig, pd.MultiIndex): - pytest.skip("MultiIndex doesn't support isna") - elif orig.duplicated().any(): - pytest.xfail( - "The test implementation isn't flexible enough to deal " - "with duplicated values. This isn't a bug in the " - "application code, but in the test code." 
- ) - - # special assign to the numpy array - if is_datetime64tz_dtype(obj): - if isinstance(obj, DatetimeIndex): - v = obj.asi8 - v[0:2] = iNaT - values = obj._shallow_copy(v) - else: - obj = obj.copy() - obj[0:2] = pd.NaT - values = obj._values + elif isinstance(obj, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") - elif is_period_dtype(obj): - values[0:2] = iNaT - parr = type(obj._data)(values, dtype=obj.dtype) - values = obj._shallow_copy(parr) - elif needs_i8_conversion(obj): + values = obj.values + if needs_i8_conversion(obj): values[0:2] = iNaT - values = obj._shallow_copy(values) else: values[0:2] = null_obj - # check values has the same dtype as the original - assert values.dtype == obj.dtype - - # create repeated values, 'n'th element is repeated by n+1 - # times - if isinstance(obj, (DatetimeIndex, PeriodIndex)): - expected_index = obj.copy() - expected_index.name = None + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) - # attach name to klass - obj = klass(values.repeat(range(1, len(obj) + 1))) - obj.name = "a" - else: - if isinstance(obj, DatetimeIndex): - expected_index = orig._values._shallow_copy(values) - else: - expected_index = Index(values) - expected_index.name = None - obj = obj.repeat(range(1, len(obj) + 1)) - obj.name = "a" - - # check values has the same dtype as the original - assert obj.dtype == orig.dtype - - # check values correctly have NaN - nanloc = np.zeros(len(obj), dtype=np.bool) - nanloc[:3] = True - if isinstance(obj, Index): - tm.assert_numpy_array_equal(pd.isna(obj), nanloc) + if isinstance(obj, pd.CategoricalIndex): + assert obj.nunique() == len(obj.categories) + assert obj.nunique(dropna=False) == len(obj.categories) + 1 else: - exp = Series(nanloc, obj.index, name="a") - tm.assert_series_equal(pd.isna(obj), exp) - - expected_data = list(range(num_values, 2, -1)) - expected_data_na = expected_data.copy() - if expected_data_na: - expected_data_na.append(3) - expected_s_na = Series( - expected_data_na, - index=expected_index[num_values - 1 : 0 : -1], - dtype="int64", - name="a", - ) - expected_s = Series( - expected_data, - index=expected_index[num_values - 1 : 1 : -1], - dtype="int64", - name="a", - ) + num_unique_values = len(obj.unique()) + assert obj.nunique() == max(0, num_unique_values - 1) + assert obj.nunique(dropna=False) == max(0, num_unique_values) - result_s_na = obj.value_counts(dropna=False) - tm.assert_series_equal(result_s_na, expected_s_na) - assert result_s_na.index.name is None - assert result_s_na.name == "a" - result_s = obj.value_counts() - tm.assert_series_equal(obj.value_counts(), expected_s) - assert result_s.index.name is None - assert result_s.name == "a" + def test_value_counts(self, index_or_series_obj): + obj = index_or_series_obj + obj = np.repeat(obj, range(1, len(obj) + 1)) + result = obj.value_counts() - result = obj.unique() - if isinstance(obj, Index): - tm.assert_index_equal(result, Index(values[1:], name="a")) - elif is_datetime64tz_dtype(obj): - # unable to compare NaT / nan - tm.assert_extension_array_equal(result[1:], values[2:]) - assert result[0] is pd.NaT - elif len(obj) > 0: - tm.assert_numpy_array_equal(result[1:], values[2:]) - - assert pd.isna(result[0]) - assert result.dtype == orig.dtype - - assert obj.nunique() == max(0, num_values - 2) - assert obj.nunique(dropna=False) == max(0, num_values - 1) + counter = collections.Counter(obj) + expected = pd.Series(dict(counter.most_common()), 
dtype=np.int64, name=obj.name) + expected.index = expected.index.astype(obj.dtype) + if isinstance(obj, pd.MultiIndex): + expected.index = pd.Index(expected.index) + + # sort_index to avoid switched order when values share the same count + result = result.sort_index() + expected = expected.sort_index() + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("null_obj", [np.nan, None]) + def test_value_counts_null(self, null_obj, index_or_series_obj): + orig = index_or_series_obj + obj = orig.copy() + + if not allow_na_ops(obj): + pytest.skip("type doesn't allow for NA operations") + elif len(obj) < 1: + pytest.skip("Test doesn't make sense on empty data") + elif isinstance(orig, pd.MultiIndex): + pytest.skip(f"MultiIndex can't hold '{null_obj}'") + + values = obj.values + if needs_i8_conversion(obj): + values[0:2] = iNaT + else: + values[0:2] = null_obj + + klass = type(obj) + repeated_values = np.repeat(values, range(1, len(values) + 1)) + obj = klass(repeated_values, dtype=obj.dtype) + + # because np.nan == np.nan is False, but None == None is True + # np.nan would be duplicated, whereas None wouldn't + counter = collections.Counter(obj.dropna()) + expected = pd.Series(dict(counter.most_common()), dtype=np.int64) + expected.index = expected.index.astype(obj.dtype) + + tm.assert_series_equal(obj.value_counts(), expected) + + # can't use expected[null_obj] = 3 as + # IntervalIndex doesn't allow assignment + new_entry = pd.Series({np.nan: 3}, dtype=np.int64) + expected = expected.append(new_entry) + tm.assert_series_equal(obj.value_counts(dropna=False), expected) def test_value_counts_inferred(self, index_or_series): klass = index_or_series
closes #32205, closes #32220 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
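The rewritten tests derive their expected counts from the standard library instead of hand-built fixtures. The core idea in isolation:

```python
import collections

import pandas as pd

obj = pd.Series(["a", "b", "b", "c", "c", "c"])
result = obj.value_counts()

# collections.Counter yields the same (value, count) pairs that
# value_counts reports, so it makes a fixture-free expected value
counter = collections.Counter(obj)
assert dict(result.items()) == dict(counter)
```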
https://api.github.com/repos/pandas-dev/pandas/pulls/32281
2020-02-26T22:19:03Z
2020-03-04T13:52:31Z
2020-03-04T13:52:30Z
2020-03-04T17:16:02Z
REF: simplify PeriodIndex._shallow_copy
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index b5e323fbd0fa4..a1681322a4a5f 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -4234,6 +4234,9 @@ def putmask(self, mask, value): values = self.values.copy() try: np.putmask(values, mask, self._convert_for_op(value)) + if is_period_dtype(self.dtype): + # .values cast to object, so we need to cast back + values = type(self)(values)._data return self._shallow_copy(values) except (ValueError, TypeError) as err: if is_object_dtype(self): diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 72a2aba2d8a88..f9ce8eb6d720d 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -520,7 +520,8 @@ def where(self, cond, other=None): other = other.view("i8") result = np.where(cond, values, other).astype("i8") - return self._shallow_copy(result) + arr = type(self._data)._simple_new(result, dtype=self.dtype) + return type(self)._simple_new(arr, name=self.name) def _summary(self, name=None) -> str: """ diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 35a5d99abf4e6..017f104e18493 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -250,22 +250,11 @@ def _has_complex_internals(self): return True def _shallow_copy(self, values=None, name: Label = no_default): - # TODO: simplify, figure out type of values name = name if name is not no_default else self.name if values is None: values = self._data - if isinstance(values, type(self)): - values = values._data - - if not isinstance(values, PeriodArray): - if isinstance(values, np.ndarray) and values.dtype == "i8": - values = PeriodArray(values, freq=self.freq) - else: - # GH#30713 this should never be reached - raise TypeError(type(values), getattr(values, "dtype", None)) - return self._simple_new(values, name=name) def _maybe_convert_timedelta(self, other): @@ -618,10 +607,11 @@ def insert(self, loc, item): if not isinstance(item, Period) or self.freq != item.freq: return self.astype(object).insert(loc, item) - idx = np.concatenate( + i8result = np.concatenate( (self[:loc].asi8, np.array([item.ordinal]), self[loc:].asi8) ) - return self._shallow_copy(idx) + arr = type(self._data)._simple_new(i8result, dtype=self.dtype) + return type(self)._simple_new(arr, name=self.name) def join(self, other, how="left", level=None, return_indexers=False, sort=False): """ diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py index f85d823cb2fac..298955654a89f 100644 --- a/pandas/tests/base/test_ops.py +++ b/pandas/tests/base/test_ops.py @@ -14,6 +14,7 @@ is_datetime64_dtype, is_datetime64tz_dtype, is_object_dtype, + is_period_dtype, needs_i8_conversion, ) @@ -295,6 +296,10 @@ def test_value_counts_unique_nunique_null(self, null_obj, index_or_series_obj): obj[0:2] = pd.NaT values = obj._values + elif is_period_dtype(obj): + values[0:2] = iNaT + parr = type(obj._data)(values, dtype=obj.dtype) + values = obj._shallow_copy(parr) elif needs_i8_conversion(obj): values[0:2] = iNaT values = obj._shallow_copy(values) diff --git a/pandas/tests/indexes/datetimes/test_indexing.py b/pandas/tests/indexes/datetimes/test_indexing.py index ceab670fb5041..554ae76979ba8 100644 --- a/pandas/tests/indexes/datetimes/test_indexing.py +++ b/pandas/tests/indexes/datetimes/test_indexing.py @@ -121,6 +121,14 @@ def test_dti_custom_getitem_matplotlib_hackaround(self): class TestWhere: + def test_where_doesnt_retain_freq(self): + dti = 
date_range("20130101", periods=3, freq="D", name="idx") + cond = [True, True, False] + expected = DatetimeIndex([dti[0], dti[1], dti[0]], freq=None, name="idx") + + result = dti.where(cond, dti[::-1]) + tm.assert_index_equal(result, expected) + def test_where_other(self): # other is ndarray or Index i = pd.date_range("20130101", periods=3, tz="US/Eastern") diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 40c7ffba46450..ab3e967f12360 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -117,7 +117,6 @@ def test_make_time_series(self): assert isinstance(series, Series) def test_shallow_copy_empty(self): - # GH13067 idx = PeriodIndex([], freq="M") result = idx._shallow_copy() @@ -125,11 +124,16 @@ def test_shallow_copy_empty(self): tm.assert_index_equal(result, expected) - def test_shallow_copy_i8(self): + def test_shallow_copy_disallow_i8(self): # GH-24391 pi = period_range("2018-01-01", periods=3, freq="2D") - result = pi._shallow_copy(pi.asi8) - tm.assert_index_equal(result, pi) + with pytest.raises(AssertionError, match="ndarray"): + pi._shallow_copy(pi.asi8) + + def test_shallow_copy_requires_disallow_period_index(self): + pi = period_range("2018-01-01", periods=3, freq="2D") + with pytest.raises(AssertionError, match="PeriodIndex"): + pi._shallow_copy(pi) def test_view_asi8(self): idx = PeriodIndex([], freq="M") diff --git a/pandas/tests/indexes/test_common.py b/pandas/tests/indexes/test_common.py index b46e6514b4536..c6ba5c9d61e9e 100644 --- a/pandas/tests/indexes/test_common.py +++ b/pandas/tests/indexes/test_common.py @@ -10,7 +10,7 @@ from pandas._libs.tslibs import iNaT -from pandas.core.dtypes.common import needs_i8_conversion +from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion import pandas as pd from pandas import CategoricalIndex, MultiIndex, RangeIndex @@ -219,7 +219,10 @@ def test_get_unique_index(self, indices): if not indices._can_hold_na: pytest.skip("Skip na-check if index cannot hold na") - if needs_i8_conversion(indices): + if is_period_dtype(indices): + vals = indices[[0] * 5]._data + vals[0] = pd.NaT + elif needs_i8_conversion(indices): vals = indices.asi8[[0] * 5] vals[0] = iNaT else: diff --git a/pandas/tests/indexes/timedeltas/test_indexing.py b/pandas/tests/indexes/timedeltas/test_indexing.py index 14fff6f9c85b5..5dec799832291 100644 --- a/pandas/tests/indexes/timedeltas/test_indexing.py +++ b/pandas/tests/indexes/timedeltas/test_indexing.py @@ -66,6 +66,14 @@ def test_timestamp_invalid_key(self, key): class TestWhere: + def test_where_doesnt_retain_freq(self): + tdi = timedelta_range("1 day", periods=3, freq="D", name="idx") + cond = [True, True, False] + expected = TimedeltaIndex([tdi[0], tdi[1], tdi[0]], freq=None, name="idx") + + result = tdi.where(cond, tdi[::-1]) + tm.assert_index_equal(result, expected) + def test_where_invalid_dtypes(self): tdi = timedelta_range("1 day", periods=3, freq="D", name="idx")
Adds a test for incorrect behavior in DatetimeIndex.where and TimedeltaIndex.where that is fixed in master.
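The behavior the new tests pin down is reproducible with public API alone:

```python
import pandas as pd

dti = pd.date_range("2013-01-01", periods=3, freq="D", name="idx")

# substituting values that break the regular spacing means the result
# can no longer advertise a freq
result = dti.where([True, True, False], dti[::-1])
assert result.freq is None
```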
https://api.github.com/repos/pandas-dev/pandas/pulls/32280
2020-02-26T22:18:02Z
2020-03-03T01:58:27Z
2020-03-03T01:58:27Z
2020-03-03T02:08:07Z
CI: Temporary fix to the docs build while we fix the ssh problems
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7493be34d10c7..a337ccbc98650 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -188,5 +188,5 @@ jobs: run: | cd pandas_web git remote add origin git@github.com:pandas-dev/pandas-dev.github.io.git - git push -f origin master + git push -f origin master || true if: github.event_name == 'push'
Looks like the ssh key we have in the settings is not working as expected in #32074. While we fix it (I don't have access to the settings, so can't check much), this prevents the master build from failing when publishing the dev docs to GitHub Pages.
https://api.github.com/repos/pandas-dev/pandas/pulls/32279
2020-02-26T22:08:46Z
2020-02-27T16:56:47Z
2020-02-27T16:56:47Z
2020-02-27T18:21:38Z
CLN: Use defaultdict for minor optimization
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 3fc10444ee064..b6b6a4fe74ed5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -8446,9 +8446,8 @@ def isin(self, values) -> "DataFrame": def _from_nested_dict(data): # TODO: this should be seriously cythonized - new_data = {} + new_data = collections.defaultdict(dict) for index, s in data.items(): for col, v in s.items(): - new_data[col] = new_data.get(col, {}) new_data[col][index] = v return new_data
Use a `defaultdict` in `_from_nested_dict()` as a minor performance optimization - [ ] closes #32209 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
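A minimal before/after illustration of the change, outside pandas:

```python
import collections

data = {"row1": {"a": 1, "b": 2}, "row2": {"a": 3}}

# before: fetch-or-create the inner dict on every assignment
slow = {}
for index, s in data.items():
    for col, v in s.items():
        slow[col] = slow.get(col, {})
        slow[col][index] = v

# after: defaultdict materializes the inner dict on first access
fast = collections.defaultdict(dict)
for index, s in data.items():
    for col, v in s.items():
        fast[col][index] = v

assert slow == fast  # same result, one dict lookup saved per value
```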
https://api.github.com/repos/pandas-dev/pandas/pulls/32278
2020-02-26T21:56:55Z
2020-02-27T12:39:49Z
2020-02-27T12:39:49Z
2020-03-02T15:34:15Z
TST: misplaced arithmetic tests
diff --git a/pandas/tests/frame/test_arithmetic.py b/pandas/tests/frame/test_arithmetic.py index 44ad55517dcea..e4be8a979a70f 100644 --- a/pandas/tests/frame/test_arithmetic.py +++ b/pandas/tests/frame/test_arithmetic.py @@ -4,6 +4,7 @@ import numpy as np import pytest +import pytz import pandas as pd import pandas._testing as tm @@ -771,3 +772,35 @@ def test_frame_single_columns_object_sum_axis_1(): result = df.sum(axis=1) expected = pd.Series(["A", 1.2, 0]) tm.assert_series_equal(result, expected) + + +# ------------------------------------------------------------------- +# Unsorted +# These arithmetic tests were previously in other files, eventually +# should be parametrized and put into tests.arithmetic + + +class TestFrameArithmeticUnsorted: + def test_frame_add_tz_mismatch_converts_to_utc(self): + rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern") + df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"]) + + df_moscow = df.tz_convert("Europe/Moscow") + result = df + df_moscow + assert result.index.tz is pytz.utc + + result = df_moscow + df + assert result.index.tz is pytz.utc + + def test_align_frame(self): + rng = pd.period_range("1/1/2000", "1/1/2010", freq="A") + ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng) + + result = ts + ts[::2] + expected = ts + ts + expected.values[1::2] = np.nan + tm.assert_frame_equal(result, expected) + + half = ts[::2] + result = ts + half.take(np.random.permutation(len(half))) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py index 1ce13fd31ba88..c378194b9e2b2 100644 --- a/pandas/tests/frame/test_period.py +++ b/pandas/tests/frame/test_period.py @@ -4,10 +4,6 @@ import pandas._testing as tm -def _permute(obj): - return obj.take(np.random.permutation(len(obj))) - - class TestPeriodIndex: def test_as_frame_columns(self): rng = period_range("1/1/2000", periods=5) @@ -42,15 +38,3 @@ def test_frame_index_to_string(self): # it works! 
frame.to_string() - - def test_align_frame(self): - rng = period_range("1/1/2000", "1/1/2010", freq="A") - ts = DataFrame(np.random.randn(len(rng), 3), index=rng) - - result = ts + ts[::2] - expected = ts + ts - expected.values[1::2] = np.nan - tm.assert_frame_equal(result, expected) - - result = ts + _permute(ts[::2]) - tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index 62e8a4b470218..00a253d4e5ad0 100644 --- a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -80,17 +80,6 @@ def test_frame_join_tzaware(self): tm.assert_index_equal(result.index, ex_index) assert result.index.tz.zone == "US/Central" - def test_frame_add_tz_mismatch_converts_to_utc(self): - rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern") - df = DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"]) - - df_moscow = df.tz_convert("Europe/Moscow") - result = df + df_moscow - assert result.index.tz is pytz.utc - - result = df_moscow + df - assert result.index.tz is pytz.utc - def test_frame_align_aware(self): idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern") idx2 = date_range("2001", periods=5, freq="2H", tz="US/Eastern") diff --git a/pandas/tests/series/test_arithmetic.py b/pandas/tests/series/test_arithmetic.py index f3ffdc373e178..10197766ce4a6 100644 --- a/pandas/tests/series/test_arithmetic.py +++ b/pandas/tests/series/test_arithmetic.py @@ -2,11 +2,12 @@ import numpy as np import pytest +import pytz from pandas._libs.tslibs import IncompatibleFrequency import pandas as pd -from pandas import Series +from pandas import Series, date_range import pandas._testing as tm @@ -203,3 +204,67 @@ def test_ser_cmp_result_names(self, names, op): ser = Series(cidx).rename(names[1]) result = op(ser, cidx) assert result.name == names[2] + + +# ------------------------------------------------------------------ +# Unsorted +# These arithmetic tests were previously in other files, eventually +# should be parametrized and put into tests.arithmetic + + +class TestTimeSeriesArithmetic: + # TODO: De-duplicate with test below + def test_series_add_tz_mismatch_converts_to_utc_duplicate(self): + rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern") + ser = Series(np.random.randn(len(rng)), index=rng) + + ts_moscow = ser.tz_convert("Europe/Moscow") + + result = ser + ts_moscow + assert result.index.tz is pytz.utc + + result = ts_moscow + ser + assert result.index.tz is pytz.utc + + def test_series_add_tz_mismatch_converts_to_utc(self): + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + + perm = np.random.permutation(100)[:90] + ser1 = Series( + np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern") + ) + + perm = np.random.permutation(100)[:90] + ser2 = Series( + np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin") + ) + + result = ser1 + ser2 + + uts1 = ser1.tz_convert("utc") + uts2 = ser2.tz_convert("utc") + expected = uts1 + uts2 + + assert result.index.tz == pytz.UTC + tm.assert_series_equal(result, expected) + + def test_series_add_aware_naive_raises(self): + rng = date_range("1/1/2011", periods=10, freq="H") + ser = Series(np.random.randn(len(rng)), index=rng) + + ser_utc = ser.tz_localize("utc") + + with pytest.raises(Exception): + ser + ser_utc + + with pytest.raises(Exception): + ser_utc + ser + + def test_datetime_understood(self): + # Ensures it doesn't fail to create the right series + # reported in issue#16726 + series = 
pd.Series(pd.date_range("2012-01-01", periods=3)) + offset = pd.offsets.DateOffset(days=6) + result = series - offset + expected = pd.Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"])) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index 59ae0cd63690c..d22dc72eaaadd 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -632,15 +632,6 @@ def test_date_tz(self): tm.assert_series_equal(s.dt.date, expected) tm.assert_series_equal(s.apply(lambda x: x.date()), expected) - def test_datetime_understood(self): - # Ensures it doesn't fail to create the right series - # reported in issue#16726 - series = pd.Series(pd.date_range("2012-01-01", periods=3)) - offset = pd.offsets.DateOffset(days=6) - result = series - offset - expected = pd.Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"])) - tm.assert_series_equal(result, expected) - def test_dt_timetz_accessor(self, tz_naive_fixture): # GH21358 tz = maybe_get_tz(tz_naive_fixture) diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index e729ff91293a8..a45c0bf8cf154 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -38,53 +38,6 @@ def test_string_index_alias_tz_aware(self, tz): result = ser["1/3/2000"] tm.assert_almost_equal(result, ser[2]) - # TODO: De-duplicate with test below - def test_series_add_tz_mismatch_converts_to_utc_duplicate(self): - rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern") - ser = Series(np.random.randn(len(rng)), index=rng) - - ts_moscow = ser.tz_convert("Europe/Moscow") - - result = ser + ts_moscow - assert result.index.tz is pytz.utc - - result = ts_moscow + ser - assert result.index.tz is pytz.utc - - def test_series_add_tz_mismatch_converts_to_utc(self): - rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") - - perm = np.random.permutation(100)[:90] - ser1 = Series( - np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern") - ) - - perm = np.random.permutation(100)[:90] - ser2 = Series( - np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin") - ) - - result = ser1 + ser2 - - uts1 = ser1.tz_convert("utc") - uts2 = ser2.tz_convert("utc") - expected = uts1 + uts2 - - assert result.index.tz == pytz.UTC - tm.assert_series_equal(result, expected) - - def test_series_add_aware_naive_raises(self): - rng = date_range("1/1/2011", periods=10, freq="H") - ser = Series(np.random.randn(len(rng)), index=rng) - - ser_utc = ser.tz_localize("utc") - - with pytest.raises(Exception): - ser + ser_utc - - with pytest.raises(Exception): - ser_utc + ser - def test_series_align_aware(self): idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern") ser = Series(np.random.randn(len(idx1)), index=idx1)
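The headline behavior in the relocated frame test, runnable on its own:

```python
import numpy as np
import pandas as pd
import pytz

rng = pd.date_range("2011-01-01", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])

# adding frames whose indexes carry different timezones aligns on UTC
result = df + df.tz_convert("Europe/Moscow")
assert result.index.tz is pytz.utc
```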
https://api.github.com/repos/pandas-dev/pandas/pulls/32275
2020-02-26T18:25:37Z
2020-02-27T16:11:00Z
2020-02-27T16:11:00Z
2020-02-27T16:16:04Z
TST: implement test_first
diff --git a/pandas/tests/frame/methods/test_first_and_last.py b/pandas/tests/frame/methods/test_first_and_last.py new file mode 100644 index 0000000000000..73e4128ddebb9 --- /dev/null +++ b/pandas/tests/frame/methods/test_first_and_last.py @@ -0,0 +1,61 @@ +""" +Note: includes tests for `last` +""" +import pytest + +from pandas import DataFrame +import pandas._testing as tm + + +class TestFirst: + def test_first_subset(self): + ts = tm.makeTimeDataFrame(freq="12h") + result = ts.first("10d") + assert len(result) == 20 + + ts = tm.makeTimeDataFrame(freq="D") + result = ts.first("10d") + assert len(result) == 10 + + result = ts.first("3M") + expected = ts[:"3/31/2000"] + tm.assert_frame_equal(result, expected) + + result = ts.first("21D") + expected = ts[:21] + tm.assert_frame_equal(result, expected) + + result = ts[:0].first("3M") + tm.assert_frame_equal(result, ts[:0]) + + def test_first_raises(self): + # GH#20725 + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(TypeError): # index is not a DatetimeIndex + df.first("1D") + + def test_last_subset(self): + ts = tm.makeTimeDataFrame(freq="12h") + result = ts.last("10d") + assert len(result) == 20 + + ts = tm.makeTimeDataFrame(nper=30, freq="D") + result = ts.last("10d") + assert len(result) == 10 + + result = ts.last("21D") + expected = ts["2000-01-10":] + tm.assert_frame_equal(result, expected) + + result = ts.last("21D") + expected = ts[-21:] + tm.assert_frame_equal(result, expected) + + result = ts[:0].last("3M") + tm.assert_frame_equal(result, ts[:0]) + + def test_last_raises(self): + # GH20725 + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(TypeError): # index is not a DatetimeIndex + df.last("1D") diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index b713af92eac27..5956f73bb11f0 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -119,58 +119,6 @@ def test_first_valid_index_all_nan(self, klass): assert obj.first_valid_index() is None assert obj.iloc[:0].first_valid_index() is None - def test_first_subset(self): - ts = tm.makeTimeDataFrame(freq="12h") - result = ts.first("10d") - assert len(result) == 20 - - ts = tm.makeTimeDataFrame(freq="D") - result = ts.first("10d") - assert len(result) == 10 - - result = ts.first("3M") - expected = ts[:"3/31/2000"] - tm.assert_frame_equal(result, expected) - - result = ts.first("21D") - expected = ts[:21] - tm.assert_frame_equal(result, expected) - - result = ts[:0].first("3M") - tm.assert_frame_equal(result, ts[:0]) - - def test_first_raises(self): - # GH20725 - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) - with pytest.raises(TypeError): # index is not a DatetimeIndex - df.first("1D") - - def test_last_subset(self): - ts = tm.makeTimeDataFrame(freq="12h") - result = ts.last("10d") - assert len(result) == 20 - - ts = tm.makeTimeDataFrame(nper=30, freq="D") - result = ts.last("10d") - assert len(result) == 10 - - result = ts.last("21D") - expected = ts["2000-01-10":] - tm.assert_frame_equal(result, expected) - - result = ts.last("21D") - expected = ts[-21:] - tm.assert_frame_equal(result, expected) - - result = ts[:0].last("3M") - tm.assert_frame_equal(result, ts[:0]) - - def test_last_raises(self): - # GH20725 - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) - with pytest.raises(TypeError): # index is not a DatetimeIndex - df.last("1D") - def test_operation_on_NaT(self): # Both NaT and Timestamp are in DataFrame. 
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]}) diff --git a/pandas/tests/series/methods/test_first_and_last.py b/pandas/tests/series/methods/test_first_and_last.py new file mode 100644 index 0000000000000..7629dc8cda30b --- /dev/null +++ b/pandas/tests/series/methods/test_first_and_last.py @@ -0,0 +1,69 @@ +""" +Note: includes tests for `last` +""" + +import numpy as np +import pytest + +from pandas import Series, date_range +import pandas._testing as tm + + +class TestFirst: + def test_first_subset(self): + rng = date_range("1/1/2000", "1/1/2010", freq="12h") + ts = Series(np.random.randn(len(rng)), index=rng) + result = ts.first("10d") + assert len(result) == 20 + + rng = date_range("1/1/2000", "1/1/2010", freq="D") + ts = Series(np.random.randn(len(rng)), index=rng) + result = ts.first("10d") + assert len(result) == 10 + + result = ts.first("3M") + expected = ts[:"3/31/2000"] + tm.assert_series_equal(result, expected) + + result = ts.first("21D") + expected = ts[:21] + tm.assert_series_equal(result, expected) + + result = ts[:0].first("3M") + tm.assert_series_equal(result, ts[:0]) + + def test_first_raises(self): + # GH#20725 + ser = Series("a b c".split()) + msg = "'first' only supports a DatetimeIndex index" + with pytest.raises(TypeError, match=msg): + ser.first("1D") + + def test_last_subset(self): + rng = date_range("1/1/2000", "1/1/2010", freq="12h") + ts = Series(np.random.randn(len(rng)), index=rng) + result = ts.last("10d") + assert len(result) == 20 + + rng = date_range("1/1/2000", "1/1/2010", freq="D") + ts = Series(np.random.randn(len(rng)), index=rng) + result = ts.last("10d") + assert len(result) == 10 + + result = ts.last("21D") + expected = ts["12/12/2009":] + tm.assert_series_equal(result, expected) + + result = ts.last("21D") + expected = ts[-21:] + tm.assert_series_equal(result, expected) + + result = ts[:0].last("3M") + tm.assert_series_equal(result, ts[:0]) + + def test_last_raises(self): + # GH#20725 + ser = Series("a b c".split()) + msg = "'last' only supports a DatetimeIndex index" + with pytest.raises(TypeError, match=msg): + ser.last("1D") diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 544634a2d16e9..592fd6ba558ff 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -167,60 +167,6 @@ def test_promote_datetime_date(self): expected = rng.get_indexer(ts_slice.index) tm.assert_numpy_array_equal(result, expected) - def test_first_subset(self): - ts = _simple_ts("1/1/2000", "1/1/2010", freq="12h") - result = ts.first("10d") - assert len(result) == 20 - - ts = _simple_ts("1/1/2000", "1/1/2010") - result = ts.first("10d") - assert len(result) == 10 - - result = ts.first("3M") - expected = ts[:"3/31/2000"] - tm.assert_series_equal(result, expected) - - result = ts.first("21D") - expected = ts[:21] - tm.assert_series_equal(result, expected) - - result = ts[:0].first("3M") - tm.assert_series_equal(result, ts[:0]) - - def test_first_raises(self): - # GH20725 - ser = pd.Series("a b c".split()) - msg = "'first' only supports a DatetimeIndex index" - with pytest.raises(TypeError, match=msg): - ser.first("1D") - - def test_last_subset(self): - ts = _simple_ts("1/1/2000", "1/1/2010", freq="12h") - result = ts.last("10d") - assert len(result) == 20 - - ts = _simple_ts("1/1/2000", "1/1/2010") - result = ts.last("10d") - assert len(result) == 10 - - result = ts.last("21D") - expected = ts["12/12/2009":] - tm.assert_series_equal(result, expected) - - result = 
ts.last("21D") - expected = ts[-21:] - tm.assert_series_equal(result, expected) - - result = ts[:0].last("3M") - tm.assert_series_equal(result, ts[:0]) - - def test_last_raises(self): - # GH20725 - ser = pd.Series("a b c".split()) - msg = "'last' only supports a DatetimeIndex index" - with pytest.raises(TypeError, match=msg): - ser.last("1D") - def test_format_pre_1900_dates(self): rng = date_range("1/1/1850", "1/1/1950", freq="A-DEC") rng.format()
cc @MomIsBestFriend there are three things about these tests that I think would make for good (separate) follow-ups if you're interested: 1) They are just begging to be parametrized 2) We don't have a systematic way of naming/organizing tests that cover two specific methods rather than one, which is the idea behind tests.(frame|series).methods (see also: head/tail, first_valid_index/last_valid_index) 3) These tests could/should also be shared/parametrized over Series vs DataFrame, but we don't have a systematic home or naming convention for these.
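A hedged sketch of follow-up (1): the subset-length checks collapse naturally under parametrization. The test name and parameter grid below are hypothetical, not part of the PR:

```python
import pytest

import pandas._testing as tm


# hypothetical parametrized form of the first/last subset checks above
@pytest.mark.parametrize("method", ["first", "last"])
@pytest.mark.parametrize("freq, expected_len", [("12h", 20), ("D", 10)])
def test_first_last_subset(method, freq, expected_len):
    ts = tm.makeTimeDataFrame(nper=30, freq=freq)
    result = getattr(ts, method)("10d")
    assert len(result) == expected_len
```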
https://api.github.com/repos/pandas-dev/pandas/pulls/32274
2020-02-26T18:09:49Z
2020-02-27T16:19:14Z
2020-02-27T16:19:14Z
2020-02-27T16:21:28Z
TST: move misplaced to_datetime test
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 0a774e9c0f008..ecfecfb414326 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -2,7 +2,7 @@ import calendar from collections import deque -from datetime import datetime, time +from datetime import datetime, time, timedelta import locale from dateutil.parser import parse @@ -1376,6 +1376,86 @@ def test_to_datetime_errors_ignore_utc_true(self): expected = DatetimeIndex(["1970-01-01 00:00:01"], tz="UTC") tm.assert_index_equal(result, expected) + # TODO: this is moved from tests.series.test_timeseries, may be redundant + def test_to_datetime_unit(self): + + epoch = 1370745748 + s = Series([epoch + t for t in range(20)]) + result = to_datetime(s, unit="s") + expected = Series( + [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] + ) + tm.assert_series_equal(result, expected) + + s = Series([epoch + t for t in range(20)]).astype(float) + result = to_datetime(s, unit="s") + expected = Series( + [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] + ) + tm.assert_series_equal(result, expected) + + s = Series([epoch + t for t in range(20)] + [iNaT]) + result = to_datetime(s, unit="s") + expected = Series( + [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] + + [NaT] + ) + tm.assert_series_equal(result, expected) + + s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float) + result = to_datetime(s, unit="s") + expected = Series( + [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] + + [NaT] + ) + tm.assert_series_equal(result, expected) + + # GH13834 + s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float) + result = to_datetime(s, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in np.arange(0, 2, 0.25) + ] + + [NaT] + ) + tm.assert_series_equal(result, expected) + + s = pd.concat( + [Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])], + ignore_index=True, + ) + result = to_datetime(s, unit="s") + expected = Series( + [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] + + [NaT] + ) + tm.assert_series_equal(result, expected) + + result = to_datetime([1, 2, "NaT", pd.NaT, np.nan], unit="D") + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3 + ) + tm.assert_index_equal(result, expected) + + msg = "non convertible value foo with the unit 'D'" + with pytest.raises(ValueError, match=msg): + to_datetime([1, 2, "foo"], unit="D") + msg = "cannot convert input 111111111 with the unit 'D'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime([1, 2, 111111111], unit="D") + + # coerce we can process + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1 + ) + result = to_datetime([1, 2, "foo"], unit="D", errors="coerce") + tm.assert_index_equal(result, expected) + + result = to_datetime([1, 2, 111111111], unit="D", errors="coerce") + tm.assert_index_equal(result, expected) + class TestToDatetimeMisc: def test_to_datetime_barely_out_of_bounds(self): diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 8f06ea69f5d66..544634a2d16e9 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -1,11 +1,10 @@ -from datetime import 
datetime, timedelta +from datetime import datetime from io import StringIO import numpy as np import pytest from pandas._libs.tslib import iNaT -from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime import pandas as pd from pandas import ( @@ -14,10 +13,8 @@ NaT, Series, Timestamp, - concat, date_range, timedelta_range, - to_datetime, ) import pandas._testing as tm @@ -127,85 +124,6 @@ def test_contiguous_boolean_preserve_freq(self): masked = rng[mask] assert masked.freq is None - def test_to_datetime_unit(self): - - epoch = 1370745748 - s = Series([epoch + t for t in range(20)]) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)] + [iNaT]) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - s = Series([epoch + t for t in range(20)] + [iNaT]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - # GH13834 - s = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float) - result = to_datetime(s, unit="s") - expected = Series( - [ - Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) - for t in np.arange(0, 2, 0.25) - ] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - s = concat( - [Series([epoch + t for t in range(20)]).astype(float), Series([np.nan])], - ignore_index=True, - ) - result = to_datetime(s, unit="s") - expected = Series( - [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] - + [NaT] - ) - tm.assert_series_equal(result, expected) - - result = to_datetime([1, 2, "NaT", pd.NaT, np.nan], unit="D") - expected = DatetimeIndex( - [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3 - ) - tm.assert_index_equal(result, expected) - - msg = "non convertible value foo with the unit 'D'" - with pytest.raises(ValueError, match=msg): - to_datetime([1, 2, "foo"], unit="D") - msg = "cannot convert input 111111111 with the unit 'D'" - with pytest.raises(OutOfBoundsDatetime, match=msg): - to_datetime([1, 2, 111111111], unit="D") - - # coerce we can process - expected = DatetimeIndex( - [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1 - ) - result = to_datetime([1, 2, "foo"], unit="D", errors="coerce") - tm.assert_index_equal(result, expected) - - result = to_datetime([1, 2, 111111111], unit="D", errors="coerce") - tm.assert_index_equal(result, expected) - def test_series_ctor_datetime64(self): rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s") dates = np.asarray(rng)
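The conversions the moved test exercises, in isolation:

```python
import pandas as pd

epoch = 1370745748  # seconds since the Unix epoch

# numeric input is interpreted in the given unit
result = pd.to_datetime(pd.Series([epoch, epoch + 1]), unit="s")
assert result[0] == pd.Timestamp("2013-06-09 02:42:28")

# unconvertible entries can be coerced to NaT instead of raising
coerced = pd.to_datetime([1, 2, "foo"], unit="D", errors="coerce")
assert coerced.isna()[-1]
```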
https://api.github.com/repos/pandas-dev/pandas/pulls/32273
2020-02-26T17:45:29Z
2020-02-27T04:44:57Z
2020-02-27T04:44:57Z
2020-03-02T17:06:55Z
TST: method-specific files for droplevel
diff --git a/pandas/tests/frame/methods/test_droplevel.py b/pandas/tests/frame/methods/test_droplevel.py new file mode 100644 index 0000000000000..517905cf23259 --- /dev/null +++ b/pandas/tests/frame/methods/test_droplevel.py @@ -0,0 +1,23 @@ +from pandas import DataFrame, Index, MultiIndex +import pandas._testing as tm + + +class TestDropLevel: + def test_droplevel(self): + # GH#20342 + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) + df = df.set_index([0, 1]).rename_axis(["a", "b"]) + df.columns = MultiIndex.from_tuples( + [("c", "e"), ("d", "f")], names=["level_1", "level_2"] + ) + + # test that dropping of a level in index works + expected = df.reset_index("a", drop=True) + result = df.droplevel("a", axis="index") + tm.assert_frame_equal(result, expected) + + # test that dropping of a level in columns works + expected = df.copy() + expected.columns = Index(["c", "d"], name="level_1") + result = df.droplevel("level_2", axis="columns") + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 751ed1dfdd847..34df8bb57dd91 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -840,25 +840,6 @@ def test_reindex_signature(self): "tolerance", } - def test_droplevel(self): - # GH20342 - df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) - df = df.set_index([0, 1]).rename_axis(["a", "b"]) - df.columns = MultiIndex.from_tuples( - [("c", "e"), ("d", "f")], names=["level_1", "level_2"] - ) - - # test that dropping of a level in index works - expected = df.reset_index("a", drop=True) - result = df.droplevel("a", axis="index") - tm.assert_frame_equal(result, expected) - - # test that dropping of a level in columns works - expected = df.copy() - expected.columns = Index(["c", "d"], name="level_1") - result = df.droplevel("level_2", axis="columns") - tm.assert_frame_equal(result, expected) - class TestIntervalIndex: def test_setitem(self): diff --git a/pandas/tests/series/methods/test_droplevel.py b/pandas/tests/series/methods/test_droplevel.py new file mode 100644 index 0000000000000..435eb5751de4b --- /dev/null +++ b/pandas/tests/series/methods/test_droplevel.py @@ -0,0 +1,19 @@ +import pytest + +from pandas import MultiIndex, Series +import pandas._testing as tm + + +class TestDropLevel: + def test_droplevel(self): + # GH#20342 + ser = Series([1, 2, 3, 4]) + ser.index = MultiIndex.from_arrays( + [(1, 2, 3, 4), (5, 6, 7, 8)], names=["a", "b"] + ) + expected = ser.reset_index("b", drop=True) + result = ser.droplevel("b", axis="index") + tm.assert_series_equal(result, expected) + # test that droplevel raises ValueError on axis != 0 + with pytest.raises(ValueError): + ser.droplevel(1, axis="columns") diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 9be8744d7223f..f6ca93b0c2882 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -152,16 +152,3 @@ def test_set_axis_inplace(self): for axis in [2, "foo"]: with pytest.raises(ValueError, match="No axis named"): s.set_axis(list("abcd"), axis=axis, inplace=False) - - def test_droplevel(self): - # GH20342 - ser = Series([1, 2, 3, 4]) - ser.index = MultiIndex.from_arrays( - [(1, 2, 3, 4), (5, 6, 7, 8)], names=["a", "b"] - ) - expected = ser.reset_index("b", drop=True) - result = ser.droplevel("b", axis="index") - tm.assert_series_equal(result, expected) - # test that droplevel raises ValueError on axis != 0 - with 
pytest.raises(ValueError): - ser.droplevel(1, axis="columns")
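The frame-side behavior under test, as a standalone snippet:

```python
import pandas as pd

df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
df = df.set_index([0, 1]).rename_axis(["a", "b"])

# dropping an index level matches reset_index(..., drop=True)
result = df.droplevel("a", axis="index")
expected = df.reset_index("a", drop=True)
pd.testing.assert_frame_equal(result, expected)
```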
https://api.github.com/repos/pandas-dev/pandas/pulls/32272
2020-02-26T17:37:25Z
2020-02-27T12:35:36Z
2020-02-27T12:35:36Z
2020-02-27T14:55:37Z
TST: method-specific file for to_period
diff --git a/pandas/tests/series/methods/test_to_period.py b/pandas/tests/series/methods/test_to_period.py new file mode 100644 index 0000000000000..28c4aad3edf32 --- /dev/null +++ b/pandas/tests/series/methods/test_to_period.py @@ -0,0 +1,47 @@ +import numpy as np + +from pandas import ( + DataFrame, + DatetimeIndex, + PeriodIndex, + Series, + date_range, + period_range, +) +import pandas._testing as tm + + +class TestToPeriod: + def test_to_period(self): + rng = date_range("1/1/2000", "1/1/2001", freq="D") + ts = Series(np.random.randn(len(rng)), index=rng) + + pts = ts.to_period() + exp = ts.copy() + exp.index = period_range("1/1/2000", "1/1/2001") + tm.assert_series_equal(pts, exp) + + pts = ts.to_period("M") + exp.index = exp.index.asfreq("M") + tm.assert_index_equal(pts.index, exp.index.asfreq("M")) + tm.assert_series_equal(pts, exp) + + # GH#7606 without freq + idx = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"]) + exp_idx = PeriodIndex( + ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], freq="D" + ) + + s = Series(np.random.randn(4), index=idx) + expected = s.copy() + expected.index = exp_idx + tm.assert_series_equal(s.to_period(), expected) + + df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx) + expected = df.copy() + expected.index = exp_idx + tm.assert_frame_equal(df.to_period(), expected) + + expected = df.copy() + expected.columns = exp_idx + tm.assert_frame_equal(df.to_period(axis=1), expected) diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 8f06ea69f5d66..572c71fba9f9f 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -309,41 +309,6 @@ def test_format_pre_1900_dates(self): ts = Series(1, index=rng) repr(ts) - def test_to_period(self): - from pandas.core.indexes.period import period_range - - ts = _simple_ts("1/1/2000", "1/1/2001") - - pts = ts.to_period() - exp = ts.copy() - exp.index = period_range("1/1/2000", "1/1/2001") - tm.assert_series_equal(pts, exp) - - pts = ts.to_period("M") - exp.index = exp.index.asfreq("M") - tm.assert_index_equal(pts.index, exp.index.asfreq("M")) - tm.assert_series_equal(pts, exp) - - # GH 7606 without freq - idx = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"]) - exp_idx = pd.PeriodIndex( - ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"], freq="D" - ) - - s = Series(np.random.randn(4), index=idx) - expected = s.copy() - expected.index = exp_idx - tm.assert_series_equal(s.to_period(), expected) - - df = DataFrame(np.random.randn(4, 4), index=idx, columns=idx) - expected = df.copy() - expected.index = exp_idx - tm.assert_frame_equal(df.to_period(), expected) - - expected = df.copy() - expected.columns = exp_idx - tm.assert_frame_equal(df.to_period(axis=1), expected) - def test_groupby_count_dateparseerror(self): dr = date_range(start="1/1/2012", freq="5min", periods=10)
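The conversion this test file covers, reduced to its essentials:

```python
import numpy as np
import pandas as pd

rng = pd.date_range("2000-01-01", periods=3, freq="D")
ts = pd.Series(np.random.randn(len(rng)), index=rng)

# the DatetimeIndex becomes a PeriodIndex; the period frequency is
# inferred from the index unless passed explicitly
assert isinstance(ts.to_period().index, pd.PeriodIndex)
assert ts.to_period("M").index.freqstr == "M"
```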
https://api.github.com/repos/pandas-dev/pandas/pulls/32270
2020-02-26T16:12:01Z
2020-02-27T17:07:11Z
2020-02-27T17:07:11Z
2020-02-27T17:21:40Z
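As a quick illustration of what the relocated `to_period` tests cover: `Series.to_period` converts a `DatetimeIndex`-backed series to a `PeriodIndex`, inferring the frequency from the index when none is given. A hedged sketch (dates and values are made up):

```
import numpy as np
import pandas as pd

rng = pd.date_range("2000-01-01", periods=3, freq="D")
ts = pd.Series(np.arange(3), index=rng)

pts = ts.to_period()          # freq "D" inferred from the DatetimeIndex
assert isinstance(pts.index, pd.PeriodIndex)
assert pts.index.freqstr == "D"

monthly = ts.to_period("M")   # explicit target frequency
assert monthly.index.freqstr == "M"
```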
Backport PR #32166 on branch 1.0.x
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index d3b1442953e41..1b6098e6b6ac1 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -23,6 +23,7 @@ Fixed regressions - Fixed regression where :func:`read_pickle` raised a ``UnicodeDecodeError`` when reading a py27 pickle with :class:`MultiIndex` column (:issue:`31988`). - Fixed regression in :class:`DataFrame` arithmetic operations with mis-matched columns (:issue:`31623`) - Fixed regression in :meth:`GroupBy.agg` calling a user-provided function an extra time on an empty input (:issue:`31760`) +- Joining on :class:`DatetimeIndex` or :class:`TimedeltaIndex` will preserve ``freq`` in simple cases (:issue:`32166`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index c98b4f21dbb92..aaea609ec5049 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -857,21 +857,16 @@ def _is_convertible_to_index_for_join(cls, other: Index) -> bool: return True return False - def _wrap_joined_index(self, joined, other): + def _wrap_joined_index(self, joined: np.ndarray, other): + assert other.dtype == self.dtype, (other.dtype, self.dtype) name = get_op_result_name(self, other) - if ( - isinstance(other, type(self)) - and self.freq == other.freq - and self._can_fast_union(other) - ): - joined = self._shallow_copy(joined) - joined.name = name - return joined - else: - kwargs = {} - if hasattr(self, "tz"): - kwargs["tz"] = getattr(other, "tz", None) - return self._simple_new(joined, name, **kwargs) + + freq = self.freq if self._can_fast_union(other) else None + new_data = type(self._data)._simple_new( # type: ignore + joined, dtype=self.dtype, freq=freq + ) + + return type(self)._simple_new(new_data, name=name) class DatetimelikeDelegateMixin(PandasDelegate): diff --git a/pandas/tests/indexes/datetimes/test_join.py b/pandas/tests/indexes/datetimes/test_join.py new file mode 100644 index 0000000000000..f2f88fd7dc90c --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_join.py @@ -0,0 +1,144 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas import DatetimeIndex, Index, Timestamp, date_range, to_datetime +import pandas._testing as tm + +from pandas.tseries.offsets import BDay, BMonthEnd + + +class TestJoin: + def test_does_not_convert_mixed_integer(self): + df = tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args, **kwargs: np.random.randn(), + r_idx_type="i", + c_idx_type="dt", + ) + cols = df.columns.join(df.index, how="outer") + joined = cols.join(df.columns) + assert cols.dtype == np.dtype("O") + assert cols.dtype == joined.dtype + tm.assert_numpy_array_equal(cols.values, joined.values) + + def test_join_self(self, join_type): + index = date_range("1/1/2000", periods=10) + joined = index.join(index, how=join_type) + assert index is joined + + def test_join_with_period_index(self, join_type): + df = tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args: np.random.randint(2), + c_idx_type="p", + r_idx_type="dt", + ) + s = df.iloc[:5, 0] + + expected = df.columns.astype("O").join(s.index, how=join_type) + result = df.columns.join(s.index, how=join_type) + tm.assert_index_equal(expected, result) + + def test_join_object_index(self): + rng = date_range("1/1/2000", periods=10) + idx = Index(["a", "b", "c", "d"]) + + result = rng.join(idx, how="outer") + assert isinstance(result[0], Timestamp) + 
+ def test_join_utc_convert(self, join_type): + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + + left = rng.tz_convert("US/Eastern") + right = rng.tz_convert("Europe/Berlin") + + result = left.join(left[:-5], how=join_type) + assert isinstance(result, DatetimeIndex) + assert result.tz == left.tz + + result = left.join(right[:-5], how=join_type) + assert isinstance(result, DatetimeIndex) + assert result.tz.zone == "UTC" + + @pytest.mark.parametrize("sort", [None, False]) + def test_datetimeindex_union_join_empty(self, sort): + dti = date_range(start="1/1/2001", end="2/1/2001", freq="D") + empty = Index([]) + + result = dti.union(empty, sort=sort) + expected = dti.astype("O") + tm.assert_index_equal(result, expected) + + result = dti.join(empty) + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, dti) + + def test_join_nonunique(self): + idx1 = to_datetime(["2012-11-06 16:00:11.477563", "2012-11-06 16:00:11.477563"]) + idx2 = to_datetime(["2012-11-06 15:11:09.006507", "2012-11-06 15:11:09.006507"]) + rs = idx1.join(idx2, how="outer") + assert rs.is_monotonic + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_outer_join(self, freq): + # should just behave as union + start, end = datetime(2009, 1, 1), datetime(2010, 1, 1) + rng = date_range(start=start, end=end, freq=freq) + + # overlapping + left = rng[:10] + right = rng[5:10] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + + # non-overlapping, gap in middle + left = rng[:5] + right = rng[10:] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + assert the_join.freq is None + + # non-overlapping, no gap + left = rng[:5] + right = rng[5:10] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + + # overlapping, but different offset + other = date_range(start, end, freq=BMonthEnd()) + + the_join = rng.join(other, how="outer") + assert isinstance(the_join, DatetimeIndex) + assert the_join.freq is None + + def test_naive_aware_conflicts(self): + start, end = datetime(2009, 1, 1), datetime(2010, 1, 1) + naive = date_range(start, end, freq=BDay(), tz=None) + aware = date_range(start, end, freq=BDay(), tz="Asia/Hong_Kong") + + msg = "tz-naive.*tz-aware" + with pytest.raises(TypeError, match=msg): + naive.join(aware) + + with pytest.raises(TypeError, match=msg): + aware.join(naive) + + @pytest.mark.parametrize("tz", [None, "US/Pacific"]) + def test_join_preserves_freq(self, tz): + # GH#32157 + dti = date_range("2016-01-01", periods=10, tz=tz) + result = dti[:5].join(dti[5:], how="outer") + assert result.freq == dti.freq + tm.assert_index_equal(result, dti) + + result = dti[:5].join(dti[6:], how="outer") + assert result.freq is None + expected = dti.delete(5) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/timedeltas/test_join.py b/pandas/tests/indexes/timedeltas/test_join.py new file mode 100644 index 0000000000000..aaf4ef29e162b --- /dev/null +++ b/pandas/tests/indexes/timedeltas/test_join.py @@ -0,0 +1,49 @@ +import numpy as np + +from pandas import Index, Timedelta, timedelta_range +import pandas._testing as tm + + +class TestJoin: + def test_append_join_nondatetimeindex(self): + rng = timedelta_range("1 days", periods=10) + idx = Index(["a", "b", "c", "d"]) + + result = rng.append(idx) + assert isinstance(result[0], Timedelta) + + # it works + rng.join(idx, how="outer") + + def test_join_self(self, join_type): + index = timedelta_range("1 day", 
periods=10) + joined = index.join(index, how=join_type) + tm.assert_index_equal(index, joined) + + def test_does_not_convert_mixed_integer(self): + df = tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args, **kwargs: np.random.randn(), + r_idx_type="i", + c_idx_type="td", + ) + str(df) + + cols = df.columns.join(df.index, how="outer") + joined = cols.join(df.columns) + assert cols.dtype == np.dtype("O") + assert cols.dtype == joined.dtype + tm.assert_index_equal(cols, joined) + + def test_join_preserves_freq(self): + # GH#32157 + tdi = timedelta_range("1 day", periods=10) + result = tdi[:5].join(tdi[5:], how="outer") + assert result.freq == tdi.freq + tm.assert_index_equal(result, tdi) + + result = tdi[:5].join(tdi[6:], how="outer") + assert result.freq is None + expected = tdi.delete(5) + tm.assert_index_equal(result, expected)
#32166
https://api.github.com/repos/pandas-dev/pandas/pulls/32269
2020-02-26T15:56:14Z
2020-02-27T12:25:22Z
2020-02-27T12:25:22Z
2020-02-27T14:57:25Z
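The behavioral change backported here is easiest to see directly: joining two slices of the same regular `DatetimeIndex` now preserves `freq` when the result is still regular, and drops it otherwise. A minimal sketch of the behavior pinned down by the new `test_join_preserves_freq` tests (GH#32157/GH#32166):

```
import pandas as pd

dti = pd.date_range("2016-01-01", periods=10)

# contiguous slices: the outer join reconstitutes the original regular index
result = dti[:5].join(dti[5:], how="outer")
assert result.freq == dti.freq

# a gap in the middle: regularity is lost, so freq is dropped
gapped = dti[:5].join(dti[6:], how="outer")
assert gapped.freq is None
```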
Backport PR #32214 on branch 1.0.x (BUG: Cast pd.NA to pd.NaT in to_datetime)
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index f491774991090..d3b1442953e41 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -62,6 +62,7 @@ Bug fixes **Datetimelike** - Bug in :meth:`DataFrame.reindex` and :meth:`Series.reindex` when reindexing with a tz-aware index (:issue:`26683`) +- Bug where :func:`to_datetime` would raise when passed ``pd.NA`` (:issue:`32213`) **Categorical** diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 67c0f0cc33ab8..2f972a3153e5e 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -22,6 +22,8 @@ from pandas._libs.tslibs.util cimport ( get_nat, is_integer_object, is_float_object, is_datetime64_object, is_timedelta64_object) +from pandas._libs.missing cimport C_NA + # ---------------------------------------------------------------------- # Constants @@ -769,7 +771,7 @@ NaT = c_NaT # Python-visible cdef inline bint checknull_with_nat(object val): """ utility to check if a value is a nat or not """ - return val is None or util.is_nan(val) or val is c_NaT + return val is None or util.is_nan(val) or val is c_NaT or val is C_NA cpdef bint is_null_datetimelike(object val, bint inat_is_null=True): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 198ae1cbd4967..6cae6f47f1885 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -2315,3 +2315,10 @@ def test_nullable_integer_to_datetime(): tm.assert_series_equal(res, expected) # Check that ser isn't mutated tm.assert_series_equal(ser, ser_copy) + + +@pytest.mark.parametrize("klass", [np.array, list]) +def test_na_to_datetime(nulls_fixture, klass): + result = pd.to_datetime(klass([nulls_fixture])) + + assert result[0] is pd.NaT
Backport PR #32214: BUG: Cast pd.NA to pd.NaT in to_datetime
https://api.github.com/repos/pandas-dev/pandas/pulls/32267
2020-02-26T12:40:34Z
2020-02-26T15:33:16Z
2020-02-26T15:33:16Z
2020-02-26T15:33:16Z
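The one-line Cython fix above teaches `checknull_with_nat` about `pd.NA`, so `to_datetime` coerces it to `NaT` instead of raising. A quick sketch of the fixed behavior (GH#32213):

```
import numpy as np
import pandas as pd

# previously this raised; pd.NA is now treated like the other null sentinels
result = pd.to_datetime([pd.NA])
assert result[0] is pd.NaT

result = pd.to_datetime(np.array([pd.NA], dtype=object))
assert result[0] is pd.NaT
```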
PERF: lazify blknos and blklocs
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 515e1bcd761b6..7570f6eddbd9c 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -48,8 +48,8 @@ def get_mgr_concatenation_plan(mgr, indexers): if 0 in indexers: ax0_indexer = indexers.pop(0) - blknos = algos.take_1d(mgr._blknos, ax0_indexer, fill_value=-1) - blklocs = algos.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1) + blknos = algos.take_1d(mgr.blknos, ax0_indexer, fill_value=-1) + blklocs = algos.take_1d(mgr.blklocs, ax0_indexer, fill_value=-1) else: if mgr._is_single_block: @@ -57,8 +57,8 @@ def get_mgr_concatenation_plan(mgr, indexers): return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))] ax0_indexer = None - blknos = mgr._blknos - blklocs = mgr._blklocs + blknos = mgr.blknos + blklocs = mgr.blklocs plan = [] for blkno, placements in libinternals.get_blkno_placements(blknos, group=False): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 329bfdf543c62..f4e0535172e64 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -141,9 +141,37 @@ def __init__( if do_integrity_check: self._verify_integrity() + # Populate known_consolidate, blknos, and blklocs lazily self._known_consolidated = False + self._blknos = None + self._blklocs = None - self._rebuild_blknos_and_blklocs() + @property + def blknos(self): + """ + Suppose we want to find the array corresponding to our i'th column. + + blknos[i] identifies the block from self.blocks that contains this column. + + blklocs[i] identifies the column of interest within + self.blocks[self.blknos[i]] + """ + if self._blknos is None: + # Note: these can be altered by other BlockManager methods. + self._rebuild_blknos_and_blklocs() + + return self._blknos + + @property + def blklocs(self): + """ + See blknos.__doc__ + """ + if self._blklocs is None: + # Note: these can be altered by other BlockManager methods. + self._rebuild_blknos_and_blklocs() + + return self._blklocs def make_empty(self, axes=None): """ return an empty BlockManager with the items axis of len 0 """ @@ -227,6 +255,7 @@ def _rebuild_blknos_and_blklocs(self): new_blklocs[rl.indexer] = np.arange(len(rl)) if (new_blknos == -1).any(): + # TODO: can we avoid this? it isn't cheap raise AssertionError("Gaps in blk ref_locs") self._blknos = new_blknos @@ -250,7 +279,7 @@ def get_dtype_counts(self): def get_dtypes(self): dtypes = np.array([blk.dtype for blk in self.blocks]) - return algos.take_1d(dtypes, self._blknos, allow_fill=False) + return algos.take_1d(dtypes, self.blknos, allow_fill=False) def __getstate__(self): block_values = [b.values for b in self.blocks] @@ -944,8 +973,8 @@ def iget(self, i: int) -> "SingleBlockManager": """ Return the data as a SingleBlockManager. 
""" - block = self.blocks[self._blknos[i]] - values = block.iget(self._blklocs[i]) + block = self.blocks[self.blknos[i]] + values = block.iget(self.blklocs[i]) # shortcut for select a single-dim from a 2-dim BM return SingleBlockManager( @@ -973,7 +1002,7 @@ def delete(self, item): else: affected_start = is_deleted.nonzero()[0][0] - for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]): + for blkno, _ in _fast_count_smallints(self.blknos[affected_start:]): blk = self.blocks[blkno] bml = blk.mgr_locs blk_del = is_deleted[bml.indexer].nonzero()[0] @@ -1002,6 +1031,8 @@ def set(self, item, value): """ # FIXME: refactor, clearly separate broadcasting & zip-like assignment # can prob also fix the various if tests for sparse/categorical + if self._blklocs is None and self.ndim > 1: + self._rebuild_blknos_and_blklocs() value_is_extension_type = is_extension_array_dtype(value) @@ -1038,8 +1069,9 @@ def value_getitem(placement): if isinstance(loc, int): loc = [loc] - blknos = self._blknos[loc] - blklocs = self._blklocs[loc].copy() + # Accessing public blknos ensures the public versions are initialized + blknos = self.blknos[loc] + blklocs = self.blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] @@ -1141,7 +1173,7 @@ def insert(self, loc: int, item, value, allow_duplicates: bool = False): block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1)) - for blkno, count in _fast_count_smallints(self._blknos[loc:]): + for blkno, count in _fast_count_smallints(self.blknos[loc:]): blk = self.blocks[blkno] if count == len(blk.mgr_locs): blk.mgr_locs = blk.mgr_locs.add(1) @@ -1150,7 +1182,8 @@ def insert(self, loc: int, item, value, allow_duplicates: bool = False): new_mgr_locs[new_mgr_locs >= loc] += 1 blk.mgr_locs = new_mgr_locs - if loc == self._blklocs.shape[0]: + # Accessing public blklocs ensures the public versions are initialized + if loc == self.blklocs.shape[0]: # np.append is a lot faster, let's use it if we can. 
self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) @@ -1268,14 +1301,14 @@ def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None): ] if sl_type in ("slice", "mask"): - blknos = self._blknos[slobj] - blklocs = self._blklocs[slobj] + blknos = self.blknos[slobj] + blklocs = self.blklocs[slobj] else: blknos = algos.take_1d( - self._blknos, slobj, fill_value=-1, allow_fill=allow_fill + self.blknos, slobj, fill_value=-1, allow_fill=allow_fill ) blklocs = algos.take_1d( - self._blklocs, slobj, fill_value=-1, allow_fill=allow_fill + self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill ) # When filling blknos, make sure blknos is updated before appending to diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 636cca0df9d4e..7b64227763ecc 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -2209,16 +2209,17 @@ def test_object_casting_indexing_wraps_datetimelike(): assert isinstance(ser.values[2], pd.Timedelta) mgr = df._data + mgr._rebuild_blknos_and_blklocs() arr = mgr.fast_xs(0) assert isinstance(arr[1], pd.Timestamp) assert isinstance(arr[2], pd.Timedelta) - blk = mgr.blocks[mgr._blknos[1]] + blk = mgr.blocks[mgr.blknos[1]] assert blk.dtype == "M8[ns]" # we got the right block val = blk.iget((0, 0)) assert isinstance(val, pd.Timestamp) - blk = mgr.blocks[mgr._blknos[2]] + blk = mgr.blocks[mgr.blknos[2]] assert blk.dtype == "m8[ns]" # we got the right block val = blk.iget((0, 0)) assert isinstance(val, pd.Timedelta) diff --git a/pandas/tests/frame/test_nonunique_indexes.py b/pandas/tests/frame/test_nonunique_indexes.py index 32ead406a3e86..233c0f4bd3544 100644 --- a/pandas/tests/frame/test_nonunique_indexes.py +++ b/pandas/tests/frame/test_nonunique_indexes.py @@ -474,8 +474,8 @@ def test_columns_with_dups(self): ) df = pd.concat([df_float, df_int, df_bool, df_object, df_dt], axis=1) - assert len(df._data._blknos) == len(df.columns) - assert len(df._data._blklocs) == len(df.columns) + assert len(df._data.blknos) == len(df.columns) + assert len(df._data.blklocs) == len(df.columns) # testing iloc for i in range(len(df.columns)): diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 27b0500983afd..378446398404e 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -309,7 +309,8 @@ def test_duplicate_ref_loc_failure(self): msg = "Gaps in blk ref_locs" with pytest.raises(AssertionError, match=msg): - BlockManager(blocks, axes) + mgr = BlockManager(blocks, axes) + mgr._rebuild_blknos_and_blklocs() blocks[0].mgr_locs = np.array([0]) blocks[1].mgr_locs = np.array([1])
The benchmark I'm using for this is the same as for #32224, based on the asv that is most affected by removing `fast_apply` (see #32086):

```
import numpy as np
from pandas import *
%load_ext line_profiler

def get_df():
    N = 10 ** 4
    labels = np.random.randint(0, 2000, size=N)
    labels2 = np.random.randint(0, 3, size=N)
    df = DataFrame(
        {
            "key": labels,
            "key2": labels2,
            "value1": np.random.randn(N),
            "value2": ["foo", "bar", "baz", "qux"] * (N // 4),
        }
    )
    return df

df = get_df()
gb = df.groupby("key")

%prun -s cumulative gb.apply(lambda x: 1)
```

If we disable `fast_apply` on master, this gives:

```
ncalls  tottime  percall  cumtime  percall  filename:lineno(function)
     1    0.000    0.000    0.207    0.207  groupby.py:701(apply)
     1    0.000    0.000    0.207    0.207  groupby.py:750(_python_apply_general)
     1    0.008    0.008    0.204    0.204  ops.py:151(apply)
  1994    0.003    0.000    0.191    0.000  ops.py:858(__iter__)
  1993    0.003    0.000    0.187    0.000  ops.py:889(_chop)
  1993    0.003    0.000    0.184    0.000  indexing.py:814(__getitem__)
  1993    0.001    0.000    0.180    0.000  indexing.py:1462(_getitem_axis)
  1993    0.003    0.000    0.179    0.000  indexing.py:1488(_get_slice_axis)
  1993    0.007    0.000    0.167    0.000  generic.py:3474(_slice)
  1993    0.007    0.000    0.140    0.000  managers.py:713(get_slice)
  1994    0.004    0.000    0.068    0.000  managers.py:125(__init__)
  1994    0.027    0.000    0.059    0.000  managers.py:215(_rebuild_blknos_and_blklocs)
  1993    0.002    0.000    0.050    0.000  managers.py:723(<listcomp>)
  5979    0.010    0.000    0.048    0.000  blocks.py:310(getitem_block)
  5983    0.003    0.000    0.033    0.000  blocks.py:275(make_block_same_class)
```

If we disable `fast_apply` on this PR:

```
ncalls  tottime  percall  cumtime  percall  filename:lineno(function)
     1    0.000    0.000    0.136    0.136  groupby.py:701(apply)
     1    0.000    0.000    0.136    0.136  groupby.py:750(_python_apply_general)
     1    0.007    0.007    0.134    0.134  ops.py:151(apply)
  1984    0.002    0.000    0.120    0.000  ops.py:903(__iter__)
  1983    0.002    0.000    0.117    0.000  ops.py:934(_chop)
  1983    0.003    0.000    0.114    0.000  indexing.py:814(__getitem__)
  1983    0.001    0.000    0.110    0.000  indexing.py:1462(_getitem_axis)
  1983    0.003    0.000    0.109    0.000  indexing.py:1488(_get_slice_axis)
  1983    0.006    0.000    0.098    0.000  generic.py:3474(_slice)
  1983    0.006    0.000    0.076    0.000  managers.py:742(get_slice)
  1983    0.002    0.000    0.048    0.000  managers.py:752(<listcomp>)
  5949    0.010    0.000    0.046    0.000  blocks.py:310(getitem_block)
  5957    0.003    0.000    0.032    0.000  blocks.py:275(make_block_same_class)
  5960    0.006    0.000    0.029    0.000  blocks.py:3023(make_block)
```
https://api.github.com/repos/pandas-dev/pandas/pulls/32261
2020-02-26T04:58:26Z
2020-03-04T14:28:53Z
2020-03-04T14:28:53Z
2020-03-04T15:19:36Z
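The pattern in the diff is plain lazy initialization: the expensive `_rebuild_blknos_and_blklocs` call is deferred until a caller actually reads `blknos` or `blklocs` through the new properties. A stripped-down sketch of the idea, independent of `BlockManager` internals (the class and names below are illustrative):

```
import numpy as np


class LazyMapping:
    """Defer building an expensive lookup table until first access."""

    def __init__(self, n: int):
        self._n = n
        self._table = None  # populated lazily, like _blknos/_blklocs above

    def _rebuild(self) -> None:
        # stand-in for the expensive rebuild step
        self._table = np.arange(self._n)

    @property
    def table(self) -> np.ndarray:
        if self._table is None:
            self._rebuild()
        return self._table


m = LazyMapping(5)
assert m._table is None  # nothing built at construction time
assert m.table[2] == 2   # first read triggers the rebuild
```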
CLN: remove dtype kwarg from _simple_new
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 67f2f05c8af1e..c215fdb475ed8 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -451,7 +451,7 @@ def asi8(self): return None @classmethod - def _simple_new(cls, values, name=None, dtype=None): + def _simple_new(cls, values, name: Label = None): """ We require that we have a dtype compat for the values. If we are passed a non-dtype compat, then coerce using the constructor. @@ -3310,7 +3310,7 @@ def reindex(self, target, method=None, level=None, limit=None, tolerance=None): values = range(0) else: values = self._data[:0] # appropriately-dtyped empty array - target = self._simple_new(values, dtype=self.dtype, **attrs) + target = self._simple_new(values, **attrs) else: target = ensure_index(target) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 67bed7bd77c7f..5f0d6ea2d6278 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -226,8 +226,7 @@ def _create_from_codes(self, codes, dtype=None, name=None): return CategoricalIndex(cat, name=name) @classmethod - def _simple_new(cls, values: Categorical, name=None, dtype=None): - # GH#32204 dtype is included for compat with Index._simple_new + def _simple_new(cls, values: Categorical, name: Label = None): assert isinstance(values, Categorical), type(values) result = object.__new__(cls) @@ -433,7 +432,7 @@ def where(self, cond, other=None): other = self._na_value values = np.where(cond, self.values, other) cat = Categorical(values, dtype=self.dtype) - return self._shallow_copy(cat) + return type(self)._simple_new(cat, name=self.name) def reindex(self, target, method=None, level=None, limit=None, tolerance=None): """ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 6ea4250e4acf4..ad89766b0798c 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -9,7 +9,7 @@ from pandas._libs import Timedelta, Timestamp, lib from pandas._libs.interval import Interval, IntervalMixin, IntervalTree -from pandas._typing import AnyArrayLike +from pandas._typing import AnyArrayLike, Label from pandas.util._decorators import Appender, Substitution, cache_readonly from pandas.util._exceptions import rewrite_exception @@ -191,7 +191,7 @@ def func(intvidx_self, other, sort=False): class IntervalIndex(IntervalMixin, ExtensionIndex): _typ = "intervalindex" _comparables = ["name"] - _attributes = ["name", "closed"] + _attributes = ["name"] # we would like our indexing holder to defer to us _defer_to_indexing = True @@ -227,17 +227,15 @@ def __new__( return cls._simple_new(array, name) @classmethod - def _simple_new(cls, array, name, closed=None): + def _simple_new(cls, array: IntervalArray, name: Label = None): """ Construct from an IntervalArray Parameters ---------- array : IntervalArray - name : str + name : Label, default None Attached as result.name - closed : Any - Ignored. """ assert isinstance(array, IntervalArray), type(array) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index c7c11c60185b3..35a5d99abf4e6 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -217,7 +217,7 @@ def __new__( return cls._simple_new(data, name=name) @classmethod - def _simple_new(cls, values, name=None, freq=None, **kwargs): + def _simple_new(cls, values: PeriodArray, name: Label = None): """ Create a new PeriodIndex. 
@@ -228,7 +228,6 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs): or coercion. """ assert isinstance(values, PeriodArray), type(values) - assert freq is None or freq == values.freq, (freq, values.freq) result = object.__new__(cls) result._data = values diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index fa8551bc646a6..71cc62e6a110b 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -95,7 +95,7 @@ def __new__( # RangeIndex if isinstance(start, RangeIndex): start = start._range - return cls._simple_new(start, dtype=dtype, name=name) + return cls._simple_new(start, name=name) # validate the arguments if com.all_none(start, stop, step): @@ -113,7 +113,7 @@ def __new__( raise ValueError("Step must not be zero") rng = range(start, stop, step) - return cls._simple_new(rng, dtype=dtype, name=name) + return cls._simple_new(rng, name=name) @classmethod def from_range(cls, data: range, name=None, dtype=None) -> "RangeIndex": @@ -131,10 +131,10 @@ def from_range(cls, data: range, name=None, dtype=None) -> "RangeIndex": ) cls._validate_dtype(dtype) - return cls._simple_new(data, dtype=dtype, name=name) + return cls._simple_new(data, name=name) @classmethod - def _simple_new(cls, values: range, name=None, dtype=None) -> "RangeIndex": + def _simple_new(cls, values: range, name: Label = None) -> "RangeIndex": result = object.__new__(cls) assert isinstance(values, range) diff --git a/pandas/tests/indexes/period/test_constructors.py b/pandas/tests/indexes/period/test_constructors.py index 418f53591b913..b5ff83ec7514d 100644 --- a/pandas/tests/indexes/period/test_constructors.py +++ b/pandas/tests/indexes/period/test_constructors.py @@ -322,9 +322,9 @@ def test_constructor_simple_new(self): idx = period_range("2007-01", name="p", periods=2, freq="M") with pytest.raises(AssertionError, match="<class .*PeriodIndex'>"): - idx._simple_new(idx, name="p", freq=idx.freq) + idx._simple_new(idx, name="p") - result = idx._simple_new(idx._data, name="p", freq=idx.freq) + result = idx._simple_new(idx._data, name="p") tm.assert_index_equal(result, idx) with pytest.raises(AssertionError): @@ -339,19 +339,19 @@ def test_constructor_simple_new_empty(self): # GH13079 idx = PeriodIndex([], freq="M", name="p") with pytest.raises(AssertionError, match="<class .*PeriodIndex'>"): - idx._simple_new(idx, name="p", freq="M") + idx._simple_new(idx, name="p") - result = idx._simple_new(idx._data, name="p", freq="M") + result = idx._simple_new(idx._data, name="p") tm.assert_index_equal(result, idx) @pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])]) def test_constructor_floats(self, floats): with pytest.raises(AssertionError, match="<class "): - PeriodIndex._simple_new(floats, freq="M") + PeriodIndex._simple_new(floats) msg = "PeriodIndex does not allow floating point in construction" with pytest.raises(TypeError, match=msg): - PeriodIndex(floats, freq="M") + PeriodIndex(floats) def test_constructor_nat(self): msg = "start and end must not be NaT"
DatetimeIndex remains an outlier; an upcoming PR will handle it.
https://api.github.com/repos/pandas-dev/pandas/pulls/32260
2020-02-26T03:42:23Z
2020-02-26T12:27:25Z
2020-02-26T12:27:25Z
2020-02-26T16:08:13Z
TST: broken off from #32187
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index fff5ca03e80f4..18b9898e7d800 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -40,20 +40,7 @@ def check(self, result, original, indexer, getitem): tm.assert_almost_equal(result, expected) - @pytest.mark.parametrize( - "index_func", - [ - tm.makeStringIndex, - tm.makeUnicodeIndex, - tm.makeCategoricalIndex, - tm.makeDateIndex, - tm.makeTimedeltaIndex, - tm.makePeriodIndex, - tm.makeIntIndex, - tm.makeRangeIndex, - ], - ) - def test_scalar_error(self, index_func): + def test_scalar_error(self, series_with_simple_index): # GH 4892 # float_indexers should raise exceptions @@ -62,11 +49,9 @@ def test_scalar_error(self, index_func): # but is specifically testing for the error # message - i = index_func(5) + s = series_with_simple_index - s = Series(np.arange(len(i)), index=i) - - msg = "Cannot index by location index" + msg = "Cannot index by location index with a non-integer key" with pytest.raises(TypeError, match=msg): s.iloc[3.0]
troubleshooting an apparently-unrelated Travis failure there
https://api.github.com/repos/pandas-dev/pandas/pulls/32258
2020-02-26T02:13:12Z
2020-03-03T01:56:51Z
2020-03-03T01:56:51Z
2020-03-03T01:59:38Z
PERF: pass through to numpy validation for iloc setitem
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 5adc65b488399..69283bc58799e 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -8,7 +8,6 @@ from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( - is_float, is_integer, is_iterator, is_list_like, @@ -1500,18 +1499,10 @@ def _convert_to_indexer(self, key, axis: int, is_setter: bool = False): """ Much simpler as we only have to deal with our valid types. """ - labels = self.obj._get_axis(axis) - - # make need to convert a float key - if isinstance(key, slice): - labels._validate_positional_slice(key) - return key - - elif is_float(key): - # _validate_indexer call will always raise - labels._validate_indexer("positional", key, "iloc") + return key - self._validate_key(key, axis) + def _get_setitem_indexer(self, key): + # GH#32257 Fall through to let numnpy do validation return key # ------------------------------------------------------------------- diff --git a/pandas/tests/frame/indexing/test_indexing.py b/pandas/tests/frame/indexing/test_indexing.py index 636cca0df9d4e..997414eceeb86 100644 --- a/pandas/tests/frame/indexing/test_indexing.py +++ b/pandas/tests/frame/indexing/test_indexing.py @@ -27,6 +27,9 @@ from pandas.tseries.offsets import BDay +# We pass through a TypeError raised by numpy +_slice_msg = "slice indices must be integers or None or have an __index__ method" + class TestGet: def test_get(self, float_frame): @@ -994,7 +997,8 @@ def test_getitem_setitem_fancy_exceptions(self, float_frame): with pytest.raises(IndexingError, match="Too many indexers"): ix[:, :, :] - with pytest.raises(IndexingError, match="Too many indexers"): + with pytest.raises(IndexError, match="too many indices for array"): + # GH#32257 we let numpy do validation, get their exception ix[:, :, :] = 1 def test_getitem_setitem_boolean_misaligned(self, float_frame): @@ -1073,7 +1077,7 @@ def test_getitem_setitem_float_labels(self): cp = df.copy() - with pytest.raises(TypeError, match=msg): + with pytest.raises(TypeError, match=_slice_msg): cp.iloc[1.0:5] = 0 with pytest.raises(TypeError, match=msg): diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 87520f5ab2577..a84e88cefbced 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -1,9 +1,17 @@ +import re + import numpy as np import pytest from pandas import DataFrame, Float64Index, Index, Int64Index, RangeIndex, Series import pandas._testing as tm +# We pass through the error message from numpy +_slice_iloc_msg = re.escape( + "only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) " + "and integer or boolean arrays are valid indices" +) + def gen_obj(klass, index): if klass is Series: @@ -62,11 +70,7 @@ def test_scalar_error(self, index_func): with pytest.raises(TypeError, match=msg): s.iloc[3.0] - msg = ( - f"cannot do positional indexing on {type(i).__name__} with these " - r"indexers \[3\.0\] of type float" - ) - with pytest.raises(TypeError, match=msg): + with pytest.raises(IndexError, match=_slice_iloc_msg): s.iloc[3.0] = 0 @pytest.mark.parametrize( @@ -133,12 +137,7 @@ def test_scalar_non_numeric(self, index_func, klass): assert 3.0 not in s # setting with a float fails with iloc - msg = ( - r"cannot do (label|positional) indexing " - fr"on {type(i).__name__} with these indexers \[3\.0\] of " - "type float" - ) - with pytest.raises(TypeError, match=msg): + with pytest.raises(IndexError, match=_slice_iloc_msg): s.iloc[3.0] = 0 # 
setting with an indexer @@ -327,12 +326,7 @@ def test_scalar_float(self, klass): with pytest.raises(TypeError, match=msg): s.iloc[3.0] - msg = ( - "cannot do positional indexing " - fr"on {Float64Index.__name__} with these indexers \[3\.0\] of " - "type float" - ) - with pytest.raises(TypeError, match=msg): + with pytest.raises(IndexError, match=_slice_iloc_msg): s2.iloc[3.0] = 0 @pytest.mark.parametrize( @@ -376,11 +370,7 @@ def test_slice_non_numeric(self, index_func, l, klass): idxr(s)[l] # setitem - msg = ( - "cannot do positional indexing " - fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " - "type float" - ) + msg = "slice indices must be integers or None or have an __index__ method" with pytest.raises(TypeError, match=msg): s.iloc[l] = 0 @@ -390,7 +380,7 @@ def test_slice_non_numeric(self, index_func, l, klass): r"\[(3|4)(\.0)?\] " r"of type (float|int)" ) - for idxr in [lambda x: x.loc, lambda x: x.iloc, lambda x: x]: + for idxr in [lambda x: x.loc, lambda x: x]: with pytest.raises(TypeError, match=msg): idxr(s)[l] = 0
We lose a little bit of ground on the range and slice keys (not really sure why) and pick up a bigger amount of ground on list or ndarray keys.

```
In [3]: ser = pd.Series(range(10**5))

In [4]: key = range(100, 200)

In [5]: key2 = list(key)

In [6]: key3 = slice(100, 200)

In [7]: key4 = np.array(key2)

In [16]: %timeit ser.iloc[key] = 1
56.3 µs ± 1.19 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)  # <-- master
62.9 µs ± 1.77 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)  # <-- PR

In [17]: %timeit ser.iloc[key2] = 1
95.6 µs ± 2.69 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)  # <-- master
55.6 µs ± 922 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)  # <-- PR

In [20]: %timeit ser.iloc[key3] = 1
49 µs ± 756 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)  # <-- master
50.6 µs ± 1.01 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)  # <-- PR

In [21]: %timeit ser.iloc[key4] = 1
71.6 µs ± 1.98 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)  # <-- master
45.7 µs ± 427 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)  # <-- PR
```
https://api.github.com/repos/pandas-dev/pandas/pulls/32257
2020-02-26T02:00:45Z
2020-02-27T12:52:36Z
2020-02-27T12:52:36Z
2020-02-27T14:55:30Z
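"Pass through to numpy validation" means the iloc indexer is no longer pre-checked by pandas; the underlying ndarray's `__setitem__` raises instead, which is why the expected exception types and messages in the updated tests change. The numpy side of that contract, sketched directly (the messages asserted here are the ones quoted in the diff):

```
import numpy as np

arr = np.arange(10)

# float scalar keys: numpy raises IndexError with its own wording
try:
    arr[3.0] = 0
except IndexError as err:
    assert "integer" in str(err)

# float slice bounds: numpy raises TypeError instead
try:
    arr[1.0:5] = 0
except TypeError as err:
    assert "__index__" in str(err)
```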
CLN tests.generic
diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 8e54de771a3e4..1b6cb8447c76d 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -187,8 +187,10 @@ def test_constructor_compound_dtypes(self): def f(dtype): return self._construct(shape=3, value=1, dtype=dtype) - msg = "compound dtypes are not implemented" - f"in the {self._typ.__name__} constructor" + msg = ( + "compound dtypes are not implemented " + f"in the {self._typ.__name__} constructor" + ) with pytest.raises(NotImplementedError, match=msg): f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py index 5aafd83da78fd..f119eb422a276 100644 --- a/pandas/tests/generic/test_series.py +++ b/pandas/tests/generic/test_series.py @@ -24,13 +24,6 @@ class TestSeries(Generic): _typ = Series _comparator = lambda self, x, y: tm.assert_series_equal(x, y) - def setup_method(self): - self.ts = tm.makeTimeSeries() # Was at top level in test_series - self.ts.name = "ts" - - self.series = tm.makeStringSeries() - self.series.name = "series" - def test_rename_mi(self): s = Series( [11, 21, 31], diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 0c9ddbf5473b3..27b0500983afd 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -203,12 +203,6 @@ def create_mgr(descr, item_shape=None): class TestBlock: def setup_method(self, method): - # self.fblock = get_float_ex() # a,c,e - # self.cblock = get_complex_ex() # - # self.oblock = get_obj_ex() - # self.bool_block = get_bool_ex() - # self.int_block = get_int_ex() - self.fblock = create_block("float", [0, 2, 4]) self.cblock = create_block("complex", [7]) self.oblock = create_block("object", [1, 3]) @@ -254,22 +248,11 @@ def test_merge(self): tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals)) tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals)) - # TODO: merge with mixed type? - def test_copy(self): cop = self.fblock.copy() assert cop is not self.fblock assert_block_equal(self.fblock, cop) - def test_reindex_index(self): - pass - - def test_reindex_cast(self): - pass - - def test_insert(self): - pass - def test_delete(self): newb = self.fblock.copy() newb.delete(0) @@ -300,39 +283,7 @@ def test_delete(self): newb.delete(3) -class TestDatetimeBlock: - def test_can_hold_element(self): - block = create_block("datetime", [0]) - - # We will check that block._can_hold_element iff arr.__setitem__ works - arr = pd.array(block.values.ravel()) - - # coerce None - assert block._can_hold_element(None) - arr[0] = None - assert arr[0] is pd.NaT - - # coerce different types of datetime objects - vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)] - for val in vals: - assert block._can_hold_element(val) - arr[0] = val - - val = date(2010, 10, 10) - assert not block._can_hold_element(val) - - msg = ( - "'value' should be a 'Timestamp', 'NaT', " - "or array of those. Got 'date' instead." 
- ) - with pytest.raises(TypeError, match=msg): - arr[0] = val - - class TestBlockManager: - def test_constructor_corner(self): - pass - def test_attrs(self): mgr = create_mgr("a,b,c: f8-1; d,e,f: f8-2") assert mgr.nblocks == 2 @@ -441,18 +392,6 @@ def test_set_change_dtype(self, mgr): mgr2.set("quux", tm.randn(N)) assert mgr2.get("quux").dtype == np.float_ - def test_set_change_dtype_slice(self): # GH8850 - cols = MultiIndex.from_tuples([("1st", "a"), ("2nd", "b"), ("3rd", "c")]) - df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols) - df["2nd"] = df["2nd"] * 2.0 - - blocks = df._to_dict_of_blocks() - assert sorted(blocks.keys()) == ["float64", "int64"] - tm.assert_frame_equal( - blocks["float64"], DataFrame([[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]) - ) - tm.assert_frame_equal(blocks["int64"], DataFrame([[3], [6]], columns=cols[2:])) - def test_copy(self, mgr): cp = mgr.copy(deep=False) for blk, cp_blk in zip(mgr.blocks, cp.blocks): @@ -486,7 +425,7 @@ def test_sparse_mixed(self): assert len(mgr.blocks) == 3 assert isinstance(mgr, BlockManager) - # what to test here? + # TODO: what to test here? def test_as_array_float(self): mgr = create_mgr("c: f4; d: f2; e: f8") @@ -650,22 +589,6 @@ def test_interleave(self): mgr = create_mgr("a: M8[ns]; b: m8[ns]") assert mgr.as_array().dtype == "object" - def test_interleave_non_unique_cols(self): - df = DataFrame( - [[pd.Timestamp("20130101"), 3.5], [pd.Timestamp("20130102"), 4.5]], - columns=["x", "x"], - index=[1, 2], - ) - - df_unique = df.copy() - df_unique.columns = ["x", "y"] - assert df_unique.values.shape == df.values.shape - tm.assert_numpy_array_equal(df_unique.values[0], df.values[0]) - tm.assert_numpy_array_equal(df_unique.values[1], df.values[1]) - - def test_consolidate(self): - pass - def test_consolidate_ordering_issues(self, mgr): mgr.set("f", tm.randn(N)) mgr.set("d", tm.randn(N)) @@ -683,10 +606,6 @@ def test_consolidate_ordering_issues(self, mgr): cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.int64) ) - def test_reindex_index(self): - # TODO: should this be pytest.skip? 
- pass - def test_reindex_items(self): # mgr is not consolidated, f8 & f8-2 blocks mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2") @@ -767,13 +686,6 @@ def test_get_bool_data(self): def test_unicode_repr_doesnt_raise(self): repr(create_mgr("b,\u05d0: object")) - def test_missing_unicode_key(self): - df = DataFrame({"a": [1]}) - try: - df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError - except KeyError: - pass # this is the expected exception - def test_equals(self): # unique items bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2") @@ -843,8 +755,6 @@ class TestIndexing: create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N, N)), ] - # MANAGERS = [MANAGERS[6]] - @pytest.mark.parametrize("mgr", MANAGERS) def test_get_slice(self, mgr): def assert_slice_ok(mgr, axis, slobj): @@ -994,11 +904,6 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value): mgr, ax, pd.Index(["foo", "bar", "baz"]), [0, 1, 2], fill_value, ) - # test_get_slice(slice_like, axis) - # take(indexer, axis) - # reindex_axis(new_labels, axis) - # reindex_indexer(new_labels, indexer, axis) - class TestBlockPlacement: def test_slice_len(self): @@ -1151,6 +1056,33 @@ def any(self, axis=None): class TestCanHoldElement: + def test_datetime_block_can_hold_element(self): + block = create_block("datetime", [0]) + + # We will check that block._can_hold_element iff arr.__setitem__ works + arr = pd.array(block.values.ravel()) + + # coerce None + assert block._can_hold_element(None) + arr[0] = None + assert arr[0] is pd.NaT + + # coerce different types of datetime objects + vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)] + for val in vals: + assert block._can_hold_element(val) + arr[0] = val + + val = date(2010, 10, 10) + assert not block._can_hold_element(val) + + msg = ( + "'value' should be a 'Timestamp', 'NaT', " + "or array of those. Got 'date' instead." + ) + with pytest.raises(TypeError, match=msg): + arr[0] = val + @pytest.mark.parametrize( "value, dtype", [ @@ -1280,3 +1212,37 @@ def test_dataframe_not_equal(): df1 = pd.DataFrame({"a": [1, 2], "b": ["s", "d"]}) df2 = pd.DataFrame({"a": ["s", "d"], "b": [1, 2]}) assert df1.equals(df2) is False + + +def test_missing_unicode_key(): + df = DataFrame({"a": [1]}) + with pytest.raises(KeyError, match="\u05d0"): + df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError + + +def test_set_change_dtype_slice(): + # GH#8850 + cols = MultiIndex.from_tuples([("1st", "a"), ("2nd", "b"), ("3rd", "c")]) + df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols) + df["2nd"] = df["2nd"] * 2.0 + + blocks = df._to_dict_of_blocks() + assert sorted(blocks.keys()) == ["float64", "int64"] + tm.assert_frame_equal( + blocks["float64"], DataFrame([[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]) + ) + tm.assert_frame_equal(blocks["int64"], DataFrame([[3], [6]], columns=cols[2:])) + + +def test_interleave_non_unique_cols(): + df = DataFrame( + [[pd.Timestamp("20130101"), 3.5], [pd.Timestamp("20130102"), 4.5]], + columns=["x", "x"], + index=[1, 2], + ) + + df_unique = df.copy() + df_unique.columns = ["x", "y"] + assert df_unique.values.shape == df.values.shape + tm.assert_numpy_array_equal(df_unique.values[0], df.values[0]) + tm.assert_numpy_array_equal(df_unique.values[1], df.values[1])
Some of this is commented out or otherwise unneeded; other parts belong in test_internals.
https://api.github.com/repos/pandas-dev/pandas/pulls/32256
2020-02-26T00:28:31Z
2020-02-26T02:06:06Z
2020-02-26T02:06:06Z
2020-02-26T02:06:31Z
Implement __array__ on ExtensionIndex
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 44deab25db695..ff9655ab7f177 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -91,6 +91,8 @@ Backwards incompatible API changes now raise a ``TypeError`` if a not-accepted keyword argument is passed into it. Previously a ``UnsupportedFunctionCall`` was raised (``AssertionError`` if ``min_count`` passed into :meth:`~DataFrameGroupby.median``) (:issue:`31485`) - :meth:`DataFrame.at` and :meth:`Series.at` will raise a ``TypeError`` instead of a ``ValueError`` if an incompatible key is passed, and ``KeyError`` if a missing key is passed, matching the behavior of ``.loc[]`` (:issue:`31722`) +- Passing an integer dtype other than ``int64`` to ``np.array(period_index, dtype=...)`` will now raise ``TypeError`` instead of incorrectly using ``int64`` (:issue:`32255`) +- .. _whatsnew_110.api_breaking.indexing_raises_key_errors: diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 8141e2c78a7e2..5eeee644b3854 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -282,7 +282,12 @@ def freq(self): return self.dtype.freq def __array__(self, dtype=None) -> np.ndarray: - # overriding DatetimelikeArray + if dtype == "i8": + return self.asi8 + elif dtype == bool: + return ~self._isnan + + # This will raise TypeErorr for non-object dtypes return np.array(list(self), dtype=object) def __arrow_array__(self, type=None): diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index d43ae8eb54818..5997843f7ac6d 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -364,10 +364,6 @@ def __contains__(self, key: Any) -> bool: hash(key) return contains(self, key, container=self._engine) - def __array__(self, dtype=None) -> np.ndarray: - """ the array interface, return my values """ - return np.array(self._data, dtype=dtype) - @Appender(Index.astype.__doc__) def astype(self, dtype, copy=True): if is_interval_dtype(dtype): diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index c9fefd46e55c7..2a1153f07f5b9 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -267,9 +267,6 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): # -------------------------------------------------------------------- - def __array__(self, dtype=None) -> np.ndarray: - return np.asarray(self._data, dtype=dtype) - @cache_readonly def _is_dates_only(self) -> bool: """ diff --git a/pandas/core/indexes/extension.py b/pandas/core/indexes/extension.py index daccb35864e98..7b11df15f69fb 100644 --- a/pandas/core/indexes/extension.py +++ b/pandas/core/indexes/extension.py @@ -224,6 +224,9 @@ def __iter__(self): # --------------------------------------------------------------------- + def __array__(self, dtype=None) -> np.ndarray: + return np.asarray(self._data, dtype=dtype) + @property def _ndarray_values(self) -> np.ndarray: return self._data._ndarray_values diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index ebf69c49c029a..6a7595a6686bb 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -19,7 +19,6 @@ is_dtype_equal, is_float, is_integer, - is_integer_dtype, is_object_dtype, is_scalar, pandas_dtype, @@ -338,12 +337,6 @@ def _int64index(self) -> Int64Index: # ------------------------------------------------------------------------ # Index Methods - def __array__(self, dtype=None) -> 
np.ndarray: - if is_integer_dtype(dtype): - return self.asi8 - else: - return self.astype(object).values - def __array_wrap__(self, result, context=None): """ Gets called after a ufunc. Needs additional handling as diff --git a/pandas/tests/arrays/test_datetimelike.py b/pandas/tests/arrays/test_datetimelike.py index 17818b6ce689f..f99ee542d543c 100644 --- a/pandas/tests/arrays/test_datetimelike.py +++ b/pandas/tests/arrays/test_datetimelike.py @@ -687,10 +687,10 @@ def test_array_interface(self, period_index): result = np.asarray(arr, dtype=object) tm.assert_numpy_array_equal(result, expected) - # to other dtypes - with pytest.raises(TypeError): - np.asarray(arr, dtype="int64") + result = np.asarray(arr, dtype="int64") + tm.assert_numpy_array_equal(result, arr.asi8) + # to other dtypes with pytest.raises(TypeError): np.asarray(arr, dtype="float64") diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index ab3e967f12360..b4c223be0f6a5 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -681,3 +681,32 @@ def test_is_monotonic_with_nat(): assert not obj.is_monotonic_increasing assert not obj.is_monotonic_decreasing assert obj.is_unique + + +@pytest.mark.parametrize("array", [True, False]) +def test_dunder_array(array): + obj = PeriodIndex(["2000-01-01", "2001-01-01"], freq="D") + if array: + obj = obj._data + + expected = np.array([obj[0], obj[1]], dtype=object) + result = np.array(obj) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(obj) + tm.assert_numpy_array_equal(result, expected) + + expected = obj.asi8 + for dtype in ["i8", "int64", np.int64]: + result = np.array(obj, dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(obj, dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + for dtype in ["float64", "int32", "uint64"]: + msg = "argument must be" + with pytest.raises(TypeError, match=msg): + np.array(obj, dtype=dtype) + with pytest.raises(TypeError, match=msg): + np.array(obj, dtype=getattr(np, dtype))
https://api.github.com/repos/pandas-dev/pandas/pulls/32255
2020-02-26T00:13:48Z
2020-03-07T11:26:55Z
2020-03-07T11:26:55Z
2020-03-07T15:18:39Z
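The user-visible effect of moving `__array__` onto `ExtensionIndex` (together with the `PeriodArray.__array__` change) is summarized by the new `test_dunder_array`: conversion of a `PeriodIndex` defaults to object dtype, `"i8"` hands back the ordinals, and other integer dtypes now raise. A sketch of that post-PR behavior:

```
import numpy as np
import pandas as pd

pi = pd.PeriodIndex(["2000-01-01", "2001-01-01"], freq="D")

# default conversion produces an object array of Period scalars
assert np.asarray(pi).dtype == object

# "i8" hands back the ordinal values directly
np.testing.assert_array_equal(np.asarray(pi, dtype="i8"), pi.asi8)

# other integer dtypes now raise instead of silently using int64
try:
    np.array(pi, dtype="int32")
except TypeError:
    pass
```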
REF: move misplaced Series.append tests
diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py index 4d64b5b397981..4742d6ae3544f 100644 --- a/pandas/tests/series/methods/test_append.py +++ b/pandas/tests/series/methods/test_append.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas import DataFrame, DatetimeIndex, Series, date_range +from pandas import DataFrame, DatetimeIndex, Index, Series, Timestamp, date_range import pandas._testing as tm @@ -166,3 +166,87 @@ def test_append_tz_dateutil(self): appended = rng.append(rng2) tm.assert_index_equal(appended, rng3) + + def test_series_append_aware(self): + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex( + ["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern" + ) + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz + + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="UTC") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="UTC") + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex(["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC") + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + utc = rng1.tz + assert utc == ts_result.index.tz + + # GH#7795 + # different tz coerces to object dtype, not UTC + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Central") + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + exp_index = Index( + [ + Timestamp("1/1/2011 01:00", tz="US/Eastern"), + Timestamp("1/1/2011 02:00", tz="US/Central"), + ] + ) + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + + def test_series_append_aware_naive(self): + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") + ser1 = Series(np.random.randn(len(rng1)), index=rng1) + ser2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ser1.append(ser2) + + expected = ser1.index.astype(object).append(ser2.index.astype(object)) + assert ts_result.index.equals(expected) + + # mixed + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") + rng2 = range(100) + ser1 = Series(np.random.randn(len(rng1)), index=rng1) + ser2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ser1.append(ser2) + + expected = ser1.index.astype(object).append(ser2.index) + assert ts_result.index.equals(expected) + + def test_series_append_dst(self): + rng1 = date_range("1/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") + rng2 = date_range("8/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") + ser1 = Series([1, 2, 3], index=rng1) + ser2 = Series([10, 11, 12], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex( + [ + "2016-01-01 01:00", + "2016-01-01 02:00", + "2016-01-01 03:00", + "2016-08-01 01:00", + "2016-08-01 02:00", + "2016-08-01 03:00", + ], + tz="US/Eastern", + ) + exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) + tm.assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz diff --git a/pandas/tests/series/test_timezones.py 
b/pandas/tests/series/test_timezones.py index 74363f4c73c39..e729ff91293a8 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -10,101 +10,12 @@ from pandas._libs.tslibs import conversion, timezones -from pandas import DatetimeIndex, Index, Series, Timestamp +from pandas import Series, Timestamp import pandas._testing as tm from pandas.core.indexes.datetimes import date_range class TestSeriesTimezones: - # ----------------------------------------------------------------- - # Series.append - - def test_series_append_aware(self): - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex( - ["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern" - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="UTC") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="UTC") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex(["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC") - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - utc = rng1.tz - assert utc == ts_result.index.tz - - # GH#7795 - # different tz coerces to object dtype, not UTC - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Central") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - exp_index = Index( - [ - Timestamp("1/1/2011 01:00", tz="US/Eastern"), - Timestamp("1/1/2011 02:00", tz="US/Central"), - ] - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - - def test_series_append_aware_naive(self): - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") - ser1 = Series(np.random.randn(len(rng1)), index=rng1) - ser2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ser1.append(ser2) - - expected = ser1.index.astype(object).append(ser2.index.astype(object)) - assert ts_result.index.equals(expected) - - # mixed - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") - rng2 = range(100) - ser1 = Series(np.random.randn(len(rng1)), index=rng1) - ser2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ser1.append(ser2) - - expected = ser1.index.astype(object).append(ser2.index) - assert ts_result.index.equals(expected) - - def test_series_append_dst(self): - rng1 = date_range("1/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") - rng2 = date_range("8/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") - ser1 = Series([1, 2, 3], index=rng1) - ser2 = Series([10, 11, 12], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex( - [ - "2016-01-01 01:00", - "2016-01-01 02:00", - "2016-01-01 03:00", - "2016-08-01 01:00", - "2016-08-01 02:00", - "2016-08-01 03:00", - ], - tz="US/Eastern", - ) - exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) - tm.assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - # ----------------------------------------------------------------- - def test_dateutil_tzoffset_support(self): 
values = [188.5, 328.25] tzinfo = tzoffset(None, 7200)
https://api.github.com/repos/pandas-dev/pandas/pulls/32254
2020-02-25T23:53:10Z
2020-02-26T02:02:42Z
2020-02-26T02:02:42Z
2020-02-26T02:04:46Z
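The relocated tests pin down a subtle coercion rule worth seeing in isolation: appending tz-aware series with matching zones keeps a tz-aware `DatetimeIndex`, while mismatched zones fall back to object dtype rather than converting to UTC (GH#7795). A sketch, assuming the `Series.append` API of this era:

```
import pandas as pd

rng1 = pd.date_range("2011-01-01 01:00", periods=1, freq="H", tz="US/Eastern")
rng2 = pd.date_range("2011-01-01 02:00", periods=1, freq="H", tz="US/Central")

result = pd.Series([1], index=rng1).append(pd.Series([2], index=rng2))
assert result.index.dtype == object  # mixed zones: no silent UTC conversion
```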
TST: method-specific file for combine
diff --git a/pandas/tests/frame/methods/test_combine.py b/pandas/tests/frame/methods/test_combine.py new file mode 100644 index 0000000000000..bc6a67e4e1f32 --- /dev/null +++ b/pandas/tests/frame/methods/test_combine.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestCombine: + @pytest.mark.parametrize( + "data", + [ + pd.date_range("2000", periods=4), + pd.date_range("2000", periods=4, tz="US/Central"), + pd.period_range("2000", periods=4), + pd.timedelta_range(0, periods=4), + ], + ) + def test_combine_datetlike_udf(self, data): + # GH#23079 + df = pd.DataFrame({"A": data}) + other = df.copy() + df.iloc[1, 0] = None + + def combiner(a, b): + return b + + result = df.combine(other, combiner) + tm.assert_frame_equal(result, other) + + def test_combine_generic(self, float_frame): + df1 = float_frame + df2 = float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]] + + combined = df1.combine(df2, np.add) + combined2 = df2.combine(df1, np.add) + assert combined["D"].isna().all() + assert combined2["D"].isna().all() + + chunk = combined.loc[combined.index[:-5], ["A", "B", "C"]] + chunk2 = combined2.loc[combined2.index[:-5], ["A", "B", "C"]] + + exp = ( + float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]].reindex_like(chunk) + * 2 + ) + tm.assert_frame_equal(chunk, exp) + tm.assert_frame_equal(chunk2, exp) diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py index 36a476d195fe5..321eb5fe94daf 100644 --- a/pandas/tests/frame/test_combine_concat.py +++ b/pandas/tests/frame/test_combine_concat.py @@ -21,27 +21,6 @@ def test_concat_multiple_frames_dtypes(self): ) tm.assert_series_equal(results, expected) - @pytest.mark.parametrize( - "data", - [ - pd.date_range("2000", periods=4), - pd.date_range("2000", periods=4, tz="US/Central"), - pd.period_range("2000", periods=4), - pd.timedelta_range(0, periods=4), - ], - ) - def test_combine_datetlike_udf(self, data): - # https://github.com/pandas-dev/pandas/issues/23079 - df = pd.DataFrame({"A": data}) - other = df.copy() - df.iloc[1, 0] = None - - def combiner(a, b): - return b - - result = df.combine(other, combiner) - tm.assert_frame_equal(result, other) - def test_concat_multiple_tzs(self): # GH 12467 # combining datetime tz-aware and naive DataFrames diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index df40c2e7e2a11..542d9835bb5d3 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -685,25 +685,6 @@ def test_boolean_comparison(self): with pytest.raises(ValueError, match=msg1d): result = df == tup - def test_combine_generic(self, float_frame): - df1 = float_frame - df2 = float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]] - - combined = df1.combine(df2, np.add) - combined2 = df2.combine(df1, np.add) - assert combined["D"].isna().all() - assert combined2["D"].isna().all() - - chunk = combined.loc[combined.index[:-5], ["A", "B", "C"]] - chunk2 = combined2.loc[combined2.index[:-5], ["A", "B", "C"]] - - exp = ( - float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]].reindex_like(chunk) - * 2 - ) - tm.assert_frame_equal(chunk, exp) - tm.assert_frame_equal(chunk2, exp) - def test_inplace_ops_alignment(self): # inplace ops / ops alignment
https://api.github.com/repos/pandas-dev/pandas/pulls/32253
2020-02-25T23:43:02Z
2020-02-26T02:01:47Z
2020-02-26T02:01:47Z
2020-02-26T02:02:57Z
(wip) BUG: itertuples was returning an empty list if the frame had no columns
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 705c335acfb48..b010b71abab3f 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -249,6 +249,7 @@ Other instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`) - Set operations on an object-dtype :class:`Index` now always return object-dtype results (:issue:`31401`) - Bug in :meth:`AbstractHolidayCalendar.holidays` when no rules were defined (:issue:`31415`) +- Bug in :meth:`DataFrame.itertuples` was returning empty list when DataFrame has no columns (:issue:`25408`) - .. --------------------------------------------------------------------------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7efb4fbb878d6..ed49f750d2061 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1027,7 +1027,11 @@ def itertuples(self, index=True, name="Pandas"): fields.insert(0, "Index") # use integer indexing because of possible duplicate column names - arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) + if len(self.columns) > 0: + arrays.extend(self.iloc[:, k] for k in range(len(self.columns))) + else: + arrays.extend([() for _ in range(len(self))]) + return iter(arrays) # Python versions before 3.7 support at most 255 arguments to constructors can_return_named_tuples = PY37 or len(self.columns) + index < 255 diff --git a/pandas/tests/frame/methods/test_to_dict.py b/pandas/tests/frame/methods/test_to_dict.py index cd9bd169322fd..a934dffa763fd 100644 --- a/pandas/tests/frame/methods/test_to_dict.py +++ b/pandas/tests/frame/methods/test_to_dict.py @@ -262,3 +262,10 @@ def test_to_dict_orient_dtype(self): "c": type(df_dict["c"]), } assert result == expected + + def test_to_dict_no_rows_split(self): + # GH 25408 + columns = ["A", "B"] + result = DataFrame(columns=columns).transpose().to_dict(orient="split") + expected = {"index": ["A", "B"], "columns": [], "data": [[], []]} + assert result == expected diff --git a/pandas/tests/frame/test_api.py b/pandas/tests/frame/test_api.py index a021dd91a7d26..bf52e54695a15 100644 --- a/pandas/tests/frame/test_api.py +++ b/pandas/tests/frame/test_api.py @@ -286,6 +286,12 @@ def test_itertuples(self, float_frame): else: assert not hasattr(result_255_columns, "_fields") + # GH 25408 + df_0_columns = DataFrame(index=["A", "B"]) + result = list(df_0_columns.itertuples()) + tm.assert_index_equal(result[0], pd.Index(["A", "B"])) + assert result[1:] == [(), ()] + def test_sequence_like_with_categorical(self): # GH 7839
(currently not right) - [ ] closes #25408 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
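For a concrete reference point, a minimal reproduction of the reported issue (GH#25408), independent of the WIP patch above:

```python
import pandas as pd

# Two rows, zero columns.
df = pd.DataFrame(index=["A", "B"])
assert len(df) == 2

# The reported bug: itertuples() yielded nothing for a column-less
# frame, so this list came back empty instead of one entry per row.
rows = list(df.itertuples())
print(rows)
```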
https://api.github.com/repos/pandas-dev/pandas/pulls/32252
2020-02-25T22:01:03Z
2020-04-02T00:06:25Z
null
2020-10-10T14:14:57Z
TST: method-specific file for select_dtypes
diff --git a/pandas/tests/frame/methods/test_select_dtypes.py b/pandas/tests/frame/methods/test_select_dtypes.py new file mode 100644 index 0000000000000..fe7baebcf0cf7 --- /dev/null +++ b/pandas/tests/frame/methods/test_select_dtypes.py @@ -0,0 +1,329 @@ +from collections import OrderedDict + +import numpy as np +import pytest + +import pandas as pd +from pandas import DataFrame, Timestamp +import pandas._testing as tm + + +class TestSelectDtypes: + def test_select_dtypes_include_using_list_like(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=[np.number]) + ei = df[["b", "c", "d", "k"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number], exclude=["timedelta"]) + ei = df[["b", "c", "d"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"]) + ei = df[["b", "c", "d", "f"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetime"]) + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetime64"]) + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetimetz"]) + ei = df[["h", "i"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(include=["period"]) + + def test_select_dtypes_exclude_using_list_like(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + } + ) + re = df.select_dtypes(exclude=[np.number]) + ee = df[["a", "e"]] + tm.assert_frame_equal(re, ee) + + def test_select_dtypes_exclude_include_using_list_like(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + exclude = (np.datetime64,) + include = np.bool_, "integer" + r = df.select_dtypes(include=include, exclude=exclude) + e = df[["b", "c", "e"]] + tm.assert_frame_equal(r, e) + + exclude = ("datetime",) + include = "bool", "int64", "int32" + r = df.select_dtypes(include=include, exclude=exclude) + e = df[["b", "e"]] + tm.assert_frame_equal(r, e) + + def test_select_dtypes_include_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number) + ei = df[["b", "c", "d", "k"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include="datetime") + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = 
df.select_dtypes(include="datetime64") + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include="category") + ei = df[["f"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(include="period") + + def test_select_dtypes_exclude_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(exclude=np.number) + ei = df[["a", "e", "f", "g", "h", "i", "j"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(exclude="category") + ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(exclude="period") + + def test_select_dtypes_include_exclude_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number, exclude="floating") + ei = df[["b", "c", "k"]] + tm.assert_frame_equal(ri, ei) + + def test_select_dtypes_include_exclude_mixed_scalars_lists(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"]) + ei = df[["b", "c"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number, "category"], exclude="floating") + ei = df[["b", "c", "f", "k"]] + tm.assert_frame_equal(ri, ei) + + def test_select_dtypes_duplicate_columns(self): + # GH20839 + odict = OrderedDict + df = DataFrame( + odict( + [ + ("a", list("abc")), + ("b", list(range(1, 4))), + ("c", np.arange(3, 6).astype("u1")), + ("d", np.arange(4.0, 7.0, dtype="float64")), + ("e", [True, False, True]), + ("f", pd.date_range("now", periods=3).values), + ] + ) + ) + df.columns = ["a", "a", "b", "b", "b", "c"] + + expected = DataFrame( + {"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")} + ) + + result = df.select_dtypes(include=[np.number], exclude=["floating"]) + tm.assert_frame_equal(result, expected) + + def test_select_dtypes_not_an_attr_but_still_valid_dtype(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": 
pd.date_range("now", periods=3).values, + } + ) + df["g"] = df.f.diff() + assert not hasattr(np, "u8") + r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"]) + e = df[["a", "b"]] + tm.assert_frame_equal(r, e) + + r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"]) + e = df[["a", "b", "g"]] + tm.assert_frame_equal(r, e) + + def test_select_dtypes_empty(self): + df = DataFrame({"a": list("abc"), "b": list(range(1, 4))}) + msg = "at least one of include or exclude must be nonempty" + with pytest.raises(ValueError, match=msg): + df.select_dtypes() + + def test_select_dtypes_bad_datetime64(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + with pytest.raises(ValueError, match=".+ is too specific"): + df.select_dtypes(include=["datetime64[D]"]) + + with pytest.raises(ValueError, match=".+ is too specific"): + df.select_dtypes(exclude=["datetime64[as]"]) + + def test_select_dtypes_datetime_with_tz(self): + + df2 = DataFrame( + dict( + A=Timestamp("20130102", tz="US/Eastern"), + B=Timestamp("20130603", tz="CET"), + ), + index=range(5), + ) + df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1) + result = df3.select_dtypes(include=["datetime64[ns]"]) + expected = df3.reindex(columns=[]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"] + ) + @pytest.mark.parametrize("arg", ["include", "exclude"]) + def test_select_dtypes_str_raises(self, dtype, arg): + df = DataFrame( + { + "a": list("abc"), + "g": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + msg = "string dtypes are not allowed" + kwargs = {arg: [dtype]} + + with pytest.raises(TypeError, match=msg): + df.select_dtypes(**kwargs) + + def test_select_dtypes_bad_arg_raises(self): + df = DataFrame( + { + "a": list("abc"), + "g": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + + msg = "data type.*not understood" + with pytest.raises(TypeError, match=msg): + df.select_dtypes(["blargy, blarg, blarg"]) + + def test_select_dtypes_typecodes(self): + # GH 11990 + df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random()) + expected = df + FLOAT_TYPES = list(np.typecodes["AllFloat"]) + tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 8b63f0614eebf..713d8f3ceeedb 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -111,325 +111,6 @@ def test_dtypes_are_correct_after_column_slice(self): pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])), ) - def test_select_dtypes_include_using_list_like(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, 
tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(include=[np.number]) - ei = df[["b", "c", "d", "k"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include=[np.number], exclude=["timedelta"]) - ei = df[["b", "c", "d"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"]) - ei = df[["b", "c", "d", "f"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include=["datetime"]) - ei = df[["g"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include=["datetime64"]) - ei = df[["g"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include=["datetimetz"]) - ei = df[["h", "i"]] - tm.assert_frame_equal(ri, ei) - - with pytest.raises(NotImplementedError, match=r"^$"): - df.select_dtypes(include=["period"]) - - def test_select_dtypes_exclude_using_list_like(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - } - ) - re = df.select_dtypes(exclude=[np.number]) - ee = df[["a", "e"]] - tm.assert_frame_equal(re, ee) - - def test_select_dtypes_exclude_include_using_list_like(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - exclude = (np.datetime64,) - include = np.bool_, "integer" - r = df.select_dtypes(include=include, exclude=exclude) - e = df[["b", "c", "e"]] - tm.assert_frame_equal(r, e) - - exclude = ("datetime",) - include = "bool", "int64", "int32" - r = df.select_dtypes(include=include, exclude=exclude) - e = df[["b", "e"]] - tm.assert_frame_equal(r, e) - - def test_select_dtypes_include_using_scalars(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(include=np.number) - ei = df[["b", "c", "d", "k"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include="datetime") - ei = df[["g"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include="datetime64") - ei = df[["g"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include="category") - ei = df[["f"]] - tm.assert_frame_equal(ri, ei) - - with pytest.raises(NotImplementedError, match=r"^$"): - df.select_dtypes(include="period") - - def test_select_dtypes_exclude_using_scalars(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(exclude=np.number) - ei = df[["a", "e", "f", "g", "h", 
"i", "j"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(exclude="category") - ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]] - tm.assert_frame_equal(ri, ei) - - with pytest.raises(NotImplementedError, match=r"^$"): - df.select_dtypes(exclude="period") - - def test_select_dtypes_include_exclude_using_scalars(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(include=np.number, exclude="floating") - ei = df[["b", "c", "k"]] - tm.assert_frame_equal(ri, ei) - - def test_select_dtypes_include_exclude_mixed_scalars_lists(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"]) - ei = df[["b", "c"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include=[np.number, "category"], exclude="floating") - ei = df[["b", "c", "f", "k"]] - tm.assert_frame_equal(ri, ei) - - def test_select_dtypes_duplicate_columns(self): - # GH20839 - odict = OrderedDict - df = DataFrame( - odict( - [ - ("a", list("abc")), - ("b", list(range(1, 4))), - ("c", np.arange(3, 6).astype("u1")), - ("d", np.arange(4.0, 7.0, dtype="float64")), - ("e", [True, False, True]), - ("f", pd.date_range("now", periods=3).values), - ] - ) - ) - df.columns = ["a", "a", "b", "b", "b", "c"] - - expected = DataFrame( - {"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")} - ) - - result = df.select_dtypes(include=[np.number], exclude=["floating"]) - tm.assert_frame_equal(result, expected) - - def test_select_dtypes_not_an_attr_but_still_valid_dtype(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - df["g"] = df.f.diff() - assert not hasattr(np, "u8") - r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"]) - e = df[["a", "b"]] - tm.assert_frame_equal(r, e) - - r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"]) - e = df[["a", "b", "g"]] - tm.assert_frame_equal(r, e) - - def test_select_dtypes_empty(self): - df = DataFrame({"a": list("abc"), "b": list(range(1, 4))}) - msg = "at least one of include or exclude must be nonempty" - with pytest.raises(ValueError, match=msg): - df.select_dtypes() - - def test_select_dtypes_bad_datetime64(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - with pytest.raises(ValueError, match=".+ is too specific"): - 
df.select_dtypes(include=["datetime64[D]"]) - - with pytest.raises(ValueError, match=".+ is too specific"): - df.select_dtypes(exclude=["datetime64[as]"]) - - def test_select_dtypes_datetime_with_tz(self): - - df2 = DataFrame( - dict( - A=Timestamp("20130102", tz="US/Eastern"), - B=Timestamp("20130603", tz="CET"), - ), - index=range(5), - ) - df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1) - result = df3.select_dtypes(include=["datetime64[ns]"]) - expected = df3.reindex(columns=[]) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"] - ) - @pytest.mark.parametrize("arg", ["include", "exclude"]) - def test_select_dtypes_str_raises(self, dtype, arg): - df = DataFrame( - { - "a": list("abc"), - "g": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - msg = "string dtypes are not allowed" - kwargs = {arg: [dtype]} - - with pytest.raises(TypeError, match=msg): - df.select_dtypes(**kwargs) - - def test_select_dtypes_bad_arg_raises(self): - df = DataFrame( - { - "a": list("abc"), - "g": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - - msg = "data type.*not understood" - with pytest.raises(TypeError, match=msg): - df.select_dtypes(["blargy, blarg, blarg"]) - - def test_select_dtypes_typecodes(self): - # GH 11990 - df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random()) - expected = df - FLOAT_TYPES = list(np.typecodes["AllFloat"]) - tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected) - def test_dtypes_gh8722(self, float_string_frame): float_string_frame["bool"] = float_string_frame["A"] > 0 result = float_string_frame.dtypes
https://api.github.com/repos/pandas-dev/pandas/pulls/32250
2020-02-25T21:40:07Z
2020-02-26T02:00:50Z
2020-02-26T02:00:50Z
2020-02-26T02:03:42Z
REF: method-specific file for to_timestamp
diff --git a/pandas/tests/frame/methods/test_to_timestamp.py b/pandas/tests/frame/methods/test_to_timestamp.py new file mode 100644 index 0000000000000..ae7d2827e05a6 --- /dev/null +++ b/pandas/tests/frame/methods/test_to_timestamp.py @@ -0,0 +1,103 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Timedelta, + date_range, + period_range, + to_datetime, +) +import pandas._testing as tm + + +class TestToTimestamp: + def test_frame_to_time_stamp(self): + K = 5 + index = period_range(freq="A", start="1/1/2001", end="12/1/2009") + df = DataFrame(np.random.randn(len(index), K), index=index) + df["mix"] = "a" + + exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") + exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") + result = df.to_timestamp("D", "end") + tm.assert_index_equal(result.index, exp_index) + tm.assert_numpy_array_equal(result.values, df.values) + + exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") + result = df.to_timestamp("D", "start") + tm.assert_index_equal(result.index, exp_index) + + def _get_with_delta(delta, freq="A-DEC"): + return date_range( + to_datetime("1/1/2001") + delta, + to_datetime("12/31/2009") + delta, + freq=freq, + ) + + delta = timedelta(hours=23) + result = df.to_timestamp("H", "end") + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + delta = timedelta(hours=23, minutes=59) + result = df.to_timestamp("T", "end") + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + result = df.to_timestamp("S", "end") + delta = timedelta(hours=23, minutes=59, seconds=59) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") + tm.assert_index_equal(result.index, exp_index) + + # columns + df = df.T + + exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") + exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") + result = df.to_timestamp("D", "end", axis=1) + tm.assert_index_equal(result.columns, exp_index) + tm.assert_numpy_array_equal(result.values, df.values) + + exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") + result = df.to_timestamp("D", "start", axis=1) + tm.assert_index_equal(result.columns, exp_index) + + delta = timedelta(hours=23) + result = df.to_timestamp("H", "end", axis=1) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + delta = timedelta(hours=23, minutes=59) + result = df.to_timestamp("T", "end", axis=1) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + result = df.to_timestamp("S", "end", axis=1) + delta = timedelta(hours=23, minutes=59, seconds=59) + exp_index = _get_with_delta(delta) + exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") + tm.assert_index_equal(result.columns, exp_index) + + # invalid axis + with pytest.raises(ValueError, match="axis"): + df.to_timestamp(axis=2) + + result1 = df.to_timestamp("5t", axis=1) + result2 = df.to_timestamp("t", axis=1) + expected = date_range("2001-01-01", "2009-01-01", freq="AS") + assert isinstance(result1.columns, DatetimeIndex) + assert isinstance(result2.columns, DatetimeIndex) + 
tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8) + tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8) + # PeriodIndex.to_timestamp always use 'infer' + assert result1.columns.freqstr == "AS-JAN" + assert result2.columns.freqstr == "AS-JAN" diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py index a6b2b334d3ec8..1ce13fd31ba88 100644 --- a/pandas/tests/frame/test_period.py +++ b/pandas/tests/frame/test_period.py @@ -1,19 +1,6 @@ -from datetime import timedelta - import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - DatetimeIndex, - Index, - PeriodIndex, - Timedelta, - date_range, - period_range, - to_datetime, -) + +from pandas import DataFrame, Index, PeriodIndex, period_range import pandas._testing as tm @@ -49,93 +36,6 @@ def test_frame_setitem(self): assert isinstance(rs.index, PeriodIndex) tm.assert_index_equal(rs.index, rng) - def test_frame_to_time_stamp(self): - K = 5 - index = period_range(freq="A", start="1/1/2001", end="12/1/2009") - df = DataFrame(np.random.randn(len(index), K), index=index) - df["mix"] = "a" - - exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") - exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") - result = df.to_timestamp("D", "end") - tm.assert_index_equal(result.index, exp_index) - tm.assert_numpy_array_equal(result.values, df.values) - - exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") - result = df.to_timestamp("D", "start") - tm.assert_index_equal(result.index, exp_index) - - def _get_with_delta(delta, freq="A-DEC"): - return date_range( - to_datetime("1/1/2001") + delta, - to_datetime("12/31/2009") + delta, - freq=freq, - ) - - delta = timedelta(hours=23) - result = df.to_timestamp("H", "end") - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") - tm.assert_index_equal(result.index, exp_index) - - delta = timedelta(hours=23, minutes=59) - result = df.to_timestamp("T", "end") - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") - tm.assert_index_equal(result.index, exp_index) - - result = df.to_timestamp("S", "end") - delta = timedelta(hours=23, minutes=59, seconds=59) - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") - tm.assert_index_equal(result.index, exp_index) - - # columns - df = df.T - - exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") - exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") - result = df.to_timestamp("D", "end", axis=1) - tm.assert_index_equal(result.columns, exp_index) - tm.assert_numpy_array_equal(result.values, df.values) - - exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") - result = df.to_timestamp("D", "start", axis=1) - tm.assert_index_equal(result.columns, exp_index) - - delta = timedelta(hours=23) - result = df.to_timestamp("H", "end", axis=1) - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") - tm.assert_index_equal(result.columns, exp_index) - - delta = timedelta(hours=23, minutes=59) - result = df.to_timestamp("T", "end", axis=1) - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") - tm.assert_index_equal(result.columns, exp_index) - - result = df.to_timestamp("S", "end", axis=1) - delta = timedelta(hours=23, minutes=59, seconds=59) - exp_index = _get_with_delta(delta) - exp_index = 
exp_index + Timedelta(1, "s") - Timedelta(1, "ns") - tm.assert_index_equal(result.columns, exp_index) - - # invalid axis - with pytest.raises(ValueError, match="axis"): - df.to_timestamp(axis=2) - - result1 = df.to_timestamp("5t", axis=1) - result2 = df.to_timestamp("t", axis=1) - expected = pd.date_range("2001-01-01", "2009-01-01", freq="AS") - assert isinstance(result1.columns, DatetimeIndex) - assert isinstance(result2.columns, DatetimeIndex) - tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8) - tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8) - # PeriodIndex.to_timestamp always use 'infer' - assert result1.columns.freqstr == "AS-JAN" - assert result2.columns.freqstr == "AS-JAN" - def test_frame_index_to_string(self): index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M") frame = DataFrame(np.random.randn(3, 4), index=index)
cc @MomIsBestFriend this is the only test we have for DataFrame.to_timestamp, but it was apparently written before the convention of "small, focused tests" took hold. It would be helpful if this could be split/modernized.
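As a hedged illustration of that split, one behavior from the monolithic test could stand alone as a small parametrized case; the data and the expected index are lifted from the assertions in the diff above, which show that the "end" convention lands one nanosecond before the next period regardless of the target resolution:

```python
import numpy as np
import pytest

from pandas import DataFrame, Timedelta, date_range, period_range
import pandas._testing as tm


@pytest.mark.parametrize("freq", ["D", "H", "T", "S"])
def test_to_timestamp_end(freq):
    index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
    df = DataFrame(np.random.randn(len(index), 2), index=index)

    result = df.to_timestamp(freq, "end")

    # Whatever the target freq, "end" resolves to 1ns before the
    # start of the following period (see the original assertions).
    exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
    exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
    tm.assert_index_equal(result.index, exp_index)
```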
https://api.github.com/repos/pandas-dev/pandas/pulls/32248
2020-02-25T21:33:11Z
2020-02-26T02:03:30Z
2020-02-26T02:03:30Z
2020-02-26T02:04:08Z
REF: simplify IntervalIndex/IntervalArray _shallow_copy
diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index b11736248c12a..f5167f470b056 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -725,45 +725,18 @@ def _concat_same_type(cls, to_concat): right = np.concatenate([interval.right for interval in to_concat]) return cls._simple_new(left, right, closed=closed, copy=False) - def _shallow_copy(self, left=None, right=None, closed=None): + def _shallow_copy(self, left, right): """ Return a new IntervalArray with the replacement attributes Parameters ---------- - left : array-like + left : Index Values to be used for the left-side of the intervals. - If None, the existing left and right values will be used. - - right : array-like + right : Index Values to be used for the right-side of the intervals. - If None and left is IntervalArray-like, the left and right - of the IntervalArray-like will be used. - - closed : {'left', 'right', 'both', 'neither'}, optional - Whether the intervals are closed on the left-side, right-side, both - or neither. If None, the existing closed will be used. """ - if left is None: - - # no values passed - left, right = self.left, self.right - - elif right is None: - - # only single value passed, could be an IntervalArray - # or array of Intervals - if not isinstance(left, (type(self), ABCIntervalIndex)): - left = type(self)(left) - - left, right = left.left, left.right - else: - - # both left and right are values - pass - - closed = closed or self.closed - return self._simple_new(left, right, closed=closed, verify_integrity=False) + return self._simple_new(left, right, closed=self.closed, verify_integrity=False) def copy(self): """ @@ -1035,7 +1008,9 @@ def set_closed(self, closed): msg = f"invalid option for 'closed': {closed}" raise ValueError(msg) - return self._shallow_copy(closed=closed) + return type(self)._simple_new( + left=self.left, right=self.right, closed=closed, verify_integrity=False + ) @property def length(self): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 6ea4250e4acf4..b3923a1298859 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -333,11 +333,12 @@ def from_tuples( # -------------------------------------------------------------------- @Appender(Index._shallow_copy.__doc__) - def _shallow_copy(self, left=None, right=None, **kwargs): - result = self._data._shallow_copy(left=left, right=right) + def _shallow_copy(self, values=None, **kwargs): + if values is None: + values = self._data attributes = self._get_attributes_dict() attributes.update(kwargs) - return self._simple_new(result, **attributes) + return self._simple_new(values, **attributes) @cache_readonly def _isnan(self): @@ -407,7 +408,7 @@ def astype(self, dtype, copy=True): with rewrite_exception("IntervalArray", type(self).__name__): new_values = self.values.astype(dtype, copy=copy) if is_interval_dtype(new_values): - return self._shallow_copy(new_values.left, new_values.right) + return self._shallow_copy(new_values) return Index.astype(self, dtype, copy=copy) @property @@ -881,7 +882,8 @@ def where(self, cond, other=None): if other is None: other = self._na_value values = np.where(cond, self.values, other) - return self._shallow_copy(values) + result = IntervalArray(values) + return self._shallow_copy(result) def delete(self, loc): """ @@ -893,7 +895,8 @@ def delete(self, loc): """ new_left = self.left.delete(loc) new_right = self.right.delete(loc) - return self._shallow_copy(new_left, new_right) + result = 
self._data._shallow_copy(new_left, new_right) + return self._shallow_copy(result) def insert(self, loc, item): """ @@ -927,7 +930,8 @@ def insert(self, loc, item): new_left = self.left.insert(loc, left_insert) new_right = self.right.insert(loc, right_insert) - return self._shallow_copy(new_left, new_right) + result = self._data._shallow_copy(new_left, new_right) + return self._shallow_copy(result) @Appender(_index_shared_docs["take"] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
xref #32244
https://api.github.com/repos/pandas-dev/pandas/pulls/32247
2020-02-25T19:54:31Z
2020-02-26T03:59:06Z
2020-02-26T03:59:06Z
2020-02-26T04:08:01Z
Backport PR #32241 on branch 1.0.x (CI: troubleshoot 32bit build)
diff --git a/ci/setup_env.sh b/ci/setup_env.sh index e5bee09fe2f79..ae39b0dda5d09 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -50,7 +50,7 @@ echo echo "update conda" conda config --set ssl_verify false conda config --set quiet true --set always_yes true --set changeps1 false -conda install pip # create conda to create a historical artifact for pip & setuptools +conda install pip conda # create conda to create a historical artifact for pip & setuptools conda update -n base conda echo "conda info -a"
Backport PR #32241: CI: troubleshoot 32bit build
https://api.github.com/repos/pandas-dev/pandas/pulls/32245
2020-02-25T18:48:04Z
2020-02-25T19:52:54Z
2020-02-25T19:52:54Z
2020-02-26T12:37:30Z
CLN: simplify+annotate _shallow_copy
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index aa22527d8c2d7..67f2f05c8af1e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -8,7 +8,7 @@ from pandas._libs import algos as libalgos, index as libindex, lib import pandas._libs.join as libjoin -from pandas._libs.lib import is_datetime_array +from pandas._libs.lib import is_datetime_array, no_default from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import tz_compare @@ -485,7 +485,7 @@ def _get_attributes_dict(self): """ return {k: getattr(self, k, None) for k in self._attributes} - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = no_default): """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking @@ -496,16 +496,14 @@ def _shallow_copy(self, values=None, **kwargs): Parameters ---------- values : the values to create the new Index, optional - kwargs : updates the default attributes for this Index + name : Label, defaults to self.name """ + name = self.name if name is no_default else name + if values is None: values = self.values - attributes = self._get_attributes_dict() - - attributes.update(kwargs) - - return self._simple_new(values, **attributes) + return self._simple_new(values, name=name) def _shallow_copy_with_infer(self, values, **kwargs): """ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index caa6a9a93141f..603ec486d943e 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -7,6 +7,8 @@ from pandas._libs import index as libindex from pandas._libs.hashtable import duplicated_int64 +from pandas._libs.lib import no_default +from pandas._typing import Label from pandas.util._decorators import Appender, cache_readonly from pandas.core.dtypes.common import ( @@ -264,13 +266,14 @@ def _simple_new(cls, values, name=None, dtype=None): # -------------------------------------------------------------------- @Appender(Index._shallow_copy.__doc__) - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = no_default): + name = self.name if name is no_default else name + if values is None: values = self.values cat = Categorical(values, dtype=self.dtype) - name = kwargs.get("name", self.name) return type(self)._simple_new(cat, name=name) def _is_dtype_compat(self, other) -> bool: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 1b3b6934aa53a..1abd58007c15f 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -8,6 +8,7 @@ from pandas._libs import NaT, iNaT, join as libjoin, lib from pandas._libs.tslibs import timezones +from pandas._typing import Label from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, cache_readonly @@ -649,7 +650,9 @@ def _set_freq(self, freq): self._data._freq = freq - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = lib.no_default): + name = self.name if name is lib.no_default else name + if values is None: values = self._data @@ -657,18 +660,16 @@ def _shallow_copy(self, values=None, **kwargs): values = values._data if isinstance(values, np.ndarray): # TODO: We would rather not get here - if 
kwargs.get("freq") is not None: - raise ValueError(kwargs) values = type(self._data)(values, dtype=self.dtype) attributes = self._get_attributes_dict() - if "freq" not in kwargs and self.freq is not None: + if self.freq is not None: if isinstance(values, (DatetimeArray, TimedeltaArray)): if values.freq is None: del attributes["freq"] - attributes.update(kwargs) + attributes["name"] = name return type(self)._simple_new(values, **attributes) # -------------------------------------------------------------------- @@ -738,9 +739,7 @@ def intersection(self, other, sort=False): # this point, depending on the values. result._set_freq(None) - result = self._shallow_copy( - result._data, name=result.name, dtype=result.dtype, freq=None - ) + result = self._shallow_copy(result._data, name=result.name) if result.freq is None: result._set_freq("infer") return result diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 367870f0ee467..06a26cc90555e 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -3,7 +3,7 @@ import numpy as np from pandas._libs import index as libindex, lib -from pandas._typing import Dtype +from pandas._typing import Dtype, Label from pandas.util._decorators import Appender, cache_readonly from pandas.core.dtypes.cast import astype_nansafe @@ -103,7 +103,7 @@ def _maybe_cast_slice_bound(self, label, side, kind): return self._maybe_cast_indexer(label) @Appender(Index._shallow_copy.__doc__) - def _shallow_copy(self, values=None, name=lib.no_default): + def _shallow_copy(self, values=None, name: Label = lib.no_default): name = name if name is not lib.no_default else self.name if values is not None and not self._can_hold_na and values.dtype.kind == "f": diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 0b85433b699a8..c7c11c60185b3 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -5,9 +5,11 @@ import numpy as np from pandas._libs import index as libindex +from pandas._libs.lib import no_default from pandas._libs.tslibs import frequencies as libfrequencies, resolution from pandas._libs.tslibs.parsing import parse_time_string from pandas._libs.tslibs.period import Period +from pandas._typing import Label from pandas.util._decorators import Appender, cache_readonly from pandas.core.dtypes.common import ( @@ -248,8 +250,10 @@ def _has_complex_internals(self): # used to avoid libreduction code paths, which raise or require conversion return True - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = no_default): # TODO: simplify, figure out type of values + name = name if name is not no_default else self.name + if values is None: values = self._data @@ -263,18 +267,7 @@ def _shallow_copy(self, values=None, **kwargs): # GH#30713 this should never be reached raise TypeError(type(values), getattr(values, "dtype", None)) - # We don't allow changing `freq` in _shallow_copy. 
- validate_dtype_freq(self.dtype, kwargs.get("freq")) - attributes = self._get_attributes_dict() - - attributes.update(kwargs) - if not len(values) and "dtype" not in kwargs: - attributes["dtype"] = self.dtype - return self._simple_new(values, **attributes) - - def _shallow_copy_with_infer(self, values=None, **kwargs): - """ we always want to return a PeriodIndex """ - return self._shallow_copy(values=values, **kwargs) + return self._simple_new(values, name=name) def _maybe_convert_timedelta(self, other): """ diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index d6752da6bc58f..fa8551bc646a6 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -7,6 +7,8 @@ import numpy as np from pandas._libs import index as libindex +from pandas._libs.lib import no_default +from pandas._typing import Label import pandas.compat as compat from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, cache_readonly @@ -385,13 +387,13 @@ def tolist(self): return list(self._range) @Appender(Int64Index._shallow_copy.__doc__) - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = no_default): + name = self.name if name is no_default else name + if values is None: - name = kwargs.get("name", self.name) return self._simple_new(self._range, name=name) else: - kwargs.setdefault("name", self.name) - return self._int64index._shallow_copy(values, **kwargs) + return Int64Index._simple_new(values, name=name) @Appender(Int64Index.copy.__doc__) def copy(self, name=None, deep=False, dtype=None, **kwargs): diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 6479b14e9521e..40c7ffba46450 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -128,15 +128,9 @@ def test_shallow_copy_empty(self): def test_shallow_copy_i8(self): # GH-24391 pi = period_range("2018-01-01", periods=3, freq="2D") - result = pi._shallow_copy(pi.asi8, freq=pi.freq) + result = pi._shallow_copy(pi.asi8) tm.assert_index_equal(result, pi) - def test_shallow_copy_changing_freq_raises(self): - pi = period_range("2018-01-01", periods=3, freq="2D") - msg = "specified freq and dtype are different" - with pytest.raises(IncompatibleFrequency, match=msg): - pi._shallow_copy(pi, freq="H") - def test_view_asi8(self): idx = PeriodIndex([], freq="M")
IntervalIndex is still an outlier, will be handled separately
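The `lib.no_default` sentinel that replaces the old `**kwargs` plumbing is worth a note: `None` is a perfectly valid `Index.name`, so it cannot double as "argument omitted". A minimal self-contained sketch of the pattern, with a plain `object()` standing in for `pandas._libs.lib.no_default`:

```python
class Named:
    _no_default = object()  # stand-in for pandas._libs.lib.no_default

    def __init__(self, name):
        self.name = name

    def shallow_copy(self, name=_no_default):
        # Fall back to self.name only when the caller passed nothing;
        # an explicit name=None must be preserved.
        name = self.name if name is Named._no_default else name
        return Named(name)


obj = Named("x")
assert obj.shallow_copy().name == "x"            # omitted -> keep name
assert obj.shallow_copy(name=None).name is None  # explicit None wins
```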
https://api.github.com/repos/pandas-dev/pandas/pulls/32244
2020-02-25T18:31:38Z
2020-02-26T02:10:40Z
2020-02-26T02:10:40Z
2020-02-26T02:17:31Z
BUG: Fixed bug where pandas._libs.lib.maybe_convert_objects improperly handled arrays with bools and NaNs
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index 808e6ae709ce9..eec471f989037 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -24,6 +24,7 @@ Fixed regressions - Fixed regression in :class:`DataFrame` arithmetic operations with mis-matched columns (:issue:`31623`) - Fixed regression in :meth:`GroupBy.agg` calling a user-provided function an extra time on an empty input (:issue:`31760`) - Joining on :class:`DatetimeIndex` or :class:`TimedeltaIndex` will preserve ``freq`` in simple cases (:issue:`32166`) +- Fixed bug in the repr of an object-dtype ``Index`` with bools and missing values (:issue:`32146`) - .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 61d6a660a0357..d10e49b0bf2f1 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -2298,7 +2298,7 @@ def maybe_convert_objects(ndarray[object] objects, bint try_float=0, return uints else: return ints - elif seen.is_bool: + elif seen.is_bool and not seen.nan_: return bools.view(np.bool_) return objects diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 48ae1f67297af..abc6513d02e0a 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -568,6 +568,13 @@ def test_maybe_convert_objects_nullable_integer(self, exp): tm.assert_extension_array_equal(result, exp) + def test_maybe_convert_objects_bool_nan(self): + # GH32146 + ind = pd.Index([True, False, np.nan], dtype=object) + exp = np.array([True, False, np.nan], dtype=object) + out = lib.maybe_convert_objects(ind.values, safe=1) + tm.assert_numpy_array_equal(out, exp) + def test_mixed_dtypes_remain_object_array(self): # GH14956 array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 0c4a790646a81..ee9cc44870c22 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2458,6 +2458,17 @@ def test_intersect_str_dates(self): expected = Index([], dtype=object) tm.assert_index_equal(result, expected) + def test_index_repr_bool_nan(self): + # GH32146 + arr = Index([True, False, np.nan], dtype=object) + exp1 = arr.format() + out1 = ["True", "False", "NaN"] + assert out1 == exp1 + + exp2 = repr(arr) + out2 = "Index([True, False, nan], dtype='object')" + assert out2 == exp2 + class TestIndexUtils: @pytest.mark.parametrize( diff --git a/pandas/tests/series/methods/test_value_counts.py b/pandas/tests/series/methods/test_value_counts.py index fdb35befeb0c2..f97362ce9c2a9 100644 --- a/pandas/tests/series/methods/test_value_counts.py +++ b/pandas/tests/series/methods/test_value_counts.py @@ -1,4 +1,5 @@ import numpy as np +import pytest import pandas as pd from pandas import Categorical, CategoricalIndex, Series @@ -177,3 +178,28 @@ def test_value_counts_categorical_with_nan(self): exp = Series([2, 1, 3], index=CategoricalIndex(["a", "b", np.nan])) res = ser.value_counts(dropna=False, sort=False) tm.assert_series_equal(res, exp) + + @pytest.mark.parametrize( + "ser, dropna, exp", + [ + ( + pd.Series([False, True, True, pd.NA]), + False, + pd.Series([2, 1, 1], index=[True, False, pd.NA]), + ), + ( + pd.Series([False, True, True, pd.NA]), + True, + pd.Series([2, 1], index=[True, False]), + ), + ( + pd.Series(range(3), index=[True, False, np.nan]).index, + False, + pd.Series([1, 1, 1], index=[True, False, pd.NA]), + ), 
+ ], + ) + def test_value_counts_bool_with_nan(self, ser, dropna, exp): + # GH32146 + out = ser.value_counts(dropna=dropna) + tm.assert_series_equal(out, exp)
- [x] closes #32146 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
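A quick demonstration of the symptom, distilled from the tests added above (GH#32146); the outputs in the comments are the post-fix expectations asserted in `test_index_repr_bool_nan`:

```python
import numpy as np
import pandas as pd

# An object-dtype Index mixing bools with a missing value.
idx = pd.Index([True, False, np.nan], dtype=object)

# With the fix, maybe_convert_objects no longer coerces the bool/NaN
# mix to a plain bool array, so the missing value survives:
print(repr(idx))     # Index([True, False, nan], dtype='object')
print(idx.format())  # ['True', 'False', 'NaN']
```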
https://api.github.com/repos/pandas-dev/pandas/pulls/32242
2020-02-25T17:42:41Z
2020-03-09T15:42:17Z
2020-03-09T15:42:16Z
2020-03-09T19:17:42Z
CI: troubleshoot 32bit build
diff --git a/ci/setup_env.sh b/ci/setup_env.sh index e5bee09fe2f79..ae39b0dda5d09 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -50,7 +50,7 @@ echo echo "update conda" conda config --set ssl_verify false conda config --set quiet true --set always_yes true --set changeps1 false -conda install pip # create conda to create a historical artifact for pip & setuptools +conda install pip conda # create conda to create a historical artifact for pip & setuptools conda update -n base conda echo "conda info -a"
closes #32229
https://api.github.com/repos/pandas-dev/pandas/pulls/32241
2020-02-25T16:57:42Z
2020-02-25T18:47:35Z
2020-02-25T18:47:35Z
2021-11-20T23:21:51Z
travis: enable bionic & multi-cpu testing
diff --git a/.travis.yml b/.travis.yml index 2c8533d02ddc1..02db18c52904c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,6 @@ language: python python: 3.7 +dist: bionic # To turn off cached cython files and compiler cache # set NOCACHE-true @@ -23,36 +24,28 @@ git: # for cloning depth: false -matrix: - fast_finish: true - - include: - - env: - - JOB="3.8" ENV_FILE="ci/deps/travis-38.yaml" PATTERN="(not slow and not network and not clipboard)" - - - env: - - JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network and not clipboard)" +arch: + - amd64 + - ppc64le + - s390x + - arm64 - - env: - - JOB="3.6, locale" ENV_FILE="ci/deps/travis-36-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1" - services: - - mysql - - postgresql +services: + - mysql + - postgresql - - env: - # Enabling Deprecations when running tests - # PANDAS_TESTING_MODE="deprecate" causes DeprecationWarning messages to be displayed in the logs - # See pandas/_testing.py for more details. - - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36-cov.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true SQL="1" - services: - - mysql - - postgresql +env: + - JOB="3.8" ENV_FILE="ci/deps/travis-38.yaml" PATTERN="(not slow and not network and not clipboard)" + - JOB="3.7" ENV_FILE="ci/deps/travis-37.yaml" PATTERN="(not slow and not network and not clipboard)" + - JOB="3.6, locale" ENV_FILE="ci/deps/travis-36-locale.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" LOCALE_OVERRIDE="zh_CN.UTF-8" SQL="1" + # Enabling Deprecations when running tests + # PANDAS_TESTING_MODE="deprecate" causes DeprecationWarning messages to be displayed in the logs + # See pandas/_testing.py for more details. + - JOB="3.6, coverage" ENV_FILE="ci/deps/travis-36-cov.yaml" PATTERN="((not slow and not network and not clipboard) or (single and db))" PANDAS_TESTING_MODE="deprecate" COVERAGE=true SQL="1" + - JOB="3.6, slow" ENV_FILE="ci/deps/travis-36-slow.yaml" PATTERN="slow" SQL="1" - - env: - - JOB="3.6, slow" ENV_FILE="ci/deps/travis-36-slow.yaml" PATTERN="slow" SQL="1" - services: - - mysql - - postgresql +matrix: + fast_finish: true before_install: - echo "before_install"
- [x] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/32237
2020-02-25T11:46:40Z
2020-03-15T00:56:51Z
null
2020-03-15T00:56:51Z
Added message to pytest.raises for test_constructor_dict
diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index 8c9b7cd060059..14162bc433317 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -316,7 +316,8 @@ def test_constructor_dict(self): # mix dict and array, wrong size - no spec for which error should raise # first - with pytest.raises(ValueError): + msg = "Mixing dicts with non-Series may lead to ambiguous ordering." + with pytest.raises(ValueError, match=msg): DataFrame({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]}) # Length-one dict micro-optimization
- [x] ref #30999 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] added message to pytest.raises for test_constructor_dict
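For reference, a hedged sketch of why `match=` tightens the test: `pytest.raises(..., match=pattern)` regex-searches the pattern against the string of the raised exception, so an unrelated `ValueError` no longer passes silently (the function below is a toy stand-in, not pandas code):

```python
import pytest


def build(data):
    # Toy stand-in mimicking the DataFrame constructor check.
    if any(not isinstance(v, dict) for v in data.values()):
        raise ValueError(
            "Mixing dicts with non-Series may lead to ambiguous ordering."
        )
    return data


def test_build_ambiguous():
    msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
    # match= is applied with re.search to str(excinfo.value); a
    # ValueError raised for a different reason would fail the test.
    with pytest.raises(ValueError, match=msg):
        build({"A": {"a": "a", "b": "b"}, "B": ["a", "b", "c"]})
```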
https://api.github.com/repos/pandas-dev/pandas/pulls/32236
2020-02-25T11:26:39Z
2020-02-26T12:43:54Z
2020-02-26T12:43:54Z
2020-02-26T12:43:58Z
Fix exception causes in 14 modules
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index e97872d880dee..9a53e7cd241e1 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -238,8 +238,8 @@ def read_sql_table( meta = MetaData(con, schema=schema) try: meta.reflect(only=[table_name], views=True) - except sqlalchemy.exc.InvalidRequestError: - raise ValueError(f"Table {table_name} not found") + except sqlalchemy.exc.InvalidRequestError as err: + raise ValueError(f"Table {table_name} not found") from err pandas_sql = SQLDatabase(con, meta=meta) table = pandas_sql.read_table( @@ -685,7 +685,7 @@ def insert_data(self): try: temp.reset_index(inplace=True) except ValueError as err: - raise ValueError(f"duplicate name in index/columns: {err}") + raise ValueError(f"duplicate name in index/columns: {err}") from err else: temp = self.frame @@ -1387,8 +1387,8 @@ def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): def _get_unicode_name(name): try: uname = str(name).encode("utf-8", "strict").decode("utf-8") - except UnicodeError: - raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") + except UnicodeError as err: + raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err return uname diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 593228e99477b..0397dfa923afb 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1161,8 +1161,8 @@ def f(typ: int) -> Union[int, str]: return typ try: return self.TYPE_MAP_XML[typ] - except KeyError: - raise ValueError(f"cannot convert stata types [{typ}]") + except KeyError as err: + raise ValueError(f"cannot convert stata types [{typ}]") from err typlist = [f(x) for x in raw_typlist] @@ -1171,8 +1171,8 @@ def g(typ: int) -> Union[str, np.dtype]: return str(typ) try: return self.DTYPE_MAP_XML[typ] - except KeyError: - raise ValueError(f"cannot convert stata dtype [{typ}]") + except KeyError as err: + raise ValueError(f"cannot convert stata dtype [{typ}]") from err dtyplist = [g(x) for x in raw_typlist] @@ -1296,14 +1296,14 @@ def _read_old_header(self, first_char: bytes) -> None: try: self.typlist = [self.TYPE_MAP[typ] for typ in typlist] - except ValueError: + except ValueError as err: invalid_types = ",".join(str(x) for x in typlist) - raise ValueError(f"cannot convert stata types [{invalid_types}]") + raise ValueError(f"cannot convert stata types [{invalid_types}]") from err try: self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] - except ValueError: + except ValueError as err: invalid_dtypes = ",".join(str(x) for x in typlist) - raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") + raise ValueError(f"cannot convert stata dtypes [{invalid_dtypes}]") from err if self.format_version > 108: self.varlist = [ @@ -1761,7 +1761,7 @@ def _do_convert_categoricals( categories.append(category) # Partially labeled try: cat_data.categories = categories - except ValueError: + except ValueError as err: vc = Series(categories).value_counts() repeated_cats = list(vc.index[vc > 1]) repeats = "-" * 80 + "\n" + "\n".join(repeated_cats) @@ -1777,7 +1777,7 @@ def _do_convert_categoricals( The repeated labels are: {repeats} """ - raise ValueError(msg) + raise ValueError(msg) from err # TODO: is the next line needed above in the data(...) method? 
cat_series = Series(cat_data, index=data.index) cat_converted_data.append((col, cat_series)) @@ -3143,11 +3143,11 @@ def _write_variable_labels(self) -> None: raise ValueError("Variable labels must be 80 characters or fewer") try: encoded = label.encode(self._encoding) - except UnicodeEncodeError: + except UnicodeEncodeError as err: raise ValueError( "Variable labels must contain only characters that " f"can be encoded in {self._encoding}" - ) + ) from err bio.write(_pad_bytes_new(encoded, vl_len + 1)) else: diff --git a/pandas/tests/extension/arrow/arrays.py b/pandas/tests/extension/arrow/arrays.py index cd4b43c83340f..ffebc9f8b3359 100644 --- a/pandas/tests/extension/arrow/arrays.py +++ b/pandas/tests/extension/arrow/arrays.py @@ -148,8 +148,8 @@ def _reduce(self, method, skipna=True, **kwargs): try: op = getattr(arr, method) - except AttributeError: - raise TypeError + except AttributeError as err: + raise TypeError from err return op(**kwargs) def any(self, axis=0, out=None): diff --git a/pandas/tests/extension/decimal/array.py b/pandas/tests/extension/decimal/array.py index 2614d8c72c342..9384ed5199c1f 100644 --- a/pandas/tests/extension/decimal/array.py +++ b/pandas/tests/extension/decimal/array.py @@ -183,8 +183,10 @@ def _reduce(self, name, skipna=True, **kwargs): try: op = getattr(self.data, name) - except AttributeError: - raise NotImplementedError(f"decimal does not support the {name} operation") + except AttributeError as err: + raise NotImplementedError( + f"decimal does not support the {name} operation" + ) from err return op(axis=0) diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index a229a824d0f9b..1f026e405dc17 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -137,13 +137,13 @@ def take(self, indexer, allow_fill=False, fill_value=None): output = [ self.data[loc] if loc != -1 else fill_value for loc in indexer ] - except IndexError: - raise IndexError(msg) + except IndexError as err: + raise IndexError(msg) from err else: try: output = [self.data[loc] for loc in indexer] - except IndexError: - raise IndexError(msg) + except IndexError as err: + raise IndexError(msg) from err return self._from_sequence(output) diff --git a/pandas/tests/extension/list/array.py b/pandas/tests/extension/list/array.py index 7c1da5e8102e2..d86f90e58d897 100644 --- a/pandas/tests/extension/list/array.py +++ b/pandas/tests/extension/list/array.py @@ -86,13 +86,13 @@ def take(self, indexer, allow_fill=False, fill_value=None): output = [ self.data[loc] if loc != -1 else fill_value for loc in indexer ] - except IndexError: - raise IndexError(msg) + except IndexError as err: + raise IndexError(msg) from err else: try: output = [self.data[loc] for loc in indexer] - except IndexError: - raise IndexError(msg) + except IndexError as err: + raise IndexError(msg) from err return self._from_sequence(output) diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 13723f6455bff..19c7454f15bed 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -1110,8 +1110,8 @@ def test_unit(self, cache): for val in ["foo", Timestamp("20130101")]: try: to_datetime(val, errors="raise", unit="s", cache=cache) - except tslib.OutOfBoundsDatetime: - raise AssertionError("incorrect exception raised") + except tslib.OutOfBoundsDatetime as err: + raise AssertionError("incorrect exception raised") from err except ValueError: pass diff 
--git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 0ad9f2c1e941f..fc3876eee9d66 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -2575,19 +2575,19 @@ def setup_class(cls): pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest") try: pymysql.connect(read_default_group="pandas") - except pymysql.ProgrammingError: + except pymysql.ProgrammingError as err: raise RuntimeError( "Create a group of connection parameters under the heading " "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf." - ) - except pymysql.Error: + ) from err + except pymysql.Error as err: raise RuntimeError( "Cannot connect to database. " "Create a group of connection parameters under the heading " "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf." - ) + ) from err @pytest.fixture(autouse=True) def setup_method(self, request, datapath): @@ -2595,19 +2595,19 @@ def setup_method(self, request, datapath): pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest") try: pymysql.connect(read_default_group="pandas") - except pymysql.ProgrammingError: + except pymysql.ProgrammingError as err: raise RuntimeError( "Create a group of connection parameters under the heading " "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf." - ) - except pymysql.Error: + ) from err + except pymysql.Error as err: raise RuntimeError( "Cannot connect to database. " "Create a group of connection parameters under the heading " "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf." - ) + ) from err self.method = request.function diff --git a/pandas/tests/reshape/merge/test_join.py b/pandas/tests/reshape/merge/test_join.py index 685995ee201f8..725157b7c8523 100644 --- a/pandas/tests/reshape/merge/test_join.py +++ b/pandas/tests/reshape/merge/test_join.py @@ -810,11 +810,11 @@ def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix try: lgroup = left_grouped.get_group(group_key) - except KeyError: + except KeyError as err: if how in ("left", "inner"): raise AssertionError( f"key {group_key} should not have been in the join" - ) + ) from err _assert_all_na(l_joined, left.columns, join_col) else: @@ -822,11 +822,11 @@ def _check_join(left, right, result, join_col, how="left", lsuffix="_x", rsuffix try: rgroup = right_grouped.get_group(group_key) - except KeyError: + except KeyError as err: if how in ("right", "inner"): raise AssertionError( f"key {group_key} should not have been in the join" - ) + ) from err _assert_all_na(r_joined, right.columns, join_col) else: diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 5811f3bc196a1..afd8f4178f741 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1849,8 +1849,8 @@ def __len__(self) -> int: def __getitem__(self, index): try: return {0: df1, 1: df2}[index] - except KeyError: - raise IndexError + except KeyError as err: + raise IndexError from err tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected) diff --git a/pandas/tests/tseries/offsets/common.py b/pandas/tests/tseries/offsets/common.py index 71953fd095882..25837c0b6aee2 100644 --- a/pandas/tests/tseries/offsets/common.py +++ b/pandas/tests/tseries/offsets/common.py @@ -11,11 +11,11 @@ def assert_offset_equal(offset, base, expected): assert actual == 
expected assert actual_swapped == expected assert actual_apply == expected - except AssertionError: + except AssertionError as err: raise AssertionError( f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})" f"\nAt Date: {base}" - ) + ) from err def assert_is_on_offset(offset, date, expected): diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py index af34180fb3170..1a1b7e8e1bd08 100644 --- a/pandas/tseries/frequencies.py +++ b/pandas/tseries/frequencies.py @@ -141,8 +141,8 @@ def to_offset(freq) -> Optional[DateOffset]: delta = offset else: delta = delta + offset - except ValueError: - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) + except ValueError as err: + raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) from err else: delta = None @@ -173,8 +173,8 @@ def to_offset(freq) -> Optional[DateOffset]: delta = offset else: delta = delta + offset - except (ValueError, TypeError): - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) + except (ValueError, TypeError) as err: + raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) from err if delta is None: raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(freq)) @@ -223,9 +223,9 @@ def _get_offset(name: str) -> DateOffset: # handles case where there's no suffix (and will TypeError if too # many '-') offset = klass._from_name(*split[1:]) - except (ValueError, TypeError, KeyError): + except (ValueError, TypeError, KeyError) as err: # bad prefix or suffix - raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name)) + raise ValueError(libfreqs.INVALID_FREQ_ERR_MSG.format(name)) from err # cache _offset_map[name] = offset diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 959dd19a50d90..b6bbe008812cb 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -2530,12 +2530,12 @@ def _tick_comp(op): def f(self, other): try: return op(self.delta, other.delta) - except AttributeError: + except AttributeError as err: # comparing with a non-Tick object raise TypeError( f"Invalid comparison between {type(self).__name__} " f"and {type(other).__name__}" - ) + ) from err f.__name__ = f"__{op.__name__}__" return f @@ -2570,10 +2570,10 @@ def __add__(self, other): return self.apply(other) except ApplyTypeError: return NotImplemented - except OverflowError: + except OverflowError as err: raise OverflowError( f"the add operation between {self} and {other} will overflow" - ) + ) from err def __eq__(self, other: Any) -> bool: if isinstance(other, str): diff --git a/pandas/util/_tester.py b/pandas/util/_tester.py index b299f3790ab22..1bdf0d8483c76 100644 --- a/pandas/util/_tester.py +++ b/pandas/util/_tester.py @@ -10,12 +10,12 @@ def test(extra_args=None): try: import pytest - except ImportError: - raise ImportError("Need pytest>=5.0.1 to run tests") + except ImportError as err: + raise ImportError("Need pytest>=5.0.1 to run tests") from err try: import hypothesis # noqa - except ImportError: - raise ImportError("Need hypothesis>=3.58 to run tests") + except ImportError as err: + raise ImportError("Need hypothesis>=3.58 to run tests") from err cmd = ["--skip-slow", "--skip-network", "--skip-db"] if extra_args: if not isinstance(extra_args, list):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
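Every hunk above applies the same Python 3 idiom. A minimal, self-contained sketch (hypothetical names) of what `raise ... from err` changes compared with a bare `raise` inside an `except` block:

```python
def lookup(table, key):
    try:
        return table[key]
    except KeyError as err:
        # Chaining with "from err" sets __cause__, so the traceback prints
        # "The above exception was the direct cause of ..." instead of the
        # misleading "During handling of the above exception, another
        # exception occurred" produced by an unchained raise.
        raise ValueError(f"no such option: {key!r}") from err


try:
    lookup({}, "foo")
except ValueError as exc:
    assert isinstance(exc.__cause__, KeyError)  # original error is preserved
```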
https://api.github.com/repos/pandas-dev/pandas/pulls/32235
2020-02-25T10:21:12Z
2020-02-26T12:44:51Z
2020-02-26T12:44:51Z
2020-02-26T12:45:32Z
use ExtensionIndex._concat_same_dtype
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 1b3b6934aa53a..22836f4c8f08c 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -551,14 +551,6 @@ def _summary(self, name=None) -> str: result = result.replace("'", "") return result - def _concat_same_dtype(self, to_concat, name): - """ - Concatenate to_concat which has the same class. - """ - new_data = type(self._data)._concat_same_type(to_concat) - - return self._simple_new(new_data, name=name) - def shift(self, periods=1, freq=None): """ Shift index by desired number of time frequency increments.
DatetimelikeIndex._concat_same_dtype is identical to the ExtensionIndex version, so re-use that.
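A toy sketch of why deleting the duplicate override is behavior-preserving; these classes stand in for the pandas hierarchy and are not the real ones. Attribute lookup simply falls through to the base class:

```python
class ExtensionIndexLike:
    def __init__(self, data):
        self.data = list(data)

    def _concat_same_dtype(self, to_concat, name=None):
        # type(self) keeps the subclass, so the base implementation already
        # returns a DatetimeLikeIndexLike when called on one.
        return type(self)([x for piece in to_concat for x in piece.data])


class DatetimeLikeIndexLike(ExtensionIndexLike):
    # Previously carried an identical _concat_same_dtype; with it removed,
    # the method resolves to ExtensionIndexLike via the MRO.
    pass


idx = DatetimeLikeIndexLike([1, 2])
out = idx._concat_same_dtype([DatetimeLikeIndexLike([1, 2]), DatetimeLikeIndexLike([3])])
assert type(out) is DatetimeLikeIndexLike and out.data == [1, 2, 3]
```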
https://api.github.com/repos/pandas-dev/pandas/pulls/32232
2020-02-25T03:49:29Z
2020-02-26T02:31:37Z
2020-02-26T02:31:37Z
2020-02-26T02:35:19Z
DOC: Clean up of DataFrame.ewm
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index e045d1c2211d7..8231521d1414a 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -35,13 +35,13 @@ class EWM(_Rolling): ---------- com : float, optional Specify decay in terms of center of mass, - :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`. + :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`. span : float, optional Specify decay in terms of span, - :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`. + :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. halflife : float, optional Specify decay in terms of half-life, - :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`. + :math:`\alpha = 1 - exp(-ln(2) / halflife)`, for :math:`halflife > 0`. alpha : float, optional Specify smoothing factor :math:`\alpha` directly, :math:`0 < \alpha \leq 1`. @@ -71,30 +71,48 @@ class EWM(_Rolling): Notes ----- - Exactly one of center of mass, span, half-life, and alpha must be provided. - Allowed values and relationship between the parameters are specified in the - parameter descriptions above; see the link at the end of this section for - a detailed explanation. - - When adjust is True (default), weighted averages are calculated using - weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1. - - When adjust is False, weighted averages are calculated recursively as: - weighted_average[0] = arg[0]; - weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i]. - - When ignore_na is False (default), weights are based on absolute positions. - For example, the weights of x and y used in calculating the final weighted - average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and - (1-alpha)**2 and alpha (if adjust is False). - - When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based - on relative positions. For example, the weights of x and y used in - calculating the final weighted average of [x, None, y] are 1-alpha and 1 - (if adjust is True), and 1-alpha and alpha (if adjust is False). - - More details can be found at - https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows + Exactly one center of mass paramter: ``com``, ``span``, ``halflife``, or ``alpha`` + must be provided. + Allowed values and the relation between the parameters are specified in the + parameter descriptions above (see the link at the end of this section for + a detailed explanation). + + Available EW (exponentially weighted) methods: ``mean()``, ``var()``, ``std()``, + ``corr()``, ``cov()``. + + When ``adjust=True`` (default), the EW function is calculated using + weights :math:`w_i = (1 - \alpha)^i`. + For example, the EW moving average of the series [:math:`x_0, x_1, ..., x_t`] would + be: + + .. math:: + y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + + (1 - \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t} + + When ``adjust=False``, the exponentially weighted function is calculated + recursively: + + .. math:: + \begin{split} + y_0 &= x_0 \\ + y_t &= (1 - \alpha) y_{t-1} + \alpha x_t, + \end{split} + + When ``ignore_na=False`` (default), weights are based on absolute positions. + For example, the weights of :math:`x_0` and :math:`x_2` used in calculating the + final weighted average of [:math:`x_0`, None, :math:`x_2`] are :math:`(1-\alpha)^2` + and :math:`1` if ``adjust=True``, and :math:`(1-\alpha)^2` and + :math:`\alpha` if ``adjust=False``. 
+ + When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based on + relative positions. For example, the weights of :math:`x_0` and :math:`x_2` used in + calculating the final weighted average of [:math:`x_0`, None, :math:`x_2`] are + :math:`1-\alpha` and :math:`1` if ``adjust=True``, and :math:`1-\alpha` and + :math:`\alpha` if ``adjust=False``. + + More details can be found at: + `Exponentially weighted windows + <https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows>`_. Examples --------
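As a quick sanity check of the adjusted formula added to the Notes above, the explicit weighted average can be compared against `ewm(...).mean()` (a sketch assuming numpy and pandas are available):

```python
import numpy as np
import pandas as pd

alpha = 0.3
x = pd.Series([1.0, 2.0, 3.0, 4.0])

# y_t = sum_i (1-a)^i * x_{t-i} / sum_i (1-a)^i, exactly as in the docstring.
manual = [
    sum((1 - alpha) ** i * x[t - i] for i in range(t + 1))
    / sum((1 - alpha) ** i for i in range(t + 1))
    for t in range(len(x))
]
assert np.allclose(manual, x.ewm(alpha=alpha, adjust=True).mean())
```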
- [ ] closes #31647 Though I don't know what @MarcoGorelli meant in the 5th issue. I didn't know how to extend my previous pull request, #32212, as I have already deleted the files on my machine. Here are the screenshots, @WillAyd: ![image](https://user-images.githubusercontent.com/12001304/75212620-caa16e00-5787-11ea-9bd9-532f557d3e33.png) ![image](https://user-images.githubusercontent.com/12001304/75213997-50271d00-578c-11ea-82e0-5f5b986ede6d.png) In the first image, the one potentially controversial change is log(0.5) -> -ln(2). The Notes have been expanded with a simple example from the provided link.
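On the log(0.5) -> -ln(2) question: the two halflife formulas are algebraically identical, since ln(0.5) = ln(1/2) = -ln(2). A quick numerical check, assuming numpy:

```python
import numpy as np

halflife = 7.0
alpha_old = 1 - np.exp(np.log(0.5) / halflife)  # form before this PR
alpha_new = 1 - np.exp(-np.log(2) / halflife)   # form after this PR
assert np.isclose(alpha_old, alpha_new)

# And the name checks out: after `halflife` observations the weight halves.
assert np.isclose((1 - alpha_new) ** halflife, 0.5)
```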
https://api.github.com/repos/pandas-dev/pandas/pulls/32231
2020-02-25T03:30:52Z
2020-02-25T04:24:04Z
null
2020-02-25T04:24:04Z
REF/TST: method-specific files for DataFrame timeseries methods
diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py index 774eb443c45fe..03598b6bb5eca 100644 --- a/pandas/tests/frame/conftest.py +++ b/pandas/tests/frame/conftest.py @@ -1,3 +1,5 @@ +from itertools import product + import numpy as np import pytest @@ -5,6 +7,11 @@ import pandas._testing as tm +@pytest.fixture(params=product([True, False], [True, False])) +def close_open_fixture(request): + return request.param + + @pytest.fixture def float_frame_with_na(): """ diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py new file mode 100644 index 0000000000000..40b0ec0c0d811 --- /dev/null +++ b/pandas/tests/frame/methods/test_asfreq.py @@ -0,0 +1,58 @@ +from datetime import datetime + +import numpy as np + +from pandas import DataFrame, DatetimeIndex, Series, date_range +import pandas._testing as tm + +from pandas.tseries import offsets + + +class TestAsFreq: + def test_asfreq(self, datetime_frame): + offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd()) + rule_monthly = datetime_frame.asfreq("BM") + + tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"]) + + filled = rule_monthly.asfreq("B", method="pad") # noqa + # TODO: actually check that this worked. + + # don't forget! + filled_dep = rule_monthly.asfreq("B", method="pad") # noqa + + # test does not blow up on length-0 DataFrame + zero_length = datetime_frame.reindex([]) + result = zero_length.asfreq("BM") + assert result is not zero_length + + def test_asfreq_datetimeindex(self): + df = DataFrame( + {"A": [1, 2, 3]}, + index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)], + ) + df = df.asfreq("B") + assert isinstance(df.index, DatetimeIndex) + + ts = df["A"].asfreq("B") + assert isinstance(ts.index, DatetimeIndex) + + def test_asfreq_fillvalue(self): + # test for fill value during upsampling, related to issue 3715 + + # setup + rng = date_range("1/1/2016", periods=10, freq="2S") + ts = Series(np.arange(len(rng)), index=rng) + df = DataFrame({"one": ts}) + + # insert pre-existing missing value + df.loc["2016-01-01 00:00:08", "one"] = None + + actual_df = df.asfreq(freq="1S", fill_value=9.0) + expected_df = df.asfreq(freq="1S").fillna(9.0) + expected_df.loc["2016-01-01 00:00:08", "one"] = None + tm.assert_frame_equal(expected_df, actual_df) + + expected_series = ts.asfreq(freq="1S").fillna(9.0) + actual_series = ts.asfreq(freq="1S", fill_value=9.0) + tm.assert_series_equal(expected_series, actual_series) diff --git a/pandas/tests/frame/methods/test_at_time.py b/pandas/tests/frame/methods/test_at_time.py new file mode 100644 index 0000000000000..108bbbfa183c4 --- /dev/null +++ b/pandas/tests/frame/methods/test_at_time.py @@ -0,0 +1,86 @@ +from datetime import time + +import numpy as np +import pytest +import pytz + +from pandas import DataFrame, date_range +import pandas._testing as tm + + +class TestAtTime: + def test_at_time(self): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), 2), index=rng) + rs = ts.at_time(rng[1]) + assert (rs.index.hour == rng[1].hour).all() + assert (rs.index.minute == rng[1].minute).all() + assert (rs.index.second == rng[1].second).all() + + result = ts.at_time("9:30") + expected = ts.at_time(time(9, 30)) + tm.assert_frame_equal(result, expected) + + result = ts.loc[time(9, 30)] + expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)] + + tm.assert_frame_equal(result, expected) + + # midnight, everything + rng = date_range("1/1/2000", "1/31/2000") + ts = 
DataFrame(np.random.randn(len(rng), 3), index=rng) + + result = ts.at_time(time(0, 0)) + tm.assert_frame_equal(result, ts) + + # time doesn't exist + rng = date_range("1/1/2012", freq="23Min", periods=384) + ts = DataFrame(np.random.randn(len(rng), 2), rng) + rs = ts.at_time("16:00") + assert len(rs) == 0 + + @pytest.mark.parametrize( + "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)] + ) + def test_at_time_errors(self, hour): + # GH#24043 + dti = date_range("2018", periods=3, freq="H") + df = DataFrame(list(range(len(dti))), index=dti) + if getattr(hour, "tzinfo", None) is None: + result = df.at_time(hour) + expected = df.iloc[1:2] + tm.assert_frame_equal(result, expected) + else: + with pytest.raises(ValueError, match="Index must be timezone"): + df.at_time(hour) + + def test_at_time_tz(self): + # GH#24043 + dti = date_range("2018", periods=3, freq="H", tz="US/Pacific") + df = DataFrame(list(range(len(dti))), index=dti) + result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern"))) + expected = df.iloc[1:2] + tm.assert_frame_equal(result, expected) + + def test_at_time_raises(self): + # GH#20725 + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(TypeError): # index is not a DatetimeIndex + df.at_time("00:00") + + @pytest.mark.parametrize("axis", ["index", "columns", 0, 1]) + def test_at_time_axis(self, axis): + # issue 8839 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), len(rng))) + ts.index, ts.columns = rng, rng + + indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)] + + if axis in ["index", 0]: + expected = ts.loc[indices, :] + elif axis in ["columns", 1]: + expected = ts.loc[:, indices] + + result = ts.at_time("9:30", axis=axis) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_between_time.py b/pandas/tests/frame/methods/test_between_time.py new file mode 100644 index 0000000000000..b40604b4f4a16 --- /dev/null +++ b/pandas/tests/frame/methods/test_between_time.py @@ -0,0 +1,110 @@ +from datetime import time + +import numpy as np +import pytest + +from pandas import DataFrame, date_range +import pandas._testing as tm + + +class TestBetweenTime: + def test_between_time(self, close_open_fixture): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), 2), index=rng) + stime = time(0, 0) + etime = time(1, 0) + inc_start, inc_end = close_open_fixture + + filtered = ts.between_time(stime, etime, inc_start, inc_end) + exp_len = 13 * 4 + 1 + if not inc_start: + exp_len -= 5 + if not inc_end: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inc_start: + assert t >= stime + else: + assert t > stime + + if inc_end: + assert t <= etime + else: + assert t < etime + + result = ts.between_time("00:00", "01:00") + expected = ts.between_time(stime, etime) + tm.assert_frame_equal(result, expected) + + # across midnight + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), 2), index=rng) + stime = time(22, 0) + etime = time(9, 0) + + filtered = ts.between_time(stime, etime, inc_start, inc_end) + exp_len = (12 * 11 + 1) * 4 + 1 + if not inc_start: + exp_len -= 4 + if not inc_end: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inc_start: + assert (t >= stime) or (t <= etime) + else: + assert (t > stime) or (t <= etime) + + if inc_end: + assert (t <= etime) or (t >= stime) + else: 
+ assert (t < etime) or (t >= stime) + + def test_between_time_raises(self): + # GH#20725 + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(TypeError): # index is not a DatetimeIndex + df.between_time(start_time="00:00", end_time="12:00") + + def test_between_time_axis(self, axis): + # GH#8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + ts = DataFrame(np.random.randn(len(rng), len(rng))) + stime, etime = ("08:00:00", "09:00:00") + exp_len = 7 + + if axis in ["index", 0]: + ts.index = rng + assert len(ts.between_time(stime, etime)) == exp_len + assert len(ts.between_time(stime, etime, axis=0)) == exp_len + + if axis in ["columns", 1]: + ts.columns = rng + selected = ts.between_time(stime, etime, axis=1).columns + assert len(selected) == exp_len + + def test_between_time_axis_raises(self, axis): + # issue 8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + mask = np.arange(0, len(rng)) + rand_data = np.random.randn(len(rng), len(rng)) + ts = DataFrame(rand_data, index=rng, columns=rng) + stime, etime = ("08:00:00", "09:00:00") + + msg = "Index must be DatetimeIndex" + if axis in ["columns", 1]: + ts.index = mask + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime) + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime, axis=0) + + if axis in ["index", 0]: + ts.columns = mask + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime, axis=1) diff --git a/pandas/tests/frame/methods/test_to_period.py b/pandas/tests/frame/methods/test_to_period.py new file mode 100644 index 0000000000000..eac78e611b008 --- /dev/null +++ b/pandas/tests/frame/methods/test_to_period.py @@ -0,0 +1,36 @@ +import numpy as np +import pytest + +from pandas import DataFrame, date_range, period_range +import pandas._testing as tm + + +class TestToPeriod: + def test_frame_to_period(self): + K = 5 + + dr = date_range("1/1/2000", "1/1/2001") + pr = period_range("1/1/2000", "1/1/2001") + df = DataFrame(np.random.randn(len(dr), K), index=dr) + df["mix"] = "a" + + pts = df.to_period() + exp = df.copy() + exp.index = pr + tm.assert_frame_equal(pts, exp) + + pts = df.to_period("M") + tm.assert_index_equal(pts.index, exp.index.asfreq("M")) + + df = df.T + pts = df.to_period(axis=1) + exp = df.copy() + exp.columns = pr + tm.assert_frame_equal(pts, exp) + + pts = df.to_period("M", axis=1) + tm.assert_index_equal(pts.columns, exp.columns.asfreq("M")) + + msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" + with pytest.raises(ValueError, match=msg): + df.to_period(axis=2) diff --git a/pandas/tests/frame/methods/test_tz_convert.py b/pandas/tests/frame/methods/test_tz_convert.py new file mode 100644 index 0000000000000..ea8c4b88538d4 --- /dev/null +++ b/pandas/tests/frame/methods/test_tz_convert.py @@ -0,0 +1,84 @@ +import numpy as np +import pytest + +from pandas import DataFrame, Index, MultiIndex, date_range +import pandas._testing as tm + + +class TestTZConvert: + def test_frame_tz_convert(self): + rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") + + df = DataFrame({"a": 1}, index=rng) + result = df.tz_convert("Europe/Berlin") + expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin")) + assert result.index.tz.zone == "Europe/Berlin" + tm.assert_frame_equal(result, expected) + + df = df.T + result = df.tz_convert("Europe/Berlin", axis=1) + assert result.columns.tz.zone == "Europe/Berlin" + tm.assert_frame_equal(result, expected.T) + + @pytest.mark.parametrize("fn", ["tz_localize", 
"tz_convert"]) + def test_tz_convert_and_localize(self, fn): + l0 = date_range("20140701", periods=5, freq="D") + l1 = date_range("20140701", periods=5, freq="D") + + int_idx = Index(range(5)) + + if fn == "tz_convert": + l0 = l0.tz_localize("UTC") + l1 = l1.tz_localize("UTC") + + for idx in [l0, l1]: + + l0_expected = getattr(idx, fn)("US/Pacific") + l1_expected = getattr(idx, fn)("US/Pacific") + + df1 = DataFrame(np.ones(5), index=l0) + df1 = getattr(df1, fn)("US/Pacific") + tm.assert_index_equal(df1.index, l0_expected) + + # MultiIndex + # GH7846 + df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1])) + + df3 = getattr(df2, fn)("US/Pacific", level=0) + assert not df3.index.levels[0].equals(l0) + tm.assert_index_equal(df3.index.levels[0], l0_expected) + tm.assert_index_equal(df3.index.levels[1], l1) + assert not df3.index.levels[1].equals(l1_expected) + + df3 = getattr(df2, fn)("US/Pacific", level=1) + tm.assert_index_equal(df3.index.levels[0], l0) + assert not df3.index.levels[0].equals(l0_expected) + tm.assert_index_equal(df3.index.levels[1], l1_expected) + assert not df3.index.levels[1].equals(l1) + + df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) + + # TODO: untested + df5 = getattr(df4, fn)("US/Pacific", level=1) # noqa + + tm.assert_index_equal(df3.index.levels[0], l0) + assert not df3.index.levels[0].equals(l0_expected) + tm.assert_index_equal(df3.index.levels[1], l1_expected) + assert not df3.index.levels[1].equals(l1) + + # Bad Inputs + + # Not DatetimeIndex / PeriodIndex + with pytest.raises(TypeError, match="DatetimeIndex"): + df = DataFrame(index=int_idx) + df = getattr(df, fn)("US/Pacific") + + # Not DatetimeIndex / PeriodIndex + with pytest.raises(TypeError, match="DatetimeIndex"): + df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) + df = getattr(df, fn)("US/Pacific", level=0) + + # Invalid level + with pytest.raises(ValueError, match="not valid"): + df = DataFrame(index=l0) + df = getattr(df, fn)("US/Pacific", level=1) diff --git a/pandas/tests/frame/methods/test_tz_localize.py b/pandas/tests/frame/methods/test_tz_localize.py new file mode 100644 index 0000000000000..1d4e26a6999b7 --- /dev/null +++ b/pandas/tests/frame/methods/test_tz_localize.py @@ -0,0 +1,21 @@ +from pandas import DataFrame, date_range +import pandas._testing as tm + + +class TestTZLocalize: + # See also: + # test_tz_convert_and_localize in test_tz_convert + + def test_frame_tz_localize(self): + rng = date_range("1/1/2011", periods=100, freq="H") + + df = DataFrame({"a": 1}, index=rng) + result = df.tz_localize("utc") + expected = DataFrame({"a": 1}, rng.tz_localize("UTC")) + assert result.index.tz.zone == "UTC" + tm.assert_frame_equal(result, expected) + + df = df.T + result = df.tz_localize("utc", axis=1) + assert result.columns.tz.zone == "UTC" + tm.assert_frame_equal(result, expected.T) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 5e06b6402c34f..b713af92eac27 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -1,30 +1,10 @@ -from datetime import datetime, time -from itertools import product - import numpy as np import pytest -import pytz import pandas as pd -from pandas import ( - DataFrame, - DatetimeIndex, - Index, - MultiIndex, - Series, - date_range, - period_range, - to_datetime, -) +from pandas import DataFrame, Series, date_range, to_datetime import pandas._testing as tm -import pandas.tseries.offsets as offsets - - 
-@pytest.fixture(params=product([True, False], [True, False])) -def close_open_fixture(request): - return request.param - class TestDataFrameTimeSeriesMethods: def test_frame_ctor_datetime64_column(self): @@ -80,54 +60,6 @@ def test_frame_append_datetime64_col_other_units(self): assert (tmp["dates"].values == ex_vals).all() - def test_asfreq(self, datetime_frame): - offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd()) - rule_monthly = datetime_frame.asfreq("BM") - - tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"]) - - filled = rule_monthly.asfreq("B", method="pad") # noqa - # TODO: actually check that this worked. - - # don't forget! - filled_dep = rule_monthly.asfreq("B", method="pad") # noqa - - # test does not blow up on length-0 DataFrame - zero_length = datetime_frame.reindex([]) - result = zero_length.asfreq("BM") - assert result is not zero_length - - def test_asfreq_datetimeindex(self): - df = DataFrame( - {"A": [1, 2, 3]}, - index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)], - ) - df = df.asfreq("B") - assert isinstance(df.index, DatetimeIndex) - - ts = df["A"].asfreq("B") - assert isinstance(ts.index, DatetimeIndex) - - def test_asfreq_fillvalue(self): - # test for fill value during upsampling, related to issue 3715 - - # setup - rng = pd.date_range("1/1/2016", periods=10, freq="2S") - ts = pd.Series(np.arange(len(rng)), index=rng) - df = pd.DataFrame({"one": ts}) - - # insert pre-existing missing value - df.loc["2016-01-01 00:00:08", "one"] = None - - actual_df = df.asfreq(freq="1S", fill_value=9.0) - expected_df = df.asfreq(freq="1S").fillna(9.0) - expected_df.loc["2016-01-01 00:00:08", "one"] = None - tm.assert_frame_equal(expected_df, actual_df) - - expected_series = ts.asfreq(freq="1S").fillna(9.0) - actual_series = ts.asfreq(freq="1S", fill_value=9.0) - tm.assert_series_equal(expected_series, actual_series) - @pytest.mark.parametrize( "data,idx,expected_first,expected_last", [ @@ -239,183 +171,6 @@ def test_last_raises(self): with pytest.raises(TypeError): # index is not a DatetimeIndex df.last("1D") - def test_at_time(self): - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - rs = ts.at_time(rng[1]) - assert (rs.index.hour == rng[1].hour).all() - assert (rs.index.minute == rng[1].minute).all() - assert (rs.index.second == rng[1].second).all() - - result = ts.at_time("9:30") - expected = ts.at_time(time(9, 30)) - tm.assert_frame_equal(result, expected) - - result = ts.loc[time(9, 30)] - expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)] - - tm.assert_frame_equal(result, expected) - - # midnight, everything - rng = date_range("1/1/2000", "1/31/2000") - ts = DataFrame(np.random.randn(len(rng), 3), index=rng) - - result = ts.at_time(time(0, 0)) - tm.assert_frame_equal(result, ts) - - # time doesn't exist - rng = date_range("1/1/2012", freq="23Min", periods=384) - ts = DataFrame(np.random.randn(len(rng), 2), rng) - rs = ts.at_time("16:00") - assert len(rs) == 0 - - @pytest.mark.parametrize( - "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)] - ) - def test_at_time_errors(self, hour): - # GH 24043 - dti = pd.date_range("2018", periods=3, freq="H") - df = pd.DataFrame(list(range(len(dti))), index=dti) - if getattr(hour, "tzinfo", None) is None: - result = df.at_time(hour) - expected = df.iloc[1:2] - tm.assert_frame_equal(result, expected) - else: - with pytest.raises(ValueError, match="Index must be timezone"): - df.at_time(hour) - - def 
test_at_time_tz(self): - # GH 24043 - dti = pd.date_range("2018", periods=3, freq="H", tz="US/Pacific") - df = pd.DataFrame(list(range(len(dti))), index=dti) - result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern"))) - expected = df.iloc[1:2] - tm.assert_frame_equal(result, expected) - - def test_at_time_raises(self): - # GH20725 - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) - with pytest.raises(TypeError): # index is not a DatetimeIndex - df.at_time("00:00") - - @pytest.mark.parametrize("axis", ["index", "columns", 0, 1]) - def test_at_time_axis(self, axis): - # issue 8839 - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), len(rng))) - ts.index, ts.columns = rng, rng - - indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)] - - if axis in ["index", 0]: - expected = ts.loc[indices, :] - elif axis in ["columns", 1]: - expected = ts.loc[:, indices] - - result = ts.at_time("9:30", axis=axis) - tm.assert_frame_equal(result, expected) - - def test_between_time(self, close_open_fixture): - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - stime = time(0, 0) - etime = time(1, 0) - inc_start, inc_end = close_open_fixture - - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = 13 * 4 + 1 - if not inc_start: - exp_len -= 5 - if not inc_end: - exp_len -= 4 - - assert len(filtered) == exp_len - for rs in filtered.index: - t = rs.time() - if inc_start: - assert t >= stime - else: - assert t > stime - - if inc_end: - assert t <= etime - else: - assert t < etime - - result = ts.between_time("00:00", "01:00") - expected = ts.between_time(stime, etime) - tm.assert_frame_equal(result, expected) - - # across midnight - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - stime = time(22, 0) - etime = time(9, 0) - - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = (12 * 11 + 1) * 4 + 1 - if not inc_start: - exp_len -= 4 - if not inc_end: - exp_len -= 4 - - assert len(filtered) == exp_len - for rs in filtered.index: - t = rs.time() - if inc_start: - assert (t >= stime) or (t <= etime) - else: - assert (t > stime) or (t <= etime) - - if inc_end: - assert (t <= etime) or (t >= stime) - else: - assert (t < etime) or (t >= stime) - - def test_between_time_raises(self): - # GH20725 - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) - with pytest.raises(TypeError): # index is not a DatetimeIndex - df.between_time(start_time="00:00", end_time="12:00") - - def test_between_time_axis(self, axis): - # issue 8839 - rng = date_range("1/1/2000", periods=100, freq="10min") - ts = DataFrame(np.random.randn(len(rng), len(rng))) - stime, etime = ("08:00:00", "09:00:00") - exp_len = 7 - - if axis in ["index", 0]: - ts.index = rng - assert len(ts.between_time(stime, etime)) == exp_len - assert len(ts.between_time(stime, etime, axis=0)) == exp_len - - if axis in ["columns", 1]: - ts.columns = rng - selected = ts.between_time(stime, etime, axis=1).columns - assert len(selected) == exp_len - - def test_between_time_axis_raises(self, axis): - # issue 8839 - rng = date_range("1/1/2000", periods=100, freq="10min") - mask = np.arange(0, len(rng)) - rand_data = np.random.randn(len(rng), len(rng)) - ts = DataFrame(rand_data, index=rng, columns=rng) - stime, etime = ("08:00:00", "09:00:00") - - msg = "Index must be DatetimeIndex" - if axis in ["columns", 1]: - ts.index = mask - with pytest.raises(TypeError, 
match=msg): - ts.between_time(stime, etime) - with pytest.raises(TypeError, match=msg): - ts.between_time(stime, etime, axis=0) - - if axis in ["index", 0]: - ts.columns = mask - with pytest.raises(TypeError, match=msg): - ts.between_time(stime, etime, axis=1) - def test_operation_on_NaT(self): # Both NaT and Timestamp are in DataFrame. df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]}) @@ -455,95 +210,3 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): {0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]" ) tm.assert_frame_equal(result, expected) - - def test_frame_to_period(self): - K = 5 - - dr = date_range("1/1/2000", "1/1/2001") - pr = period_range("1/1/2000", "1/1/2001") - df = DataFrame(np.random.randn(len(dr), K), index=dr) - df["mix"] = "a" - - pts = df.to_period() - exp = df.copy() - exp.index = pr - tm.assert_frame_equal(pts, exp) - - pts = df.to_period("M") - tm.assert_index_equal(pts.index, exp.index.asfreq("M")) - - df = df.T - pts = df.to_period(axis=1) - exp = df.copy() - exp.columns = pr - tm.assert_frame_equal(pts, exp) - - pts = df.to_period("M", axis=1) - tm.assert_index_equal(pts.columns, exp.columns.asfreq("M")) - - msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" - with pytest.raises(ValueError, match=msg): - df.to_period(axis=2) - - @pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"]) - def test_tz_convert_and_localize(self, fn): - l0 = date_range("20140701", periods=5, freq="D") - l1 = date_range("20140701", periods=5, freq="D") - - int_idx = Index(range(5)) - - if fn == "tz_convert": - l0 = l0.tz_localize("UTC") - l1 = l1.tz_localize("UTC") - - for idx in [l0, l1]: - - l0_expected = getattr(idx, fn)("US/Pacific") - l1_expected = getattr(idx, fn)("US/Pacific") - - df1 = DataFrame(np.ones(5), index=l0) - df1 = getattr(df1, fn)("US/Pacific") - tm.assert_index_equal(df1.index, l0_expected) - - # MultiIndex - # GH7846 - df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1])) - - df3 = getattr(df2, fn)("US/Pacific", level=0) - assert not df3.index.levels[0].equals(l0) - tm.assert_index_equal(df3.index.levels[0], l0_expected) - tm.assert_index_equal(df3.index.levels[1], l1) - assert not df3.index.levels[1].equals(l1_expected) - - df3 = getattr(df2, fn)("US/Pacific", level=1) - tm.assert_index_equal(df3.index.levels[0], l0) - assert not df3.index.levels[0].equals(l0_expected) - tm.assert_index_equal(df3.index.levels[1], l1_expected) - assert not df3.index.levels[1].equals(l1) - - df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) - - # TODO: untested - df5 = getattr(df4, fn)("US/Pacific", level=1) # noqa - - tm.assert_index_equal(df3.index.levels[0], l0) - assert not df3.index.levels[0].equals(l0_expected) - tm.assert_index_equal(df3.index.levels[1], l1_expected) - assert not df3.index.levels[1].equals(l1) - - # Bad Inputs - - # Not DatetimeIndex / PeriodIndex - with pytest.raises(TypeError, match="DatetimeIndex"): - df = DataFrame(index=int_idx) - df = getattr(df, fn)("US/Pacific") - - # Not DatetimeIndex / PeriodIndex - with pytest.raises(TypeError, match="DatetimeIndex"): - df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) - df = getattr(df, fn)("US/Pacific", level=0) - - # Invalid level - with pytest.raises(ValueError, match="not valid"): - df = DataFrame(index=l0) - df = getattr(df, fn)("US/Pacific", level=1) diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index b60f2052a988f..62e8a4b470218 100644 --- 
a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -59,34 +59,6 @@ def test_frame_from_records_utc(self): # it works DataFrame.from_records([rec], index="begin_time") - def test_frame_tz_localize(self): - rng = date_range("1/1/2011", periods=100, freq="H") - - df = DataFrame({"a": 1}, index=rng) - result = df.tz_localize("utc") - expected = DataFrame({"a": 1}, rng.tz_localize("UTC")) - assert result.index.tz.zone == "UTC" - tm.assert_frame_equal(result, expected) - - df = df.T - result = df.tz_localize("utc", axis=1) - assert result.columns.tz.zone == "UTC" - tm.assert_frame_equal(result, expected.T) - - def test_frame_tz_convert(self): - rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") - - df = DataFrame({"a": 1}, index=rng) - result = df.tz_convert("Europe/Berlin") - expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin")) - assert result.index.tz.zone == "Europe/Berlin" - tm.assert_frame_equal(result, expected) - - df = df.T - result = df.tz_convert("Europe/Berlin", axis=1) - assert result.columns.tz.zone == "Europe/Berlin" - tm.assert_frame_equal(result, expected.T) - def test_frame_join_tzaware(self): test1 = DataFrame( np.zeros((6, 3)),
xref #32226.
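One mechanism worth spelling out: the diff hoists `close_open_fixture` into `conftest.py` so that the newly split test files can all share it. A minimal sketch of that pytest pattern, written as two files (toy test, not the pandas suite):

```python
# conftest.py -- fixtures here are visible to every test module in the package
from itertools import product

import pytest


@pytest.fixture(params=list(product([True, False], [True, False])))
def close_open_fixture(request):
    # pytest re-runs any test that requests this fixture once per
    # (inc_start, inc_end) combination: (True, True), (True, False), ...
    return request.param


# test_between_time_toy.py -- no import of conftest needed
def test_endpoint_flags(close_open_fixture):
    inc_start, inc_end = close_open_fixture
    assert isinstance(inc_start, bool) and isinstance(inc_end, bool)
```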
https://api.github.com/repos/pandas-dev/pandas/pulls/32230
2020-02-25T02:32:51Z
2020-02-25T20:40:23Z
2020-02-25T20:40:23Z
2020-02-25T20:51:01Z
REF/TST: method-specific files for combine, update; parametrize
diff --git a/pandas/tests/series/methods/test_combine.py b/pandas/tests/series/methods/test_combine.py new file mode 100644 index 0000000000000..75d47e3daa103 --- /dev/null +++ b/pandas/tests/series/methods/test_combine.py @@ -0,0 +1,17 @@ +from pandas import Series +import pandas._testing as tm + + +class TestCombine: + def test_combine_scalar(self): + # GH#21248 + # Note - combine() with another Series is tested elsewhere because + # it is used when testing operators + ser = Series([i * 10 for i in range(5)]) + result = ser.combine(3, lambda x, y: x + y) + expected = Series([i * 10 + 3 for i in range(5)]) + tm.assert_series_equal(result, expected) + + result = ser.combine(22, lambda x, y: min(x, y)) + expected = Series([min(i * 10, 22) for i in range(5)]) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/methods/test_update.py b/pandas/tests/series/methods/test_update.py new file mode 100644 index 0000000000000..b7f5f33294792 --- /dev/null +++ b/pandas/tests/series/methods/test_update.py @@ -0,0 +1,58 @@ +import numpy as np +import pytest + +from pandas import DataFrame, Series +import pandas._testing as tm + + +class TestUpdate: + def test_update(self): + s = Series([1.5, np.nan, 3.0, 4.0, np.nan]) + s2 = Series([np.nan, 3.5, np.nan, 5.0]) + s.update(s2) + + expected = Series([1.5, 3.5, 3.0, 5.0, np.nan]) + tm.assert_series_equal(s, expected) + + # GH 3217 + df = DataFrame([{"a": 1}, {"a": 3, "b": 2}]) + df["c"] = np.nan + + df["c"].update(Series(["foo"], index=[0])) + expected = DataFrame( + [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"] + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "other, dtype, expected", + [ + # other is int + ([61, 63], "int32", Series([10, 61, 12], dtype="int32")), + ([61, 63], "int64", Series([10, 61, 12])), + ([61, 63], float, Series([10.0, 61.0, 12.0])), + ([61, 63], object, Series([10, 61, 12], dtype=object)), + # other is float, but can be cast to int + ([61.0, 63.0], "int32", Series([10, 61, 12], dtype="int32")), + ([61.0, 63.0], "int64", Series([10, 61, 12])), + ([61.0, 63.0], float, Series([10.0, 61.0, 12.0])), + ([61.0, 63.0], object, Series([10, 61.0, 12], dtype=object)), + # others is float, cannot be cast to int + ([61.1, 63.1], "int32", Series([10.0, 61.1, 12.0])), + ([61.1, 63.1], "int64", Series([10.0, 61.1, 12.0])), + ([61.1, 63.1], float, Series([10.0, 61.1, 12.0])), + ([61.1, 63.1], object, Series([10, 61.1, 12], dtype=object)), + # other is object, cannot be cast + ([(61,), (63,)], "int32", Series([10, (61,), 12])), + ([(61,), (63,)], "int64", Series([10, (61,), 12])), + ([(61,), (63,)], float, Series([10.0, (61,), 12.0])), + ([(61,), (63,)], object, Series([10, (61,), 12])), + ], + ) + def test_update_dtypes(self, other, dtype, expected): + + ser = Series([10, 11, 12], dtype=dtype) + other = Series(other, index=[1, 3]) + ser.update(other) + + tm.assert_series_equal(ser, expected) diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index 4afa083e97c7c..adb79f69c2d81 100644 --- a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -2,84 +2,27 @@ import pytest import pandas as pd -from pandas import DataFrame, Series -import pandas._testing as tm +from pandas import Series class TestSeriesCombine: - def test_combine_scalar(self): - # GH 21248 - # Note - combine() with another Series is tested elsewhere because - # it is used when testing operators - s = pd.Series([i * 10 for i in 
range(5)]) - result = s.combine(3, lambda x, y: x + y) - expected = pd.Series([i * 10 + 3 for i in range(5)]) - tm.assert_series_equal(result, expected) - - result = s.combine(22, lambda x, y: min(x, y)) - expected = pd.Series([min(i * 10, 22) for i in range(5)]) - tm.assert_series_equal(result, expected) - - def test_update(self): - s = Series([1.5, np.nan, 3.0, 4.0, np.nan]) - s2 = Series([np.nan, 3.5, np.nan, 5.0]) - s.update(s2) - - expected = Series([1.5, 3.5, 3.0, 5.0, np.nan]) - tm.assert_series_equal(s, expected) - - # GH 3217 - df = DataFrame([{"a": 1}, {"a": 3, "b": 2}]) - df["c"] = np.nan - - df["c"].update(Series(["foo"], index=[0])) - expected = DataFrame( - [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"] - ) - tm.assert_frame_equal(df, expected) - @pytest.mark.parametrize( - "other, dtype, expected", - [ - # other is int - ([61, 63], "int32", pd.Series([10, 61, 12], dtype="int32")), - ([61, 63], "int64", pd.Series([10, 61, 12])), - ([61, 63], float, pd.Series([10.0, 61.0, 12.0])), - ([61, 63], object, pd.Series([10, 61, 12], dtype=object)), - # other is float, but can be cast to int - ([61.0, 63.0], "int32", pd.Series([10, 61, 12], dtype="int32")), - ([61.0, 63.0], "int64", pd.Series([10, 61, 12])), - ([61.0, 63.0], float, pd.Series([10.0, 61.0, 12.0])), - ([61.0, 63.0], object, pd.Series([10, 61.0, 12], dtype=object)), - # others is float, cannot be cast to int - ([61.1, 63.1], "int32", pd.Series([10.0, 61.1, 12.0])), - ([61.1, 63.1], "int64", pd.Series([10.0, 61.1, 12.0])), - ([61.1, 63.1], float, pd.Series([10.0, 61.1, 12.0])), - ([61.1, 63.1], object, pd.Series([10, 61.1, 12], dtype=object)), - # other is object, cannot be cast - ([(61,), (63,)], "int32", pd.Series([10, (61,), 12])), - ([(61,), (63,)], "int64", pd.Series([10, (61,), 12])), - ([(61,), (63,)], float, pd.Series([10.0, (61,), 12.0])), - ([(61,), (63,)], object, pd.Series([10, (61,), 12])), - ], + "dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"] ) - def test_update_dtypes(self, other, dtype, expected): + def test_concat_empty_series_dtypes_match_roundtrips(self, dtype): + dtype = np.dtype(dtype) - s = Series([10, 11, 12], dtype=dtype) - other = Series(other, index=[1, 3]) - s.update(other) + result = pd.concat([Series(dtype=dtype)]) + assert result.dtype == dtype - tm.assert_series_equal(s, expected) + result = pd.concat([Series(dtype=dtype), Series(dtype=dtype)]) + assert result.dtype == dtype def test_concat_empty_series_dtypes_roundtrips(self): # round-tripping with self & like self dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]) - for dtype in dtypes: - assert pd.concat([Series(dtype=dtype)]).dtype == dtype - assert pd.concat([Series(dtype=dtype), Series(dtype=dtype)]).dtype == dtype - def int_result_type(dtype, dtype2): typs = {dtype.kind, dtype2.kind} if not len(typs - {"i", "u", "b"}) and ( @@ -118,35 +61,28 @@ def get_result_type(dtype, dtype2): result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype assert result.kind == expected - def test_concat_empty_series_dtypes(self): + @pytest.mark.parametrize( + "left,right,expected", + [ + # booleans + (np.bool_, np.int32, np.int32), + (np.bool_, np.float32, np.object_), + # datetime-like + ("m8[ns]", np.bool, np.object_), + ("m8[ns]", np.int64, np.object_), + ("M8[ns]", np.bool, np.object_), + ("M8[ns]", np.int64, np.object_), + # categorical + ("category", "category", "category"), + ("category", "object", "object"), + ], + ) + def test_concat_empty_series_dtypes(self, left, 
right, expected): + result = pd.concat([Series(dtype=left), Series(dtype=right)]) + assert result.dtype == expected - # booleans - assert ( - pd.concat([Series(dtype=np.bool_), Series(dtype=np.int32)]).dtype - == np.int32 - ) - assert ( - pd.concat([Series(dtype=np.bool_), Series(dtype=np.float32)]).dtype - == np.object_ - ) + def test_concat_empty_series_dtypes_triple(self): - # datetime-like - assert ( - pd.concat([Series(dtype="m8[ns]"), Series(dtype=np.bool)]).dtype - == np.object_ - ) - assert ( - pd.concat([Series(dtype="m8[ns]"), Series(dtype=np.int64)]).dtype - == np.object_ - ) - assert ( - pd.concat([Series(dtype="M8[ns]"), Series(dtype=np.bool)]).dtype - == np.object_ - ) - assert ( - pd.concat([Series(dtype="M8[ns]"), Series(dtype=np.int64)]).dtype - == np.object_ - ) assert ( pd.concat( [Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)] @@ -154,11 +90,7 @@ def test_concat_empty_series_dtypes(self): == np.object_ ) - # categorical - assert ( - pd.concat([Series(dtype="category"), Series(dtype="category")]).dtype - == "category" - ) + def test_concat_empty_series_dtype_category_with_array(self): # GH 18515 assert ( pd.concat( @@ -166,13 +98,8 @@ def test_concat_empty_series_dtypes(self): ).dtype == "float64" ) - assert ( - pd.concat([Series(dtype="category"), Series(dtype="object")]).dtype - == "object" - ) - # sparse - # TODO: move? + def test_concat_empty_series_dtypes_sparse(self): result = pd.concat( [ Series(dtype="float64").astype("Sparse"),
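The core move in this diff is turning a block of repeated asserts into a table-driven test. A minimal before/after sketch using dtype pairs taken from the PR:

```python
import numpy as np
import pandas as pd
import pytest
from pandas import Series


# Before: each dtype pair was a separate inline assert in one big test.
def test_concat_bool_int32_inline():
    result = pd.concat([Series(dtype=np.bool_), Series(dtype=np.int32)])
    assert result.dtype == np.int32


# After: the pairs become rows of data; each row reports as its own test.
@pytest.mark.parametrize(
    "left,right,expected",
    [(np.bool_, np.int32, np.int32), (np.bool_, np.float32, np.object_)],
)
def test_concat_empty_series_dtypes(left, right, expected):
    result = pd.concat([Series(dtype=left), Series(dtype=right)])
    assert result.dtype == expected
```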
https://api.github.com/repos/pandas-dev/pandas/pulls/32228
2020-02-25T00:46:02Z
2020-02-25T20:41:55Z
2020-02-25T20:41:55Z
2020-02-25T20:46:53Z
REF/TST: method-specific files for rename, reset_index
diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py new file mode 100644 index 0000000000000..e69a562f8214d --- /dev/null +++ b/pandas/tests/frame/methods/test_rename.py @@ -0,0 +1,353 @@ +from collections import ChainMap + +import numpy as np +import pytest + +from pandas import DataFrame, Index, MultiIndex +import pandas._testing as tm + + +class TestRename: + def test_rename(self, float_frame): + mapping = {"A": "a", "B": "b", "C": "c", "D": "d"} + + renamed = float_frame.rename(columns=mapping) + renamed2 = float_frame.rename(columns=str.lower) + + tm.assert_frame_equal(renamed, renamed2) + tm.assert_frame_equal( + renamed2.rename(columns=str.upper), float_frame, check_names=False + ) + + # index + data = {"A": {"foo": 0, "bar": 1}} + + # gets sorted alphabetical + df = DataFrame(data) + renamed = df.rename(index={"foo": "bar", "bar": "foo"}) + tm.assert_index_equal(renamed.index, Index(["foo", "bar"])) + + renamed = df.rename(index=str.upper) + tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"])) + + # have to pass something + with pytest.raises(TypeError, match="must pass an index to rename"): + float_frame.rename() + + # partial columns + renamed = float_frame.rename(columns={"C": "foo", "D": "bar"}) + tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"])) + + # other axis + renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"}) + tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"])) + + # index with name + index = Index(["foo", "bar"], name="name") + renamer = DataFrame(data, index=index) + renamed = renamer.rename(index={"foo": "bar", "bar": "foo"}) + tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name")) + assert renamed.index.name == renamer.index.name + + @pytest.mark.parametrize( + "args,kwargs", + [ + ((ChainMap({"A": "a"}, {"B": "b"}),), dict(axis="columns")), + ((), dict(columns=ChainMap({"A": "a"}, {"B": "b"}))), + ], + ) + def test_rename_chainmap(self, args, kwargs): + # see gh-23859 + colAData = range(1, 11) + colBdata = np.random.randn(10) + + df = DataFrame({"A": colAData, "B": colBdata}) + result = df.rename(*args, **kwargs) + + expected = DataFrame({"a": colAData, "b": colBdata}) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "kwargs, rename_index, rename_columns", + [ + ({"mapper": None, "axis": 0}, True, False), + ({"mapper": None, "axis": 1}, False, True), + ({"index": None}, True, False), + ({"columns": None}, False, True), + ({"index": None, "columns": None}, True, True), + ({}, False, False), + ], + ) + def test_rename_axis_none(self, kwargs, rename_index, rename_columns): + # GH 25034 + index = Index(list("abc"), name="foo") + columns = Index(["col1", "col2"], name="bar") + data = np.arange(6).reshape(3, 2) + df = DataFrame(data, index, columns) + + result = df.rename_axis(**kwargs) + expected_index = index.rename(None) if rename_index else index + expected_columns = columns.rename(None) if rename_columns else columns + expected = DataFrame(data, expected_index, expected_columns) + tm.assert_frame_equal(result, expected) + + def test_rename_multiindex(self): + + tuples_index = [("foo1", "bar1"), ("foo2", "bar2")] + tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")] + index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"]) + columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"]) + df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns) + + # + # without specifying level -> across 
all levels + + renamed = df.rename( + index={"foo1": "foo3", "bar2": "bar3"}, + columns={"fizz1": "fizz3", "buzz2": "buzz3"}, + ) + new_index = MultiIndex.from_tuples( + [("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"] + ) + new_columns = MultiIndex.from_tuples( + [("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"] + ) + tm.assert_index_equal(renamed.index, new_index) + tm.assert_index_equal(renamed.columns, new_columns) + assert renamed.index.names == df.index.names + assert renamed.columns.names == df.columns.names + + # + # with specifying a level (GH13766) + + # dict + new_columns = MultiIndex.from_tuples( + [("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"] + ) + renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0) + tm.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz") + tm.assert_index_equal(renamed.columns, new_columns) + + new_columns = MultiIndex.from_tuples( + [("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"] + ) + renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1) + tm.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz") + tm.assert_index_equal(renamed.columns, new_columns) + + # function + func = str.upper + new_columns = MultiIndex.from_tuples( + [("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"] + ) + renamed = df.rename(columns=func, level=0) + tm.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns=func, level="fizz") + tm.assert_index_equal(renamed.columns, new_columns) + + new_columns = MultiIndex.from_tuples( + [("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"] + ) + renamed = df.rename(columns=func, level=1) + tm.assert_index_equal(renamed.columns, new_columns) + renamed = df.rename(columns=func, level="buzz") + tm.assert_index_equal(renamed.columns, new_columns) + + # index + new_index = MultiIndex.from_tuples( + [("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"] + ) + renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0) + tm.assert_index_equal(renamed.index, new_index) + + def test_rename_nocopy(self, float_frame): + renamed = float_frame.rename(columns={"C": "foo"}, copy=False) + renamed["foo"] = 1.0 + assert (float_frame["C"] == 1.0).all() + + def test_rename_inplace(self, float_frame): + float_frame.rename(columns={"C": "foo"}) + assert "C" in float_frame + assert "foo" not in float_frame + + c_id = id(float_frame["C"]) + float_frame = float_frame.copy() + float_frame.rename(columns={"C": "foo"}, inplace=True) + + assert "C" not in float_frame + assert "foo" in float_frame + assert id(float_frame["foo"]) != c_id + + def test_rename_bug(self): + # GH 5344 + # rename set ref_locs, and set_index was not resetting + df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}) + df = df.rename(columns={0: "a"}) + df = df.rename(columns={1: "b"}) + df = df.set_index(["a", "b"]) + df.columns = ["2001-01-01"] + expected = DataFrame( + [[1], [2]], + index=MultiIndex.from_tuples( + [("foo", "bah"), ("bar", "bas")], names=["a", "b"] + ), + columns=["2001-01-01"], + ) + tm.assert_frame_equal(df, expected) + + def test_rename_bug2(self): + # GH 19497 + # rename was changing Index to MultiIndex if Index contained tuples + + df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"]) + df = df.rename({(1, 1): (5, 4)}, 
axis="index") + expected = DataFrame( + data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"] + ) + tm.assert_frame_equal(df, expected) + + def test_rename_errors_raises(self): + df = DataFrame(columns=["A", "B", "C", "D"]) + with pytest.raises(KeyError, match="'E'] not found in axis"): + df.rename(columns={"A": "a", "E": "e"}, errors="raise") + + @pytest.mark.parametrize( + "mapper, errors, expected_columns", + [ + ({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]), + ({"A": "a"}, "raise", ["a", "B", "C", "D"]), + (str.lower, "raise", ["a", "b", "c", "d"]), + ], + ) + def test_rename_errors(self, mapper, errors, expected_columns): + # GH 13473 + # rename now works with errors parameter + df = DataFrame(columns=["A", "B", "C", "D"]) + result = df.rename(columns=mapper, errors=errors) + expected = DataFrame(columns=expected_columns) + tm.assert_frame_equal(result, expected) + + def test_rename_objects(self, float_string_frame): + renamed = float_string_frame.rename(columns=str.upper) + + assert "FOO" in renamed + assert "foo" not in renamed + + def test_rename_axis_style(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"]) + expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"]) + + result = df.rename(str.lower, axis=1) + tm.assert_frame_equal(result, expected) + + result = df.rename(str.lower, axis="columns") + tm.assert_frame_equal(result, expected) + + result = df.rename({"A": "a", "B": "b"}, axis=1) + tm.assert_frame_equal(result, expected) + + result = df.rename({"A": "a", "B": "b"}, axis="columns") + tm.assert_frame_equal(result, expected) + + # Index + expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"]) + result = df.rename(str.lower, axis=0) + tm.assert_frame_equal(result, expected) + + result = df.rename(str.lower, axis="index") + tm.assert_frame_equal(result, expected) + + result = df.rename({"X": "x", "Y": "y"}, axis=0) + tm.assert_frame_equal(result, expected) + + result = df.rename({"X": "x", "Y": "y"}, axis="index") + tm.assert_frame_equal(result, expected) + + result = df.rename(mapper=str.lower, axis="index") + tm.assert_frame_equal(result, expected) + + def test_rename_mapper_multi(self): + df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index( + ["A", "B"] + ) + result = df.rename(str.upper) + expected = df.rename(index=str.upper) + tm.assert_frame_equal(result, expected) + + def test_rename_positional_named(self): + # https://github.com/pandas-dev/pandas/issues/12392 + df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"]) + result = df.rename(index=str.lower, columns=str.upper) + expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"]) + tm.assert_frame_equal(result, expected) + + def test_rename_axis_style_raises(self): + # see gh-12392 + df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"]) + + # Named target and axis + over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'" + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(index=str.lower, axis=1) + + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(index=str.lower, axis="columns") + + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(columns=str.lower, axis="columns") + + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(index=str.lower, axis=0) + + # Multiple targets and axis + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(str.lower, index=str.lower, axis="columns") 
+ + # Too many targets + over_spec_msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'" + with pytest.raises(TypeError, match=over_spec_msg): + df.rename(str.lower, index=str.lower, columns=str.lower) + + # Duplicates + with pytest.raises(TypeError, match="multiple values"): + df.rename(id, mapper=id) + + def test_rename_positional_raises(self): + # GH 29136 + df = DataFrame(columns=["A", "B"]) + msg = r"rename\(\) takes from 1 to 2 positional arguments" + + with pytest.raises(TypeError, match=msg): + df.rename(None, str.lower) + + def test_rename_no_mappings_raises(self): + # GH 29136 + df = DataFrame([[1]]) + msg = "must pass an index to rename" + with pytest.raises(TypeError, match=msg): + df.rename() + + with pytest.raises(TypeError, match=msg): + df.rename(None, index=None) + + with pytest.raises(TypeError, match=msg): + df.rename(None, columns=None) + + with pytest.raises(TypeError, match=msg): + df.rename(None, columns=None, index=None) + + def test_rename_mapper_and_positional_arguments_raises(self): + # GH 29136 + df = DataFrame([[1]]) + msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'" + with pytest.raises(TypeError, match=msg): + df.rename({}, index={}) + + with pytest.raises(TypeError, match=msg): + df.rename({}, columns={}) + + with pytest.raises(TypeError, match=msg): + df.rename({}, columns={}, index={}) diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py new file mode 100644 index 0000000000000..6586c19af2539 --- /dev/null +++ b/pandas/tests/frame/methods/test_reset_index.py @@ -0,0 +1,299 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + IntervalIndex, + MultiIndex, + RangeIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestResetIndex: + def test_reset_index_tz(self, tz_aware_fixture): + # GH 3950 + # reset_index with single level + tz = tz_aware_fixture + idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx") + df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx) + + expected = DataFrame( + { + "idx": [ + datetime(2011, 1, 1), + datetime(2011, 1, 2), + datetime(2011, 1, 3), + datetime(2011, 1, 4), + datetime(2011, 1, 5), + ], + "a": range(5), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx", "a", "b"], + ) + expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz)) + tm.assert_frame_equal(df.reset_index(), expected) + + def test_reset_index_with_intervals(self): + idx = IntervalIndex.from_breaks(np.arange(11), name="x") + original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]] + + result = original.set_index("x") + expected = DataFrame({"y": np.arange(10)}, index=idx) + tm.assert_frame_equal(result, expected) + + result2 = result.reset_index() + tm.assert_frame_equal(result2, original) + + def test_reset_index(self, float_frame): + stacked = float_frame.stack()[::2] + stacked = DataFrame({"foo": stacked, "bar": stacked}) + + names = ["first", "second"] + stacked.index.names = names + deleveled = stacked.reset_index() + for i, (lev, level_codes) in enumerate( + zip(stacked.index.levels, stacked.index.codes) + ): + values = lev.take(level_codes) + name = names[i] + tm.assert_index_equal(values, Index(deleveled[name])) + + stacked.index.names = [None, None] + deleveled2 = stacked.reset_index() + tm.assert_series_equal( + deleveled["first"], deleveled2["level_0"], check_names=False + ) + tm.assert_series_equal( + 
deleveled["second"], deleveled2["level_1"], check_names=False + ) + + # default name assigned + rdf = float_frame.reset_index() + exp = Series(float_frame.index.values, name="index") + tm.assert_series_equal(rdf["index"], exp) + + # default name assigned, corner case + df = float_frame.copy() + df["index"] = "foo" + rdf = df.reset_index() + exp = Series(float_frame.index.values, name="level_0") + tm.assert_series_equal(rdf["level_0"], exp) + + # but this is ok + float_frame.index.name = "index" + deleveled = float_frame.reset_index() + tm.assert_series_equal(deleveled["index"], Series(float_frame.index)) + tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled)))) + + # preserve column names + float_frame.columns.name = "columns" + resetted = float_frame.reset_index() + assert resetted.columns.name == "columns" + + # only remove certain columns + df = float_frame.reset_index().set_index(["index", "A", "B"]) + rs = df.reset_index(["A", "B"]) + + # TODO should reset_index check_names ? + tm.assert_frame_equal(rs, float_frame, check_names=False) + + rs = df.reset_index(["index", "A", "B"]) + tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False) + + rs = df.reset_index(["index", "A", "B"]) + tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False) + + rs = df.reset_index("A") + xp = float_frame.reset_index().set_index(["index", "B"]) + tm.assert_frame_equal(rs, xp, check_names=False) + + # test resetting in place + df = float_frame.copy() + resetted = float_frame.reset_index() + df.reset_index(inplace=True) + tm.assert_frame_equal(df, resetted, check_names=False) + + df = float_frame.reset_index().set_index(["index", "A", "B"]) + rs = df.reset_index("A", drop=True) + xp = float_frame.copy() + del xp["A"] + xp = xp.set_index(["B"], append=True) + tm.assert_frame_equal(rs, xp, check_names=False) + + def test_reset_index_name(self): + df = DataFrame( + [[1, 2, 3, 4], [5, 6, 7, 8]], + columns=["A", "B", "C", "D"], + index=Index(range(2), name="x"), + ) + assert df.reset_index().index.name is None + assert df.reset_index(drop=True).index.name is None + df.reset_index(inplace=True) + assert df.index.name is None + + def test_reset_index_level(self): + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"]) + + for levels in ["A", "B"], [0, 1]: + # With MultiIndex + result = df.set_index(["A", "B"]).reset_index(level=levels[0]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = df.set_index(["A", "B"]).reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = df.set_index(["A", "B"]).reset_index(level=levels) + tm.assert_frame_equal(result, df) + + result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True) + tm.assert_frame_equal(result, df[["C", "D"]]) + + # With single-level Index (GH 16263) + result = df.set_index("A").reset_index(level=levels[0]) + tm.assert_frame_equal(result, df) + + result = df.set_index("A").reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df) + + result = df.set_index(["A"]).reset_index(level=levels[0], drop=True) + tm.assert_frame_equal(result, df[["B", "C", "D"]]) + + # Missing levels - for both MultiIndex and single-level Index: + for idx_lev in ["A", "B"], ["A"]: + with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"): + df.set_index(idx_lev).reset_index(level=["A", "E"]) + with pytest.raises(IndexError, match="Too many levels"): + df.set_index(idx_lev).reset_index(level=[0, 1, 2]) + + def test_reset_index_right_dtype(self): + 
time = np.arange(0.0, 10, np.sqrt(2) / 2) + s1 = Series( + (9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed" + ) + df = DataFrame(s1) + + resetted = s1.reset_index() + assert resetted["time"].dtype == np.float64 + + resetted = df.reset_index() + assert resetted["time"].dtype == np.float64 + + def test_reset_index_multiindex_col(self): + vals = np.random.randn(3, 3).astype(object) + idx = ["x", "y", "z"] + full = np.hstack(([[x] for x in idx], vals)) + df = DataFrame( + vals, + Index(idx, name="a"), + columns=[["b", "b", "c"], ["mean", "median", "mean"]], + ) + rs = df.reset_index() + xp = DataFrame( + full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index(col_fill=None) + xp = DataFrame( + full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index(col_level=1, col_fill="blah") + xp = DataFrame( + full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + df = DataFrame( + vals, + MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]), + columns=[["b", "b", "c"], ["mean", "median", "mean"]], + ) + rs = df.reset_index("a") + xp = DataFrame( + full, + Index([0, 1, 2], name="d"), + columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index("a", col_fill=None) + xp = DataFrame( + full, + Index(range(3), name="d"), + columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index("a", col_fill="blah", col_level=1) + xp = DataFrame( + full, + Index(range(3), name="d"), + columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + def test_reset_index_multiindex_nan(self): + # GH#6322, testing reset_index on MultiIndexes + # when we have a nan or all nan + df = DataFrame( + {"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)} + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame( + {"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)} + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]}) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame( + { + "A": ["a", "b", "c"], + "B": [np.nan, np.nan, np.nan], + "C": np.random.rand(3), + } + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + def test_reset_index_with_datetimeindex_cols(self): + # GH#5818 + df = DataFrame( + [[1, 2], [3, 4]], + columns=date_range("1/1/2013", "1/2/2013"), + index=["A", "B"], + ) + + result = df.reset_index() + expected = DataFrame( + [["A", 1, 2], ["B", 3, 4]], + columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)], + ) + tm.assert_frame_equal(result, expected) + + def test_reset_index_range(self): + # GH#12071 + df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2)) + result = df.reset_index() + assert isinstance(result.index, RangeIndex) + expected = DataFrame( + [[0, 0, 0], [1, 1, 1]], + columns=["index", "A", "B"], + index=RangeIndex(stop=2), + ) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index 0c19a38bb5fa2..751ed1dfdd847 100644 --- 
a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -1,4 +1,3 @@ -from collections import ChainMap from datetime import datetime, timedelta import inspect @@ -18,7 +17,6 @@ Index, IntervalIndex, MultiIndex, - RangeIndex, Series, Timestamp, cut, @@ -533,30 +531,6 @@ def test_convert_dti_to_series(self): df.pop("ts") tm.assert_frame_equal(df, expected) - def test_reset_index_tz(self, tz_aware_fixture): - # GH 3950 - # reset_index with single level - tz = tz_aware_fixture - idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx") - df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx) - - expected = DataFrame( - { - "idx": [ - datetime(2011, 1, 1), - datetime(2011, 1, 2), - datetime(2011, 1, 3), - datetime(2011, 1, 4), - datetime(2011, 1, 5), - ], - "a": range(5), - "b": ["A", "B", "C", "D", "E"], - }, - columns=["idx", "a", "b"], - ) - expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz)) - tm.assert_frame_equal(df.reset_index(), expected) - def test_set_index_timezone(self): # GH 12358 # tz-aware Series should retain the tz @@ -583,17 +557,6 @@ def test_set_index_dst(self): exp = DataFrame({"b": [3, 4, 5]}, index=exp_index) tm.assert_frame_equal(res, exp) - def test_reset_index_with_intervals(self): - idx = IntervalIndex.from_breaks(np.arange(11), name="x") - original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]] - - result = original.set_index("x") - expected = DataFrame({"y": np.arange(10)}, index=idx) - tm.assert_frame_equal(result, expected) - - result2 = result.reset_index() - tm.assert_frame_equal(result2, original) - def test_set_index_multiindexcolumns(self): columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)]) df = DataFrame(np.random.randn(3, 3), columns=columns) @@ -652,65 +615,6 @@ def test_dti_set_index_reindex(self): # Renaming - def test_rename(self, float_frame): - mapping = {"A": "a", "B": "b", "C": "c", "D": "d"} - - renamed = float_frame.rename(columns=mapping) - renamed2 = float_frame.rename(columns=str.lower) - - tm.assert_frame_equal(renamed, renamed2) - tm.assert_frame_equal( - renamed2.rename(columns=str.upper), float_frame, check_names=False - ) - - # index - data = {"A": {"foo": 0, "bar": 1}} - - # gets sorted alphabetical - df = DataFrame(data) - renamed = df.rename(index={"foo": "bar", "bar": "foo"}) - tm.assert_index_equal(renamed.index, Index(["foo", "bar"])) - - renamed = df.rename(index=str.upper) - tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"])) - - # have to pass something - with pytest.raises(TypeError, match="must pass an index to rename"): - float_frame.rename() - - # partial columns - renamed = float_frame.rename(columns={"C": "foo", "D": "bar"}) - tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"])) - - # other axis - renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"}) - tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"])) - - # index with name - index = Index(["foo", "bar"], name="name") - renamer = DataFrame(data, index=index) - renamed = renamer.rename(index={"foo": "bar", "bar": "foo"}) - tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name")) - assert renamed.index.name == renamer.index.name - - @pytest.mark.parametrize( - "args,kwargs", - [ - ((ChainMap({"A": "a"}, {"B": "b"}),), dict(axis="columns")), - ((), dict(columns=ChainMap({"A": "a"}, {"B": "b"}))), - ], - ) - def test_rename_chainmap(self, args, kwargs): - # see gh-23859 - colAData = range(1, 11) - 
colBdata = np.random.randn(10) - - df = DataFrame({"A": colAData, "B": colBdata}) - result = df.rename(*args, **kwargs) - - expected = DataFrame({"a": colAData, "b": colBdata}) - tm.assert_frame_equal(result, expected) - def test_rename_axis_inplace(self, float_frame): # GH 15704 expected = float_frame.rename_axis("foo") @@ -785,168 +689,6 @@ def test_rename_axis_mapper(self): with pytest.raises(TypeError, match="bogus"): df.rename_axis(bogus=None) - @pytest.mark.parametrize( - "kwargs, rename_index, rename_columns", - [ - ({"mapper": None, "axis": 0}, True, False), - ({"mapper": None, "axis": 1}, False, True), - ({"index": None}, True, False), - ({"columns": None}, False, True), - ({"index": None, "columns": None}, True, True), - ({}, False, False), - ], - ) - def test_rename_axis_none(self, kwargs, rename_index, rename_columns): - # GH 25034 - index = Index(list("abc"), name="foo") - columns = Index(["col1", "col2"], name="bar") - data = np.arange(6).reshape(3, 2) - df = DataFrame(data, index, columns) - - result = df.rename_axis(**kwargs) - expected_index = index.rename(None) if rename_index else index - expected_columns = columns.rename(None) if rename_columns else columns - expected = DataFrame(data, expected_index, expected_columns) - tm.assert_frame_equal(result, expected) - - def test_rename_multiindex(self): - - tuples_index = [("foo1", "bar1"), ("foo2", "bar2")] - tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")] - index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"]) - columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"]) - df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns) - - # - # without specifying level -> across all levels - - renamed = df.rename( - index={"foo1": "foo3", "bar2": "bar3"}, - columns={"fizz1": "fizz3", "buzz2": "buzz3"}, - ) - new_index = MultiIndex.from_tuples( - [("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"] - ) - new_columns = MultiIndex.from_tuples( - [("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"] - ) - tm.assert_index_equal(renamed.index, new_index) - tm.assert_index_equal(renamed.columns, new_columns) - assert renamed.index.names == df.index.names - assert renamed.columns.names == df.columns.names - - # - # with specifying a level (GH13766) - - # dict - new_columns = MultiIndex.from_tuples( - [("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"] - ) - renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0) - tm.assert_index_equal(renamed.columns, new_columns) - renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz") - tm.assert_index_equal(renamed.columns, new_columns) - - new_columns = MultiIndex.from_tuples( - [("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"] - ) - renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1) - tm.assert_index_equal(renamed.columns, new_columns) - renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz") - tm.assert_index_equal(renamed.columns, new_columns) - - # function - func = str.upper - new_columns = MultiIndex.from_tuples( - [("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"] - ) - renamed = df.rename(columns=func, level=0) - tm.assert_index_equal(renamed.columns, new_columns) - renamed = df.rename(columns=func, level="fizz") - tm.assert_index_equal(renamed.columns, new_columns) - - new_columns = MultiIndex.from_tuples( - [("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"] - ) - renamed = 
df.rename(columns=func, level=1) - tm.assert_index_equal(renamed.columns, new_columns) - renamed = df.rename(columns=func, level="buzz") - tm.assert_index_equal(renamed.columns, new_columns) - - # index - new_index = MultiIndex.from_tuples( - [("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"] - ) - renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0) - tm.assert_index_equal(renamed.index, new_index) - - def test_rename_nocopy(self, float_frame): - renamed = float_frame.rename(columns={"C": "foo"}, copy=False) - renamed["foo"] = 1.0 - assert (float_frame["C"] == 1.0).all() - - def test_rename_inplace(self, float_frame): - float_frame.rename(columns={"C": "foo"}) - assert "C" in float_frame - assert "foo" not in float_frame - - c_id = id(float_frame["C"]) - float_frame = float_frame.copy() - float_frame.rename(columns={"C": "foo"}, inplace=True) - - assert "C" not in float_frame - assert "foo" in float_frame - assert id(float_frame["foo"]) != c_id - - def test_rename_bug(self): - # GH 5344 - # rename set ref_locs, and set_index was not resetting - df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}) - df = df.rename(columns={0: "a"}) - df = df.rename(columns={1: "b"}) - df = df.set_index(["a", "b"]) - df.columns = ["2001-01-01"] - expected = DataFrame( - [[1], [2]], - index=MultiIndex.from_tuples( - [("foo", "bah"), ("bar", "bas")], names=["a", "b"] - ), - columns=["2001-01-01"], - ) - tm.assert_frame_equal(df, expected) - - def test_rename_bug2(self): - # GH 19497 - # rename was changing Index to MultiIndex if Index contained tuples - - df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"]) - df = df.rename({(1, 1): (5, 4)}, axis="index") - expected = DataFrame( - data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"] - ) - tm.assert_frame_equal(df, expected) - - def test_rename_errors_raises(self): - df = DataFrame(columns=["A", "B", "C", "D"]) - with pytest.raises(KeyError, match="'E'] not found in axis"): - df.rename(columns={"A": "a", "E": "e"}, errors="raise") - - @pytest.mark.parametrize( - "mapper, errors, expected_columns", - [ - ({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]), - ({"A": "a"}, "raise", ["a", "B", "C", "D"]), - (str.lower, "raise", ["a", "b", "c", "d"]), - ], - ) - def test_rename_errors(self, mapper, errors, expected_columns): - # GH 13473 - # rename now works with errors parameter - df = DataFrame(columns=["A", "B", "C", "D"]) - result = df.rename(columns=mapper, errors=errors) - expected = DataFrame(columns=expected_columns) - tm.assert_frame_equal(result, expected) - def test_reorder_levels(self): index = MultiIndex( levels=[["bar"], ["one", "two", "three"], [0, 1]], @@ -985,253 +727,6 @@ def test_reorder_levels(self): result = df.reorder_levels(["L0", "L0", "L0"]) tm.assert_frame_equal(result, expected) - def test_reset_index(self, float_frame): - stacked = float_frame.stack()[::2] - stacked = DataFrame({"foo": stacked, "bar": stacked}) - - names = ["first", "second"] - stacked.index.names = names - deleveled = stacked.reset_index() - for i, (lev, level_codes) in enumerate( - zip(stacked.index.levels, stacked.index.codes) - ): - values = lev.take(level_codes) - name = names[i] - tm.assert_index_equal(values, Index(deleveled[name])) - - stacked.index.names = [None, None] - deleveled2 = stacked.reset_index() - tm.assert_series_equal( - deleveled["first"], deleveled2["level_0"], check_names=False - ) - tm.assert_series_equal( - deleveled["second"], deleveled2["level_1"], 
check_names=False - ) - - # default name assigned - rdf = float_frame.reset_index() - exp = Series(float_frame.index.values, name="index") - tm.assert_series_equal(rdf["index"], exp) - - # default name assigned, corner case - df = float_frame.copy() - df["index"] = "foo" - rdf = df.reset_index() - exp = Series(float_frame.index.values, name="level_0") - tm.assert_series_equal(rdf["level_0"], exp) - - # but this is ok - float_frame.index.name = "index" - deleveled = float_frame.reset_index() - tm.assert_series_equal(deleveled["index"], Series(float_frame.index)) - tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled)))) - - # preserve column names - float_frame.columns.name = "columns" - resetted = float_frame.reset_index() - assert resetted.columns.name == "columns" - - # only remove certain columns - df = float_frame.reset_index().set_index(["index", "A", "B"]) - rs = df.reset_index(["A", "B"]) - - # TODO should reset_index check_names ? - tm.assert_frame_equal(rs, float_frame, check_names=False) - - rs = df.reset_index(["index", "A", "B"]) - tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False) - - rs = df.reset_index(["index", "A", "B"]) - tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False) - - rs = df.reset_index("A") - xp = float_frame.reset_index().set_index(["index", "B"]) - tm.assert_frame_equal(rs, xp, check_names=False) - - # test resetting in place - df = float_frame.copy() - resetted = float_frame.reset_index() - df.reset_index(inplace=True) - tm.assert_frame_equal(df, resetted, check_names=False) - - df = float_frame.reset_index().set_index(["index", "A", "B"]) - rs = df.reset_index("A", drop=True) - xp = float_frame.copy() - del xp["A"] - xp = xp.set_index(["B"], append=True) - tm.assert_frame_equal(rs, xp, check_names=False) - - def test_reset_index_name(self): - df = DataFrame( - [[1, 2, 3, 4], [5, 6, 7, 8]], - columns=["A", "B", "C", "D"], - index=Index(range(2), name="x"), - ) - assert df.reset_index().index.name is None - assert df.reset_index(drop=True).index.name is None - df.reset_index(inplace=True) - assert df.index.name is None - - def test_reset_index_level(self): - df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"]) - - for levels in ["A", "B"], [0, 1]: - # With MultiIndex - result = df.set_index(["A", "B"]).reset_index(level=levels[0]) - tm.assert_frame_equal(result, df.set_index("B")) - - result = df.set_index(["A", "B"]).reset_index(level=levels[:1]) - tm.assert_frame_equal(result, df.set_index("B")) - - result = df.set_index(["A", "B"]).reset_index(level=levels) - tm.assert_frame_equal(result, df) - - result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True) - tm.assert_frame_equal(result, df[["C", "D"]]) - - # With single-level Index (GH 16263) - result = df.set_index("A").reset_index(level=levels[0]) - tm.assert_frame_equal(result, df) - - result = df.set_index("A").reset_index(level=levels[:1]) - tm.assert_frame_equal(result, df) - - result = df.set_index(["A"]).reset_index(level=levels[0], drop=True) - tm.assert_frame_equal(result, df[["B", "C", "D"]]) - - # Missing levels - for both MultiIndex and single-level Index: - for idx_lev in ["A", "B"], ["A"]: - with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"): - df.set_index(idx_lev).reset_index(level=["A", "E"]) - with pytest.raises(IndexError, match="Too many levels"): - df.set_index(idx_lev).reset_index(level=[0, 1, 2]) - - def test_reset_index_right_dtype(self): - time = np.arange(0.0, 10, np.sqrt(2) / 2) - s1 
= Series( - (9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed" - ) - df = DataFrame(s1) - - resetted = s1.reset_index() - assert resetted["time"].dtype == np.float64 - - resetted = df.reset_index() - assert resetted["time"].dtype == np.float64 - - def test_reset_index_multiindex_col(self): - vals = np.random.randn(3, 3).astype(object) - idx = ["x", "y", "z"] - full = np.hstack(([[x] for x in idx], vals)) - df = DataFrame( - vals, - Index(idx, name="a"), - columns=[["b", "b", "c"], ["mean", "median", "mean"]], - ) - rs = df.reset_index() - xp = DataFrame( - full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]] - ) - tm.assert_frame_equal(rs, xp) - - rs = df.reset_index(col_fill=None) - xp = DataFrame( - full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]] - ) - tm.assert_frame_equal(rs, xp) - - rs = df.reset_index(col_level=1, col_fill="blah") - xp = DataFrame( - full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]] - ) - tm.assert_frame_equal(rs, xp) - - df = DataFrame( - vals, - MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]), - columns=[["b", "b", "c"], ["mean", "median", "mean"]], - ) - rs = df.reset_index("a") - xp = DataFrame( - full, - Index([0, 1, 2], name="d"), - columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]], - ) - tm.assert_frame_equal(rs, xp) - - rs = df.reset_index("a", col_fill=None) - xp = DataFrame( - full, - Index(range(3), name="d"), - columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]], - ) - tm.assert_frame_equal(rs, xp) - - rs = df.reset_index("a", col_fill="blah", col_level=1) - xp = DataFrame( - full, - Index(range(3), name="d"), - columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]], - ) - tm.assert_frame_equal(rs, xp) - - def test_reset_index_multiindex_nan(self): - # GH6322, testing reset_index on MultiIndexes - # when we have a nan or all nan - df = DataFrame( - {"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)} - ) - rs = df.set_index(["A", "B"]).reset_index() - tm.assert_frame_equal(rs, df) - - df = DataFrame( - {"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)} - ) - rs = df.set_index(["A", "B"]).reset_index() - tm.assert_frame_equal(rs, df) - - df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]}) - rs = df.set_index(["A", "B"]).reset_index() - tm.assert_frame_equal(rs, df) - - df = DataFrame( - { - "A": ["a", "b", "c"], - "B": [np.nan, np.nan, np.nan], - "C": np.random.rand(3), - } - ) - rs = df.set_index(["A", "B"]).reset_index() - tm.assert_frame_equal(rs, df) - - def test_reset_index_with_datetimeindex_cols(self): - # GH5818 - # - df = DataFrame( - [[1, 2], [3, 4]], - columns=date_range("1/1/2013", "1/2/2013"), - index=["A", "B"], - ) - - result = df.reset_index() - expected = DataFrame( - [["A", 1, 2], ["B", 3, 4]], - columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)], - ) - tm.assert_frame_equal(result, expected) - - def test_reset_index_range(self): - # GH 12071 - df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2)) - result = df.reset_index() - assert isinstance(result.index, RangeIndex) - expected = DataFrame( - [[0, 0, 0], [1, 1, 1]], - columns=["index", "A", "B"], - index=RangeIndex(stop=2), - ) - tm.assert_frame_equal(result, expected) - def test_set_index_names(self): df = tm.makeDataFrame() df.index.name = "name" @@ -1262,92 +757,6 @@ def test_set_index_names(self): # Check equality 
tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2) - def test_rename_objects(self, float_string_frame): - renamed = float_string_frame.rename(columns=str.upper) - - assert "FOO" in renamed - assert "foo" not in renamed - - def test_rename_axis_style(self): - # https://github.com/pandas-dev/pandas/issues/12392 - df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"]) - expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"]) - - result = df.rename(str.lower, axis=1) - tm.assert_frame_equal(result, expected) - - result = df.rename(str.lower, axis="columns") - tm.assert_frame_equal(result, expected) - - result = df.rename({"A": "a", "B": "b"}, axis=1) - tm.assert_frame_equal(result, expected) - - result = df.rename({"A": "a", "B": "b"}, axis="columns") - tm.assert_frame_equal(result, expected) - - # Index - expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"]) - result = df.rename(str.lower, axis=0) - tm.assert_frame_equal(result, expected) - - result = df.rename(str.lower, axis="index") - tm.assert_frame_equal(result, expected) - - result = df.rename({"X": "x", "Y": "y"}, axis=0) - tm.assert_frame_equal(result, expected) - - result = df.rename({"X": "x", "Y": "y"}, axis="index") - tm.assert_frame_equal(result, expected) - - result = df.rename(mapper=str.lower, axis="index") - tm.assert_frame_equal(result, expected) - - def test_rename_mapper_multi(self): - df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index( - ["A", "B"] - ) - result = df.rename(str.upper) - expected = df.rename(index=str.upper) - tm.assert_frame_equal(result, expected) - - def test_rename_positional_named(self): - # https://github.com/pandas-dev/pandas/issues/12392 - df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"]) - result = df.rename(index=str.lower, columns=str.upper) - expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"]) - tm.assert_frame_equal(result, expected) - - def test_rename_axis_style_raises(self): - # see gh-12392 - df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"]) - - # Named target and axis - over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'" - with pytest.raises(TypeError, match=over_spec_msg): - df.rename(index=str.lower, axis=1) - - with pytest.raises(TypeError, match=over_spec_msg): - df.rename(index=str.lower, axis="columns") - - with pytest.raises(TypeError, match=over_spec_msg): - df.rename(columns=str.lower, axis="columns") - - with pytest.raises(TypeError, match=over_spec_msg): - df.rename(index=str.lower, axis=0) - - # Multiple targets and axis - with pytest.raises(TypeError, match=over_spec_msg): - df.rename(str.lower, index=str.lower, axis="columns") - - # Too many targets - over_spec_msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'" - with pytest.raises(TypeError, match=over_spec_msg): - df.rename(str.lower, index=str.lower, columns=str.lower) - - # Duplicates - with pytest.raises(TypeError, match="multiple values"): - df.rename(id, mapper=id) - def test_reindex_api_equivalence(self): # equivalence of the labels/axis and index/columns API's df = DataFrame( @@ -1376,43 +785,6 @@ def test_reindex_api_equivalence(self): for res in [res2, res3]: tm.assert_frame_equal(res1, res) - def test_rename_positional_raises(self): - # GH 29136 - df = DataFrame(columns=["A", "B"]) - msg = r"rename\(\) takes from 1 to 2 positional arguments" - - with pytest.raises(TypeError, match=msg): - df.rename(None, str.lower) - - def test_rename_no_mappings_raises(self): - # GH 
29136 - df = DataFrame([[1]]) - msg = "must pass an index to rename" - with pytest.raises(TypeError, match=msg): - df.rename() - - with pytest.raises(TypeError, match=msg): - df.rename(None, index=None) - - with pytest.raises(TypeError, match=msg): - df.rename(None, columns=None) - - with pytest.raises(TypeError, match=msg): - df.rename(None, columns=None, index=None) - - def test_rename_mapper_and_positional_arguments_raises(self): - # GH 29136 - df = DataFrame([[1]]) - msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'" - with pytest.raises(TypeError, match=msg): - df.rename({}, index={}) - - with pytest.raises(TypeError, match=msg): - df.rename({}, columns={}) - - with pytest.raises(TypeError, match=msg): - df.rename({}, columns={}, index={}) - def test_assign_columns(self, float_frame): float_frame["hi"] = "there" diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py new file mode 100644 index 0000000000000..60182f509e657 --- /dev/null +++ b/pandas/tests/series/methods/test_rename.py @@ -0,0 +1,91 @@ +from datetime import datetime + +import numpy as np + +from pandas import Index, Series +import pandas._testing as tm + + +class TestRename: + def test_rename(self, datetime_series): + ts = datetime_series + renamer = lambda x: x.strftime("%Y%m%d") + renamed = ts.rename(renamer) + assert renamed.index[0] == renamer(ts.index[0]) + + # dict + rename_dict = dict(zip(ts.index, renamed.index)) + renamed2 = ts.rename(rename_dict) + tm.assert_series_equal(renamed, renamed2) + + # partial dict + s = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64") + renamed = s.rename({"b": "foo", "d": "bar"}) + tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"])) + + # index with name + renamer = Series( + np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64" + ) + renamed = renamer.rename({}) + assert renamed.index.name == renamer.index.name + + def test_rename_by_series(self): + s = Series(range(5), name="foo") + renamer = Series({1: 10, 2: 20}) + result = s.rename(renamer) + expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo") + tm.assert_series_equal(result, expected) + + def test_rename_set_name(self): + s = Series(range(4), index=list("abcd")) + for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: + result = s.rename(name) + assert result.name == name + tm.assert_numpy_array_equal(result.index.values, s.index.values) + assert s.name is None + + def test_rename_set_name_inplace(self): + s = Series(range(3), index=list("abc")) + for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: + s.rename(name, inplace=True) + assert s.name == name + + exp = np.array(["a", "b", "c"], dtype=np.object_) + tm.assert_numpy_array_equal(s.index.values, exp) + + def test_rename_axis_supported(self): + # Supporting axis for compatibility, detailed in GH-18589 + s = Series(range(5)) + s.rename({}, axis=0) + s.rename({}, axis="index") + # FIXME: don't leave commented-out code + # TODO: clean up shared index validation + # with pytest.raises(ValueError, match="No axis named 5"): + # s.rename({}, axis=5) + + def test_rename_inplace(self, datetime_series): + renamer = lambda x: x.strftime("%Y%m%d") + expected = renamer(datetime_series.index[0]) + + datetime_series.rename(renamer, inplace=True) + assert datetime_series.index[0] == expected + + def test_rename_with_custom_indexer(self): + # GH 27814 + class MyIndexer: + pass + + ix = MyIndexer() + s = Series([1, 2, 3]).rename(ix) +
assert s.name is ix + + def test_rename_with_custom_indexer_inplace(self): + # GH 27814 + class MyIndexer: + pass + + ix = MyIndexer() + s = Series([1, 2, 3]) + s.rename(ix, inplace=True) + assert s.name is ix diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py new file mode 100644 index 0000000000000..f0c4895ad7c10 --- /dev/null +++ b/pandas/tests/series/methods/test_reset_index.py @@ -0,0 +1,110 @@ +import numpy as np +import pytest + +from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series +import pandas._testing as tm + + +class TestResetIndex: + def test_reset_index(self): + df = tm.makeDataFrame()[:5] + ser = df.stack() + ser.index.names = ["hash", "category"] + + ser.name = "value" + df = ser.reset_index() + assert "value" in df + + df = ser.reset_index(name="value2") + assert "value2" in df + + # check inplace + s = ser.reset_index(drop=True) + s2 = ser + s2.reset_index(drop=True, inplace=True) + tm.assert_series_equal(s, s2) + + # level + index = MultiIndex( + levels=[["bar"], ["one", "two", "three"], [0, 1]], + codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], + ) + s = Series(np.random.randn(6), index=index) + rs = s.reset_index(level=1) + assert len(rs.columns) == 2 + + rs = s.reset_index(level=[0, 2], drop=True) + tm.assert_index_equal(rs.index, Index(index.get_level_values(1))) + assert isinstance(rs, Series) + + def test_reset_index_name(self): + s = Series([1, 2, 3], index=Index(range(3), name="x")) + assert s.reset_index().index.name is None + assert s.reset_index(drop=True).index.name is None + + def test_reset_index_level(self): + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) + + for levels in ["A", "B"], [0, 1]: + # With MultiIndex + s = df.set_index(["A", "B"])["C"] + + result = s.reset_index(level=levels[0]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = s.reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = s.reset_index(level=levels) + tm.assert_frame_equal(result, df) + + result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True) + tm.assert_frame_equal(result, df[["C"]]) + + with pytest.raises(KeyError, match="Level E "): + s.reset_index(level=["A", "E"]) + + # With single-level Index + s = df.set_index("A")["B"] + + result = s.reset_index(level=levels[0]) + tm.assert_frame_equal(result, df[["A", "B"]]) + + result = s.reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df[["A", "B"]]) + + result = s.reset_index(level=levels[0], drop=True) + tm.assert_series_equal(result, df["B"]) + + with pytest.raises(IndexError, match="Too many levels"): + s.reset_index(level=[0, 1, 2]) + + # Check that .reset_index([],drop=True) doesn't fail + result = Series(range(4)).reset_index([], drop=True) + expected = Series(range(4)) + tm.assert_series_equal(result, expected) + + def test_reset_index_range(self): + # GH 12071 + s = Series(range(2), name="A", dtype="int64") + series_result = s.reset_index() + assert isinstance(series_result.index, RangeIndex) + series_expected = DataFrame( + [[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2) + ) + tm.assert_frame_equal(series_result, series_expected) + + def test_reset_index_drop_errors(self): + # GH 20925 + + # KeyError raised for series index when passed level name is missing + s = Series(range(4)) + with pytest.raises(KeyError, match="does not match index name"): + s.reset_index("wrong", drop=True) + with pytest.raises(KeyError, 
match="does not match index name"): + s.reset_index("wrong") + + # KeyError raised for series when level to be dropped is missing + s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2)) + with pytest.raises(KeyError, match="not found"): + s.reset_index("wrong", drop=True) diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 71f6681e8c955..9be8744d7223f 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series +from pandas import Index, MultiIndex, Series import pandas._testing as tm @@ -31,62 +31,6 @@ def test_setindex(self, string_series): # Renaming - def test_rename(self, datetime_series): - ts = datetime_series - renamer = lambda x: x.strftime("%Y%m%d") - renamed = ts.rename(renamer) - assert renamed.index[0] == renamer(ts.index[0]) - - # dict - rename_dict = dict(zip(ts.index, renamed.index)) - renamed2 = ts.rename(rename_dict) - tm.assert_series_equal(renamed, renamed2) - - # partial dict - s = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64") - renamed = s.rename({"b": "foo", "d": "bar"}) - tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"])) - - # index with name - renamer = Series( - np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64" - ) - renamed = renamer.rename({}) - assert renamed.index.name == renamer.index.name - - def test_rename_by_series(self): - s = Series(range(5), name="foo") - renamer = Series({1: 10, 2: 20}) - result = s.rename(renamer) - expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo") - tm.assert_series_equal(result, expected) - - def test_rename_set_name(self): - s = Series(range(4), index=list("abcd")) - for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: - result = s.rename(name) - assert result.name == name - tm.assert_numpy_array_equal(result.index.values, s.index.values) - assert s.name is None - - def test_rename_set_name_inplace(self): - s = Series(range(3), index=list("abc")) - for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: - s.rename(name, inplace=True) - assert s.name == name - - exp = np.array(["a", "b", "c"], dtype=np.object_) - tm.assert_numpy_array_equal(s.index.values, exp) - - def test_rename_axis_supported(self): - # Supporting axis for compatibility, detailed in GH-18589 - s = Series(range(5)) - s.rename({}, axis=0) - s.rename({}, axis="index") - # TODO: clean up shared index validation - # with pytest.raises(ValueError, match="No axis named 5"): - # s.rename({}, axis=5) - def test_set_name_attribute(self): s = Series([1, 2, 3]) s2 = Series([1, 2, 3], name="bar") @@ -103,13 +47,6 @@ def test_set_name(self): assert s.name is None assert s is not s2 - def test_rename_inplace(self, datetime_series): - renamer = lambda x: x.strftime("%Y%m%d") - expected = renamer(datetime_series.index[0]) - - datetime_series.rename(renamer, inplace=True) - assert datetime_series.index[0] == expected - def test_set_index_makes_timeseries(self): idx = tm.makeDateIndex(10) @@ -117,94 +54,6 @@ def test_set_index_makes_timeseries(self): s.index = idx assert s.index.is_all_dates - def test_reset_index(self): - df = tm.makeDataFrame()[:5] - ser = df.stack() - ser.index.names = ["hash", "category"] - - ser.name = "value" - df = ser.reset_index() - assert "value" in df - - df = ser.reset_index(name="value2") - assert "value2" in df - - # check inplace - s = 
ser.reset_index(drop=True) - s2 = ser - s2.reset_index(drop=True, inplace=True) - tm.assert_series_equal(s, s2) - - # level - index = MultiIndex( - levels=[["bar"], ["one", "two", "three"], [0, 1]], - codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], - ) - s = Series(np.random.randn(6), index=index) - rs = s.reset_index(level=1) - assert len(rs.columns) == 2 - - rs = s.reset_index(level=[0, 2], drop=True) - tm.assert_index_equal(rs.index, Index(index.get_level_values(1))) - assert isinstance(rs, Series) - - def test_reset_index_name(self): - s = Series([1, 2, 3], index=Index(range(3), name="x")) - assert s.reset_index().index.name is None - assert s.reset_index(drop=True).index.name is None - - def test_reset_index_level(self): - df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) - - for levels in ["A", "B"], [0, 1]: - # With MultiIndex - s = df.set_index(["A", "B"])["C"] - - result = s.reset_index(level=levels[0]) - tm.assert_frame_equal(result, df.set_index("B")) - - result = s.reset_index(level=levels[:1]) - tm.assert_frame_equal(result, df.set_index("B")) - - result = s.reset_index(level=levels) - tm.assert_frame_equal(result, df) - - result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True) - tm.assert_frame_equal(result, df[["C"]]) - - with pytest.raises(KeyError, match="Level E "): - s.reset_index(level=["A", "E"]) - - # With single-level Index - s = df.set_index("A")["B"] - - result = s.reset_index(level=levels[0]) - tm.assert_frame_equal(result, df[["A", "B"]]) - - result = s.reset_index(level=levels[:1]) - tm.assert_frame_equal(result, df[["A", "B"]]) - - result = s.reset_index(level=levels[0], drop=True) - tm.assert_series_equal(result, df["B"]) - - with pytest.raises(IndexError, match="Too many levels"): - s.reset_index(level=[0, 1, 2]) - - # Check that .reset_index([],drop=True) doesn't fail - result = Series(range(4)).reset_index([], drop=True) - expected = Series(range(4)) - tm.assert_series_equal(result, expected) - - def test_reset_index_range(self): - # GH 12071 - s = Series(range(2), name="A", dtype="int64") - series_result = s.reset_index() - assert isinstance(series_result.index, RangeIndex) - series_expected = DataFrame( - [[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2) - ) - tm.assert_frame_equal(series_result, series_expected) - def test_reorder_levels(self): index = MultiIndex( levels=[["bar"], ["one", "two", "three"], [0, 1]], @@ -268,25 +117,6 @@ def test_rename_axis_none(self, kwargs): expected = Series([1, 2, 3], index=expected_index) tm.assert_series_equal(result, expected) - def test_rename_with_custom_indexer(self): - # GH 27814 - class MyIndexer: - pass - - ix = MyIndexer() - s = Series([1, 2, 3]).rename(ix) - assert s.name is ix - - def test_rename_with_custom_indexer_inplace(self): - # GH 27814 - class MyIndexer: - pass - - ix = MyIndexer() - s = Series([1, 2, 3]) - s.rename(ix, inplace=True) - assert s.name is ix - def test_set_axis_inplace_axes(self, axis_series): # GH14636 ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64") @@ -323,21 +153,6 @@ def test_set_axis_inplace(self): with pytest.raises(ValueError, match="No axis named"): s.set_axis(list("abcd"), axis=axis, inplace=False) - def test_reset_index_drop_errors(self): - # GH 20925 - - # KeyError raised for series index when passed level name is missing - s = Series(range(4)) - with pytest.raises(KeyError, match="does not match index name"): - s.reset_index("wrong", drop=True) - with pytest.raises(KeyError, match="does not 
match index name"): - s.reset_index("wrong") - - # KeyError raised for series when level to be dropped is missing - s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2)) - with pytest.raises(KeyError, match="not found"): - s.reset_index("wrong", drop=True) - def test_droplevel(self): # GH20342 ser = Series([1, 2, 3, 4])
https://api.github.com/repos/pandas-dev/pandas/pulls/32227
2020-02-25T00:32:43Z
2020-02-25T20:41:13Z
2020-02-25T20:41:13Z
2020-02-25T20:47:36Z
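The next entry applies the same file-per-method split to the Series timeseries tests. As a quick reference, a minimal sketch of two of the methods it covers; the dates and values are made up, not taken from the diff:

import pandas as pd

rng = pd.date_range("2011-01-01", periods=100, freq="H")
ts = pd.Series(1, index=rng)

# tz_localize attaches a timezone to a naive index; calling it on an
# already tz-aware index raises TypeError ("Already tz-aware").
aware = ts.tz_localize("UTC")

# asfreq conforms the series to a new frequency, here sampling the
# hourly points that fall on day boundaries.
daily = aware.asfreq("D")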
REF/TST: method-specific files for Series timeseries methods
diff --git a/pandas/tests/series/methods/test_asfreq.py b/pandas/tests/series/methods/test_asfreq.py index 05ec56cf02182..d94b60384a07c 100644 --- a/pandas/tests/series/methods/test_asfreq.py +++ b/pandas/tests/series/methods/test_asfreq.py @@ -1,8 +1,13 @@ +from datetime import datetime + import numpy as np +import pytest -from pandas import DataFrame, Series, period_range +from pandas import DataFrame, DatetimeIndex, Series, date_range, period_range import pandas._testing as tm +from pandas.tseries.offsets import BDay, BMonthEnd + class TestAsFreq: # TODO: de-duplicate/parametrize or move DataFrame test @@ -21,3 +26,79 @@ def test_asfreq_ts(self): result = ts.asfreq("D", how="start") assert len(result) == len(ts) tm.assert_index_equal(result.index, index.asfreq("D", how="start")) + + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_tz_aware_asfreq(self, tz): + dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz) + + ser = Series(np.random.randn(len(dr)), index=dr) + + # it works! + ser.asfreq("T") + + def test_asfreq(self): + ts = Series( + [0.0, 1.0, 2.0], + index=[ + datetime(2009, 10, 30), + datetime(2009, 11, 30), + datetime(2009, 12, 31), + ], + ) + + daily_ts = ts.asfreq("B") + monthly_ts = daily_ts.asfreq("BM") + tm.assert_series_equal(monthly_ts, ts) + + daily_ts = ts.asfreq("B", method="pad") + monthly_ts = daily_ts.asfreq("BM") + tm.assert_series_equal(monthly_ts, ts) + + daily_ts = ts.asfreq(BDay()) + monthly_ts = daily_ts.asfreq(BMonthEnd()) + tm.assert_series_equal(monthly_ts, ts) + + result = ts[:0].asfreq("M") + assert len(result) == 0 + assert result is not ts + + daily_ts = ts.asfreq("D", fill_value=-1) + result = daily_ts.value_counts().sort_index() + expected = Series([60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0]).sort_index() + tm.assert_series_equal(result, expected) + + def test_asfreq_datetimeindex_empty_series(self): + # GH#14320 + index = DatetimeIndex(["2016-09-29 11:00"]) + expected = Series(index=index, dtype=object).asfreq("H") + result = Series([3], index=index.copy()).asfreq("H") + tm.assert_index_equal(expected.index, result.index) + + def test_asfreq_keep_index_name(self): + # GH#9854 + index_name = "bar" + index = date_range("20130101", periods=20, name=index_name) + df = DataFrame(list(range(20)), columns=["foo"], index=index) + + assert index_name == df.index.name + assert index_name == df.asfreq("10D").index.name + + def test_asfreq_normalize(self): + rng = date_range("1/1/2000 09:30", periods=20) + norm = date_range("1/1/2000", periods=20) + vals = np.random.randn(20) + ts = Series(vals, index=rng) + + result = ts.asfreq("D", normalize=True) + norm = date_range("1/1/2000", periods=20) + expected = Series(vals, index=norm) + + tm.assert_series_equal(result, expected) + + vals = np.random.randn(20, 3) + ts = DataFrame(vals, index=rng) + + result = ts.asfreq("D", normalize=True) + expected = DataFrame(vals, index=norm) + + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_at_time.py b/pandas/tests/series/methods/test_at_time.py new file mode 100644 index 0000000000000..d9985cf33776a --- /dev/null +++ b/pandas/tests/series/methods/test_at_time.py @@ -0,0 +1,72 @@ +from datetime import time + +import numpy as np +import pytest + +from pandas._libs.tslibs import timezones + +from pandas import DataFrame, Series, date_range +import pandas._testing as tm + + +class TestAtTime: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_localized_at_time(self, 
tzstr): + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range("4/16/2012", "5/1/2012", freq="H") + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_local = ts.tz_localize(tzstr) + + result = ts_local.at_time(time(10, 0)) + expected = ts.at_time(time(10, 0)).tz_localize(tzstr) + tm.assert_series_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + def test_at_time(self): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = Series(np.random.randn(len(rng)), index=rng) + rs = ts.at_time(rng[1]) + assert (rs.index.hour == rng[1].hour).all() + assert (rs.index.minute == rng[1].minute).all() + assert (rs.index.second == rng[1].second).all() + + result = ts.at_time("9:30") + expected = ts.at_time(time(9, 30)) + tm.assert_series_equal(result, expected) + + df = DataFrame(np.random.randn(len(rng), 3), index=rng) + + result = ts[time(9, 30)] + result_df = df.loc[time(9, 30)] + expected = ts[(rng.hour == 9) & (rng.minute == 30)] + exp_df = df[(rng.hour == 9) & (rng.minute == 30)] + + tm.assert_series_equal(result, expected) + tm.assert_frame_equal(result_df, exp_df) + + chunk = df.loc["1/4/2000":] + result = chunk.loc[time(9, 30)] + expected = result_df[-1:] + tm.assert_frame_equal(result, expected) + + # midnight, everything + rng = date_range("1/1/2000", "1/31/2000") + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts.at_time(time(0, 0)) + tm.assert_series_equal(result, ts) + + # time doesn't exist + rng = date_range("1/1/2012", freq="23Min", periods=384) + ts = Series(np.random.randn(len(rng)), rng) + rs = ts.at_time("16:00") + assert len(rs) == 0 + + def test_at_time_raises(self): + # GH20725 + ser = Series("a b c".split()) + msg = "Index must be DatetimeIndex" + with pytest.raises(TypeError, match=msg): + ser.at_time("00:00") diff --git a/pandas/tests/series/methods/test_between.py b/pandas/tests/series/methods/test_between.py new file mode 100644 index 0000000000000..350a3fe6ff009 --- /dev/null +++ b/pandas/tests/series/methods/test_between.py @@ -0,0 +1,35 @@ +import numpy as np + +from pandas import Series, bdate_range, date_range, period_range +import pandas._testing as tm + + +class TestBetween: + + # TODO: redundant with test_between_datetime_values? 
+ def test_between(self): + series = Series(date_range("1/1/2000", periods=10)) + left, right = series[[2, 7]] + + result = series.between(left, right) + expected = (series >= left) & (series <= right) + tm.assert_series_equal(result, expected) + + def test_between_datetime_values(self): + ser = Series(bdate_range("1/1/2000", periods=20).astype(object)) + ser[::2] = np.nan + + result = ser[ser.between(ser[3], ser[17])] + expected = ser[3:18].dropna() + tm.assert_series_equal(result, expected) + + result = ser[ser.between(ser[3], ser[17], inclusive=False)] + expected = ser[5:16].dropna() + tm.assert_series_equal(result, expected) + + def test_between_period_values(self): + ser = Series(period_range("2000-01-01", periods=10, freq="D")) + left, right = ser[[2, 7]] + result = ser.between(left, right) + expected = (ser >= left) & (ser <= right) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/methods/test_between_time.py b/pandas/tests/series/methods/test_between_time.py new file mode 100644 index 0000000000000..3fa26afe77a1d --- /dev/null +++ b/pandas/tests/series/methods/test_between_time.py @@ -0,0 +1,144 @@ +from datetime import datetime, time +from itertools import product + +import numpy as np +import pytest + +from pandas._libs.tslibs import timezones +import pandas.util._test_decorators as td + +from pandas import DataFrame, Series, date_range +import pandas._testing as tm + + +class TestBetweenTime: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_localized_between_time(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range("4/16/2012", "5/1/2012", freq="H") + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_local = ts.tz_localize(tzstr) + + t1, t2 = time(10, 0), time(11, 0) + result = ts_local.between_time(t1, t2) + expected = ts.between_time(t1, t2).tz_localize(tzstr) + tm.assert_series_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + def test_between_time(self): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = Series(np.random.randn(len(rng)), index=rng) + stime = time(0, 0) + etime = time(1, 0) + + close_open = product([True, False], [True, False]) + for inc_start, inc_end in close_open: + filtered = ts.between_time(stime, etime, inc_start, inc_end) + exp_len = 13 * 4 + 1 + if not inc_start: + exp_len -= 5 + if not inc_end: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inc_start: + assert t >= stime + else: + assert t > stime + + if inc_end: + assert t <= etime + else: + assert t < etime + + result = ts.between_time("00:00", "01:00") + expected = ts.between_time(stime, etime) + tm.assert_series_equal(result, expected) + + # across midnight + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = Series(np.random.randn(len(rng)), index=rng) + stime = time(22, 0) + etime = time(9, 0) + + close_open = product([True, False], [True, False]) + for inc_start, inc_end in close_open: + filtered = ts.between_time(stime, etime, inc_start, inc_end) + exp_len = (12 * 11 + 1) * 4 + 1 + if not inc_start: + exp_len -= 4 + if not inc_end: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inc_start: + assert (t >= stime) or (t <= etime) + else: + assert (t > stime) or (t <= etime) + + if inc_end: + assert (t <= etime) or (t >= stime) + else: + assert (t < etime) or (t >= stime) + + def test_between_time_raises(self): + # GH20725 + ser = Series("a b c".split()) 
+ msg = "Index must be DatetimeIndex" + with pytest.raises(TypeError, match=msg): + ser.between_time(start_time="00:00", end_time="12:00") + + def test_between_time_types(self): + # GH11818 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time" + with pytest.raises(ValueError, match=msg): + rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + + frame = DataFrame({"A": 0}, index=rng) + with pytest.raises(ValueError, match=msg): + frame.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + + series = Series(0, index=rng) + with pytest.raises(ValueError, match=msg): + series.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + + @td.skip_if_has_locale + def test_between_time_formats(self): + # GH11818 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), 2), index=rng) + + strings = [ + ("2:00", "2:30"), + ("0200", "0230"), + ("2:00am", "2:30am"), + ("0200am", "0230am"), + ("2:00:00", "2:30:00"), + ("020000", "023000"), + ("2:00:00am", "2:30:00am"), + ("020000am", "023000am"), + ] + expected_length = 28 + + for time_string in strings: + assert len(ts.between_time(*time_string)) == expected_length + + def test_between_time_axis(self): + # issue 8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + ts = Series(np.random.randn(len(rng)), index=rng) + stime, etime = ("08:00:00", "09:00:00") + expected_length = 7 + + assert len(ts.between_time(stime, etime)) == expected_length + assert len(ts.between_time(stime, etime, axis=0)) == expected_length + msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>" + with pytest.raises(ValueError, match=msg): + ts.between_time(stime, etime, axis=1) diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py index d4e2890ed8bf0..c97369b349f56 100644 --- a/pandas/tests/series/methods/test_truncate.py +++ b/pandas/tests/series/methods/test_truncate.py @@ -1,7 +1,10 @@ +from datetime import datetime + import numpy as np import pytest import pandas as pd +from pandas import Series, date_range import pandas._testing as tm from pandas.tseries.offsets import BDay @@ -76,3 +79,33 @@ def test_truncate_nonsortedindex(self): with pytest.raises(ValueError, match=msg): ts.sort_values(ascending=False).truncate(before="2011-11", after="2011-12") + + def test_truncate_datetimeindex_tz(self): + # GH 9243 + idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific") + s = Series(range(len(idx)), index=idx) + result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4)) + expected = Series([1, 2, 3], index=idx[1:4]) + tm.assert_series_equal(result, expected) + + def test_truncate_periodindex(self): + # GH 17717 + idx1 = pd.PeriodIndex( + [pd.Period("2017-09-02"), pd.Period("2017-09-02"), pd.Period("2017-09-03")] + ) + series1 = pd.Series([1, 2, 3], index=idx1) + result1 = series1.truncate(after="2017-09-02") + + expected_idx1 = pd.PeriodIndex( + [pd.Period("2017-09-02"), pd.Period("2017-09-02")] + ) + tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1)) + + idx2 = pd.PeriodIndex( + [pd.Period("2017-09-03"), pd.Period("2017-09-02"), pd.Period("2017-09-03")] + ) + series2 = pd.Series([1, 2, 3], index=idx2) + result2 = series2.sort_index().truncate(after="2017-09-02") + + expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")]) + tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2)) diff 
--git a/pandas/tests/series/methods/test_tz_convert.py b/pandas/tests/series/methods/test_tz_convert.py new file mode 100644 index 0000000000000..ce348d5323e62 --- /dev/null +++ b/pandas/tests/series/methods/test_tz_convert.py @@ -0,0 +1,29 @@ +import numpy as np +import pytest + +from pandas import DatetimeIndex, Series, date_range +import pandas._testing as tm + + +class TestTZConvert: + def test_series_tz_convert(self): + rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") + ts = Series(1, index=rng) + + result = ts.tz_convert("Europe/Berlin") + assert result.index.tz.zone == "Europe/Berlin" + + # can't convert tz-naive + rng = date_range("1/1/2011", periods=200, freq="D") + ts = Series(1, index=rng) + + with pytest.raises(TypeError, match="Cannot convert tz-naive"): + ts.tz_convert("US/Eastern") + + def test_series_tz_convert_to_utc(self): + base = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC") + idx1 = base.tz_convert("Asia/Tokyo")[:2] + idx2 = base.tz_convert("US/Eastern")[1:] + + res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2) + tm.assert_series_equal(res, Series([np.nan, 3, np.nan], index=base)) diff --git a/pandas/tests/series/methods/test_tz_localize.py b/pandas/tests/series/methods/test_tz_localize.py new file mode 100644 index 0000000000000..44c55edf77c0a --- /dev/null +++ b/pandas/tests/series/methods/test_tz_localize.py @@ -0,0 +1,88 @@ +import pytest +import pytz + +from pandas._libs.tslibs import timezones + +from pandas import DatetimeIndex, NaT, Series, Timestamp, date_range +import pandas._testing as tm + + +class TestTZLocalize: + def test_series_tz_localize(self): + + rng = date_range("1/1/2011", periods=100, freq="H") + ts = Series(1, index=rng) + + result = ts.tz_localize("utc") + assert result.index.tz.zone == "UTC" + + # Can't localize if already tz-aware + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + ts = Series(1, index=rng) + + with pytest.raises(TypeError, match="Already tz-aware"): + ts.tz_localize("US/Eastern") + + def test_series_tz_localize_ambiguous_bool(self): + # make sure that we are correctly accepting bool values as ambiguous + + # GH#14402 + ts = Timestamp("2015-11-01 01:00:03") + expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central") + expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central") + + ser = Series([ts]) + expected0 = Series([expected0]) + expected1 = Series([expected1]) + + with pytest.raises(pytz.AmbiguousTimeError): + ser.dt.tz_localize("US/Central") + + result = ser.dt.tz_localize("US/Central", ambiguous=True) + tm.assert_series_equal(result, expected0) + + result = ser.dt.tz_localize("US/Central", ambiguous=[True]) + tm.assert_series_equal(result, expected0) + + result = ser.dt.tz_localize("US/Central", ambiguous=False) + tm.assert_series_equal(result, expected1) + + result = ser.dt.tz_localize("US/Central", ambiguous=[False]) + tm.assert_series_equal(result, expected1) + + @pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"]) + @pytest.mark.parametrize( + "method, exp", + [ + ["shift_forward", "2015-03-29 03:00:00"], + ["NaT", NaT], + ["raise", None], + ["foo", "invalid"], + ], + ) + def test_series_tz_localize_nonexistent(self, tz, method, exp): + # GH 8917 + n = 60 + dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min") + s = Series(1, dti) + if method == "raise": + with pytest.raises(pytz.NonExistentTimeError): + s.tz_localize(tz, nonexistent=method) + elif exp == "invalid": + with 
pytest.raises(ValueError): + dti.tz_localize(tz, nonexistent=method) + else: + result = s.tz_localize(tz, nonexistent=method) + expected = Series(1, index=DatetimeIndex([exp] * n, tz=tz)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_series_tz_localize_empty(self, tzstr): + # GH#2248 + ser = Series(dtype=object) + + ser2 = ser.tz_localize("utc") + assert ser2.index.tz == pytz.utc + + ser2 = ser.tz_localize(tzstr) + timezones.tz_compare(ser2.index.tz, timezones.maybe_get_tz(tzstr)) diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index b8be4ea137e3d..59ae0cd63690c 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -19,7 +19,6 @@ PeriodIndex, Series, TimedeltaIndex, - bdate_range, date_range, period_range, timedelta_range, @@ -622,18 +621,6 @@ def test_dt_accessor_updates_on_inplace(self): result = s.dt.date assert result[0] == result[2] - def test_between(self): - s = Series(bdate_range("1/1/2000", periods=20).astype(object)) - s[::2] = np.nan - - result = s[s.between(s[3], s[17])] - expected = s[3:18].dropna() - tm.assert_series_equal(result, expected) - - result = s[s.between(s[3], s[17], inclusive=False)] - expected = s[5:16].dropna() - tm.assert_series_equal(result, expected) - def test_date_tz(self): # GH11757 rng = pd.DatetimeIndex( diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index 03fee389542e3..f41245c2872a7 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -52,12 +52,6 @@ def test_dropna(self): s = Series([pd.Period("2011-01", freq="M"), pd.Period("NaT", freq="M")]) tm.assert_series_equal(s.dropna(), Series([pd.Period("2011-01", freq="M")])) - def test_between(self): - left, right = self.series[[2, 7]] - result = self.series.between(left, right) - expected = (self.series >= left) & (self.series <= right) - tm.assert_series_equal(result, expected) - # --------------------------------------------------------------------- # NaT support @@ -110,28 +104,6 @@ def test_align_series(self, join_type): ts.align(ts[::2], join=join_type) - def test_truncate(self): - # GH 17717 - idx1 = pd.PeriodIndex( - [pd.Period("2017-09-02"), pd.Period("2017-09-02"), pd.Period("2017-09-03")] - ) - series1 = pd.Series([1, 2, 3], index=idx1) - result1 = series1.truncate(after="2017-09-02") - - expected_idx1 = pd.PeriodIndex( - [pd.Period("2017-09-02"), pd.Period("2017-09-02")] - ) - tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1)) - - idx2 = pd.PeriodIndex( - [pd.Period("2017-09-03"), pd.Period("2017-09-02"), pd.Period("2017-09-03")] - ) - series2 = pd.Series([1, 2, 3], index=idx2) - result2 = series2.sort_index().truncate(after="2017-09-02") - - expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")]) - tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2)) - @pytest.mark.parametrize( "input_vals", [ diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 459377fb18f29..8f06ea69f5d66 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -1,13 +1,11 @@ -from datetime import datetime, time, timedelta +from datetime import datetime, timedelta from io import StringIO -from itertools import product import numpy as np import pytest from pandas._libs.tslib import iNaT from pandas._libs.tslibs.np_datetime import 
OutOfBoundsDatetime -import pandas.util._test_decorators as td import pandas as pd from pandas import ( @@ -23,8 +21,6 @@ ) import pandas._testing as tm -from pandas.tseries.offsets import BDay, BMonthEnd - def _simple_ts(start, end, freq="D"): rng = date_range(start, end, freq=freq) @@ -38,44 +34,6 @@ def assert_range_equal(left, right): class TestTimeSeries: - def test_asfreq(self): - ts = Series( - [0.0, 1.0, 2.0], - index=[ - datetime(2009, 10, 30), - datetime(2009, 11, 30), - datetime(2009, 12, 31), - ], - ) - - daily_ts = ts.asfreq("B") - monthly_ts = daily_ts.asfreq("BM") - tm.assert_series_equal(monthly_ts, ts) - - daily_ts = ts.asfreq("B", method="pad") - monthly_ts = daily_ts.asfreq("BM") - tm.assert_series_equal(monthly_ts, ts) - - daily_ts = ts.asfreq(BDay()) - monthly_ts = daily_ts.asfreq(BMonthEnd()) - tm.assert_series_equal(monthly_ts, ts) - - result = ts[:0].asfreq("M") - assert len(result) == 0 - assert result is not ts - - daily_ts = ts.asfreq("D", fill_value=-1) - result = daily_ts.value_counts().sort_index() - expected = Series([60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0]).sort_index() - tm.assert_series_equal(result, expected) - - def test_asfreq_datetimeindex_empty_series(self): - # GH 14320 - index = pd.DatetimeIndex(["2016-09-29 11:00"]) - expected = Series(index=index, dtype=object).asfreq("H") - result = Series([3], index=index.copy()).asfreq("H") - tm.assert_index_equal(expected.index, result.index) - def test_autocorr(self, datetime_series): # Just run the function corr1 = datetime_series.autocorr() @@ -268,15 +226,6 @@ def test_series_repr_nat(self): ) assert result == expected - def test_asfreq_keep_index_name(self): - # GH #9854 - index_name = "bar" - index = pd.date_range("20130101", periods=20, name=index_name) - df = pd.DataFrame(list(range(20)), columns=["foo"], index=index) - - assert index_name == df.index.name - assert index_name == df.asfreq("10D").index.name - def test_promote_datetime_date(self): rng = date_range("1/1/2000", periods=20) ts = Series(np.random.randn(20), index=rng) @@ -300,26 +249,6 @@ def test_promote_datetime_date(self): expected = rng.get_indexer(ts_slice.index) tm.assert_numpy_array_equal(result, expected) - def test_asfreq_normalize(self): - rng = date_range("1/1/2000 09:30", periods=20) - norm = date_range("1/1/2000", periods=20) - vals = np.random.randn(20) - ts = Series(vals, index=rng) - - result = ts.asfreq("D", normalize=True) - norm = date_range("1/1/2000", periods=20) - expected = Series(vals, index=norm) - - tm.assert_series_equal(result, expected) - - vals = np.random.randn(20, 3) - ts = DataFrame(vals, index=rng) - - result = ts.asfreq("D", normalize=True) - expected = DataFrame(vals, index=norm) - - tm.assert_frame_equal(result, expected) - def test_first_subset(self): ts = _simple_ts("1/1/2000", "1/1/2010", freq="12h") result = ts.first("10d") @@ -380,180 +309,6 @@ def test_format_pre_1900_dates(self): ts = Series(1, index=rng) repr(ts) - def test_at_time(self): - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = Series(np.random.randn(len(rng)), index=rng) - rs = ts.at_time(rng[1]) - assert (rs.index.hour == rng[1].hour).all() - assert (rs.index.minute == rng[1].minute).all() - assert (rs.index.second == rng[1].second).all() - - result = ts.at_time("9:30") - expected = ts.at_time(time(9, 30)) - tm.assert_series_equal(result, expected) - - df = DataFrame(np.random.randn(len(rng), 3), index=rng) - - result = ts[time(9, 30)] - result_df = df.loc[time(9, 30)] - expected = ts[(rng.hour == 9) & (rng.minute == 
30)] - exp_df = df[(rng.hour == 9) & (rng.minute == 30)] - - # FIXME: dont leave commented-out - # expected.index = date_range('1/1/2000', '1/4/2000') - - tm.assert_series_equal(result, expected) - tm.assert_frame_equal(result_df, exp_df) - - chunk = df.loc["1/4/2000":] - result = chunk.loc[time(9, 30)] - expected = result_df[-1:] - tm.assert_frame_equal(result, expected) - - # midnight, everything - rng = date_range("1/1/2000", "1/31/2000") - ts = Series(np.random.randn(len(rng)), index=rng) - - result = ts.at_time(time(0, 0)) - tm.assert_series_equal(result, ts) - - # time doesn't exist - rng = date_range("1/1/2012", freq="23Min", periods=384) - ts = Series(np.random.randn(len(rng)), rng) - rs = ts.at_time("16:00") - assert len(rs) == 0 - - def test_at_time_raises(self): - # GH20725 - ser = pd.Series("a b c".split()) - msg = "Index must be DatetimeIndex" - with pytest.raises(TypeError, match=msg): - ser.at_time("00:00") - - def test_between(self): - series = Series(date_range("1/1/2000", periods=10)) - left, right = series[[2, 7]] - - result = series.between(left, right) - expected = (series >= left) & (series <= right) - tm.assert_series_equal(result, expected) - - def test_between_time(self): - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = Series(np.random.randn(len(rng)), index=rng) - stime = time(0, 0) - etime = time(1, 0) - - close_open = product([True, False], [True, False]) - for inc_start, inc_end in close_open: - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = 13 * 4 + 1 - if not inc_start: - exp_len -= 5 - if not inc_end: - exp_len -= 4 - - assert len(filtered) == exp_len - for rs in filtered.index: - t = rs.time() - if inc_start: - assert t >= stime - else: - assert t > stime - - if inc_end: - assert t <= etime - else: - assert t < etime - - result = ts.between_time("00:00", "01:00") - expected = ts.between_time(stime, etime) - tm.assert_series_equal(result, expected) - - # across midnight - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = Series(np.random.randn(len(rng)), index=rng) - stime = time(22, 0) - etime = time(9, 0) - - close_open = product([True, False], [True, False]) - for inc_start, inc_end in close_open: - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = (12 * 11 + 1) * 4 + 1 - if not inc_start: - exp_len -= 4 - if not inc_end: - exp_len -= 4 - - assert len(filtered) == exp_len - for rs in filtered.index: - t = rs.time() - if inc_start: - assert (t >= stime) or (t <= etime) - else: - assert (t > stime) or (t <= etime) - - if inc_end: - assert (t <= etime) or (t >= stime) - else: - assert (t < etime) or (t >= stime) - - def test_between_time_raises(self): - # GH20725 - ser = pd.Series("a b c".split()) - msg = "Index must be DatetimeIndex" - with pytest.raises(TypeError, match=msg): - ser.between_time(start_time="00:00", end_time="12:00") - - def test_between_time_types(self): - # GH11818 - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time" - with pytest.raises(ValueError, match=msg): - rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) - - frame = DataFrame({"A": 0}, index=rng) - with pytest.raises(ValueError, match=msg): - frame.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) - - series = Series(0, index=rng) - with pytest.raises(ValueError, match=msg): - series.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) - - @td.skip_if_has_locale - def 
test_between_time_formats(self): - # GH11818 - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - - strings = [ - ("2:00", "2:30"), - ("0200", "0230"), - ("2:00am", "2:30am"), - ("0200am", "0230am"), - ("2:00:00", "2:30:00"), - ("020000", "023000"), - ("2:00:00am", "2:30:00am"), - ("020000am", "023000am"), - ] - expected_length = 28 - - for time_string in strings: - assert len(ts.between_time(*time_string)) == expected_length - - def test_between_time_axis(self): - # issue 8839 - rng = date_range("1/1/2000", periods=100, freq="10min") - ts = Series(np.random.randn(len(rng)), index=rng) - stime, etime = ("08:00:00", "09:00:00") - expected_length = 7 - - assert len(ts.between_time(stime, etime)) == expected_length - assert len(ts.between_time(stime, etime, axis=0)) == expected_length - msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>" - with pytest.raises(ValueError, match=msg): - ts.between_time(stime, etime, axis=1) - def test_to_period(self): from pandas.core.indexes.period import period_range diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index a363f927d10a9..74363f4c73c39 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -10,118 +10,12 @@ from pandas._libs.tslibs import conversion, timezones -from pandas import DatetimeIndex, Index, NaT, Series, Timestamp +from pandas import DatetimeIndex, Index, Series, Timestamp import pandas._testing as tm from pandas.core.indexes.datetimes import date_range class TestSeriesTimezones: - # ----------------------------------------------------------------- - # Series.tz_localize - def test_series_tz_localize(self): - - rng = date_range("1/1/2011", periods=100, freq="H") - ts = Series(1, index=rng) - - result = ts.tz_localize("utc") - assert result.index.tz.zone == "UTC" - - # Can't localize if already tz-aware - rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") - ts = Series(1, index=rng) - - with pytest.raises(TypeError, match="Already tz-aware"): - ts.tz_localize("US/Eastern") - - def test_series_tz_localize_ambiguous_bool(self): - # make sure that we are correctly accepting bool values as ambiguous - - # GH#14402 - ts = Timestamp("2015-11-01 01:00:03") - expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central") - expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central") - - ser = Series([ts]) - expected0 = Series([expected0]) - expected1 = Series([expected1]) - - with pytest.raises(pytz.AmbiguousTimeError): - ser.dt.tz_localize("US/Central") - - result = ser.dt.tz_localize("US/Central", ambiguous=True) - tm.assert_series_equal(result, expected0) - - result = ser.dt.tz_localize("US/Central", ambiguous=[True]) - tm.assert_series_equal(result, expected0) - - result = ser.dt.tz_localize("US/Central", ambiguous=False) - tm.assert_series_equal(result, expected1) - - result = ser.dt.tz_localize("US/Central", ambiguous=[False]) - tm.assert_series_equal(result, expected1) - - @pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"]) - @pytest.mark.parametrize( - "method, exp", - [ - ["shift_forward", "2015-03-29 03:00:00"], - ["NaT", NaT], - ["raise", None], - ["foo", "invalid"], - ], - ) - def test_series_tz_localize_nonexistent(self, tz, method, exp): - # GH 8917 - n = 60 - dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min") - s = Series(1, dti) - if method == "raise": - with 
pytest.raises(pytz.NonExistentTimeError): - s.tz_localize(tz, nonexistent=method) - elif exp == "invalid": - with pytest.raises(ValueError): - dti.tz_localize(tz, nonexistent=method) - else: - result = s.tz_localize(tz, nonexistent=method) - expected = Series(1, index=DatetimeIndex([exp] * n, tz=tz)) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_series_tz_localize_empty(self, tzstr): - # GH#2248 - ser = Series(dtype=object) - - ser2 = ser.tz_localize("utc") - assert ser2.index.tz == pytz.utc - - ser2 = ser.tz_localize(tzstr) - timezones.tz_compare(ser2.index.tz, timezones.maybe_get_tz(tzstr)) - - # ----------------------------------------------------------------- - # Series.tz_convert - - def test_series_tz_convert(self): - rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") - ts = Series(1, index=rng) - - result = ts.tz_convert("Europe/Berlin") - assert result.index.tz.zone == "Europe/Berlin" - - # can't convert tz-naive - rng = date_range("1/1/2011", periods=200, freq="D") - ts = Series(1, index=rng) - - with pytest.raises(TypeError, match="Cannot convert tz-naive"): - ts.tz_convert("US/Eastern") - - def test_series_tz_convert_to_utc(self): - base = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC") - idx1 = base.tz_convert("Asia/Tokyo")[:2] - idx2 = base.tz_convert("US/Eastern")[1:] - - res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2) - tm.assert_series_equal(res, Series([np.nan, 3, np.nan], index=base)) - # ----------------------------------------------------------------- # Series.append @@ -225,15 +119,6 @@ def test_dateutil_tzoffset_support(self): # it works! #2443 repr(series.index[0]) - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) - def test_tz_aware_asfreq(self, tz): - dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz) - - ser = Series(np.random.randn(len(dr)), index=dr) - - # it works! 
- ser.asfreq("T") - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) def test_string_index_alias_tz_aware(self, tz): rng = date_range("1/1/2000", periods=10, tz=tz) @@ -299,28 +184,6 @@ def test_series_align_aware(self): assert new1.index.tz == pytz.UTC assert new2.index.tz == pytz.UTC - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_localized_at_time_between_time(self, tzstr): - from datetime import time - - tz = timezones.maybe_get_tz(tzstr) - - rng = date_range("4/16/2012", "5/1/2012", freq="H") - ts = Series(np.random.randn(len(rng)), index=rng) - - ts_local = ts.tz_localize(tzstr) - - result = ts_local.at_time(time(10, 0)) - expected = ts.at_time(time(10, 0)).tz_localize(tzstr) - tm.assert_series_equal(result, expected) - assert timezones.tz_compare(result.index.tz, tz) - - t1, t2 = time(10, 0), time(11, 0) - result = ts_local.between_time(t1, t2) - expected = ts.between_time(t1, t2).tz_localize(tzstr) - tm.assert_series_equal(result, expected) - assert timezones.tz_compare(result.index.tz, tz) - @pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"]) def test_getitem_pydatetime_tz(self, tzstr): tz = timezones.maybe_get_tz(tzstr) @@ -335,14 +198,6 @@ def test_getitem_pydatetime_tz(self, tzstr): time_datetime = conversion.localize_pydatetime(dt, tz) assert ts[time_pandas] == ts[time_datetime] - def test_series_truncate_datetimeindex_tz(self): - # GH 9243 - idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific") - s = Series(range(len(idx)), index=idx) - result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4)) - expected = Series([1, 2, 3], index=idx[1:4]) - tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("copy", [True, False]) @pytest.mark.parametrize( "method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]]
cc @WillAyd @simonjayhawkins got the [go ahead](https://github.com/pandas-dev/pandas/pull/32110#issuecomment-589974610) the other day to take these off Jeff's plate
https://api.github.com/repos/pandas-dev/pandas/pulls/32226
2020-02-24T23:50:30Z
2020-02-25T21:49:52Z
2020-02-25T21:49:52Z
2020-02-25T23:23:59Z
PERF: lazify consolidation check
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index d4f9c15a9f73f..329bfdf543c62 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -141,7 +141,7 @@ def __init__( if do_integrity_check: self._verify_integrity() - self._consolidate_check() + self._known_consolidated = False self._rebuild_blknos_and_blklocs() @@ -726,7 +726,6 @@ def get_slice(self, slobj: slice, axis: int = 0): new_axes[axis] = new_axes[axis][slobj] bm = type(self)(new_blocks, new_axes, do_integrity_check=False) - bm._consolidate_inplace() return bm def __contains__(self, item) -> bool:
The benchmark I'm using for this (and upcoming related PRs) is based on the asv that is most affected by removing `fast_apply` (see #32086).

```
import numpy as np
from pandas import *
%load_ext line_profiler

def get_df():
    N = 10 ** 4
    labels = np.random.randint(0, 2000, size=N)
    labels2 = np.random.randint(0, 3, size=N)
    df = DataFrame(
        {
            "key": labels,
            "key2": labels2,
            "value1": np.random.randn(N),
            "value2": ["foo", "bar", "baz", "qux"] * (N // 4),
        }
    )
    return df

df = get_df()
gb = df.groupby("key")

%prun -s cumulative gb.apply(lambda x: 1)
```

If we disable `fast_apply` on master, this gives:

```
ncalls  tottime  percall  cumtime  percall  filename:lineno(function)
     1    0.000    0.000    0.278    0.278  groupby.py:701(apply)
     1    0.000    0.000    0.278    0.278  groupby.py:750(_python_apply_general)
     1    0.009    0.009    0.275    0.275  ops.py:151(apply)
  1987    0.003    0.000    0.257    0.000  ops.py:858(__iter__)
  1986    0.003    0.000    0.251    0.000  ops.py:889(_chop)
  1986    0.003    0.000    0.247    0.000  indexing.py:814(__getitem__)
  1986    0.001    0.000    0.243    0.000  indexing.py:1462(_getitem_axis)
  1986    0.003    0.000    0.242    0.000  indexing.py:1488(_get_slice_axis)
  1986    0.007    0.000    0.230    0.000  generic.py:3470(_slice)
  1986    0.008    0.000    0.203    0.000  managers.py:713(get_slice)
  1987    0.005    0.000    0.129    0.000  managers.py:125(__init__)
  1987    0.003    0.000    0.060    0.000  managers.py:634(_consolidate_check)
  1987    0.026    0.000    0.059    0.000  managers.py:215(_rebuild_blknos_and_blklocs)
  1987    0.003    0.000    0.056    0.000  managers.py:635(<listcomp>)
  5961    0.015    0.000    0.053    0.000  blocks.py:335(ftype)
```

If we disable `fast_apply` on this PR:

```
ncalls  tottime  percall  cumtime  percall  filename:lineno(function)
     1    0.000    0.000    0.198    0.198  groupby.py:701(apply)
     1    0.000    0.000    0.198    0.198  groupby.py:750(_python_apply_general)
     1    0.008    0.008    0.195    0.195  ops.py:151(apply)
  1979    0.002    0.000    0.176    0.000  ops.py:903(__iter__)
  1978    0.002    0.000    0.172    0.000  ops.py:934(_chop)
  1978    0.003    0.000    0.169    0.000  indexing.py:814(__getitem__)
  1978    0.001    0.000    0.165    0.000  indexing.py:1462(_getitem_axis)
  1978    0.003    0.000    0.164    0.000  indexing.py:1488(_get_slice_axis)
  1978    0.006    0.000    0.153    0.000  generic.py:3470(_slice)
  1978    0.007    0.000    0.129    0.000  managers.py:713(get_slice)
  1980    0.004    0.000    0.061    0.000  managers.py:125(__init__)
  1980    0.021    0.000    0.052    0.000  managers.py:215(_rebuild_blknos_and_blklocs)
  1978    0.002    0.000    0.048    0.000  managers.py:723(<listcomp>)
  5934    0.010    0.000    0.045    0.000  blocks.py:310(getitem_block)
  5942    0.003    0.000    0.031    0.000  blocks.py:275(make_block_same_class)
```

We save almost 30% by lazifying the consolidation check and consolidating on `_slice`.
https://api.github.com/repos/pandas-dev/pandas/pulls/32224
2020-02-24T21:58:32Z
2020-02-26T02:07:34Z
2020-02-26T02:07:34Z
2020-02-26T02:18:52Z
BUG: Fix DataFrameGroupBy.mean error for Int64 dtype
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index 123dfa07f4331..621ce60e38dcc 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -89,6 +89,7 @@ Bug fixes - Fixed bug in :meth:`DataFrame.convert_dtypes` where ``BooleanDtype`` columns were converted to ``Int64`` (:issue:`32287`) - Fixed bug in setting values using a slice indexer with string dtype (:issue:`31772`) - Fixed bug where :meth:`pandas.core.groupby.GroupBy.first` and :meth:`pandas.core.groupby.GroupBy.last` would raise a ``TypeError`` when groups contained ``pd.NA`` in a column of object dtype (:issue:`32123`) +- Fixed bug where :meth:`DataFrameGroupBy.mean`, :meth:`DataFrameGroupBy.median`, :meth:`DataFrameGroupBy.var`, and :meth:`DataFrameGroupBy.std` would raise a ``TypeError`` on ``Int64`` dtype columns (:issue:`32219`) **Strings** diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index b7ac3048631c5..a9a608e6f76ba 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -1083,7 +1083,7 @@ def _cython_agg_blocks( result = type(block.values)._from_sequence( result.ravel(), dtype=block.values.dtype ) - except ValueError: + except (ValueError, TypeError): # reshape to be valid for non-Extension Block result = result.reshape(1, -1) diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 03278e69fe94a..9c33843cdcecc 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1605,3 +1605,34 @@ def test_groupby_mean_no_overflow(): } ) assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840 + + +@pytest.mark.parametrize( + "values", + [ + { + "a": [1, 1, 1, 2, 2, 2, 3, 3, 3], + "b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2], + }, + {"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]}, + ], +) +@pytest.mark.parametrize("function", ["mean", "median", "var"]) +def test_apply_to_nullable_integer_returns_float(values, function): + # https://github.com/pandas-dev/pandas/issues/32219 + output = 0.5 if function == "var" else 1.5 + arr = np.array([output] * 3, dtype=float) + idx = pd.Index([1, 2, 3], dtype=object, name="a") + expected = pd.DataFrame({"b": arr}, index=idx) + + groups = pd.DataFrame(values, dtype="Int64").groupby("a") + + result = getattr(groups, function)() + tm.assert_frame_equal(result, expected) + + result = groups.agg(function) + tm.assert_frame_equal(result, expected) + + result = groups.agg([function]) + expected.columns = MultiIndex.from_tuples([("b", function)]) + tm.assert_frame_equal(result, expected)
- [x] closes #32219 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry It looks like this was due to the `TypeError` not being caught
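For reference, a minimal reproduction sketch (the data mirrors one of the new test cases for GH#32219):

```python
import pandas as pd

# Grouping on an Int64 (nullable integer) frame raised TypeError before this
# fix, because _cython_agg_blocks only caught ValueError when re-wrapping the
# aggregated result into an extension block.
df = pd.DataFrame({"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]}, dtype="Int64")

result = df.groupby("a").mean()  # now returns float values instead of raising
print(result)
```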
https://api.github.com/repos/pandas-dev/pandas/pulls/32223
2020-02-24T20:55:12Z
2020-03-12T02:29:00Z
2020-03-12T02:28:59Z
2020-03-12T13:19:45Z
BUG: the sample skewness is computed incorrectly
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py index a5c609473760d..b683c25264f54 100644 --- a/pandas/core/nanops.py +++ b/pandas/core/nanops.py @@ -1017,7 +1017,7 @@ def nanskew( m3 = _zero_out_fperr(m3) with np.errstate(invalid="ignore", divide="ignore"): - result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5) + result = ((count * (count - 1)) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5) dtype = values.dtype if is_float_dtype(dtype):
- [ ] closes #xxxx - [ ] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry The correct formula can be found here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.skew.html
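For reference, the bias-corrected (adjusted Fisher-Pearson) sample skewness from the scipy docs, which the corrected line implements (with m_2 and m_3 the central sample moments):

```latex
% G_1 is the adjusted Fisher-Pearson standardized moment coefficient.
% The old code effectively used n \sqrt{n - 1} / (n - 2) as the correction
% factor instead of \sqrt{n (n - 1)} / (n - 2).
G_1 = \frac{\sqrt{n (n - 1)}}{n - 2} \cdot \frac{m_3}{m_2^{3/2}}
```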
https://api.github.com/repos/pandas-dev/pandas/pulls/32222
2020-02-24T20:48:21Z
2020-04-16T16:59:58Z
null
2020-04-16T16:59:58Z
CI: Remove float16 fixture value
diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py index 625d559001e72..f85d823cb2fac 100644 --- a/pandas/tests/base/test_ops.py +++ b/pandas/tests/base/test_ops.py @@ -277,6 +277,12 @@ def test_value_counts_unique_nunique_null(self, null_obj, index_or_series_obj): pytest.skip(f"values of {klass} cannot be changed") elif isinstance(orig, pd.MultiIndex): pytest.skip("MultiIndex doesn't support isna") + elif orig.duplicated().any(): + pytest.xfail( + "The test implementation isn't flexible enough to deal" + " with duplicated values. This isn't a bug in the" + " application code, but in the test code." + ) # special assign to the numpy array if is_datetime64tz_dtype(obj):
- [x] xref #32220 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Hopefully, this fixes the flaky test
https://api.github.com/repos/pandas-dev/pandas/pulls/32221
2020-02-24T20:24:56Z
2020-02-25T18:51:26Z
2020-02-25T18:51:26Z
2020-02-25T18:51:35Z
REF: de-duplicate factorize and duplicated code
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 0f3b3838de1b2..1768e682b3db4 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -31,83 +31,62 @@ def time_maybe_convert_objects(self): class Factorize: - params = [[True, False], ["int", "uint", "float", "string"]] - param_names = ["sort", "dtype"] - - def setup(self, sort, dtype): - N = 10 ** 5 - data = { - "int": pd.Int64Index(np.arange(N).repeat(5)), - "uint": pd.UInt64Index(np.arange(N).repeat(5)), - "float": pd.Float64Index(np.random.randn(N).repeat(5)), - "string": tm.makeStringIndex(N).repeat(5), - } - self.idx = data[dtype] - - def time_factorize(self, sort, dtype): - self.idx.factorize(sort=sort) - - -class FactorizeUnique: - - params = [[True, False], ["int", "uint", "float", "string"]] - param_names = ["sort", "dtype"] + params = [ + [True, False], + [True, False], + ["int", "uint", "float", "string", "datetime64[ns]", "datetime64[ns, tz]"], + ] + param_names = ["unique", "sort", "dtype"] - def setup(self, sort, dtype): + def setup(self, unique, sort, dtype): N = 10 ** 5 data = { "int": pd.Int64Index(np.arange(N)), "uint": pd.UInt64Index(np.arange(N)), - "float": pd.Float64Index(np.arange(N)), + "float": pd.Float64Index(np.random.randn(N)), "string": tm.makeStringIndex(N), - } - self.idx = data[dtype] - assert self.idx.is_unique - - def time_factorize(self, sort, dtype): + "datetime64[ns]": pd.date_range("2011-01-01", freq="H", periods=N), + "datetime64[ns, tz]": pd.date_range( + "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo" + ), + }[dtype] + if not unique: + data = data.repeat(5) + self.idx = data + + def time_factorize(self, unique, sort, dtype): self.idx.factorize(sort=sort) class Duplicated: - params = [["first", "last", False], ["int", "uint", "float", "string"]] - param_names = ["keep", "dtype"] - - def setup(self, keep, dtype): - N = 10 ** 5 - data = { - "int": pd.Int64Index(np.arange(N).repeat(5)), - "uint": pd.UInt64Index(np.arange(N).repeat(5)), - "float": pd.Float64Index(np.random.randn(N).repeat(5)), - "string": tm.makeStringIndex(N).repeat(5), - } - self.idx = data[dtype] - # cache is_unique - self.idx.is_unique - - def time_duplicated(self, keep, dtype): - self.idx.duplicated(keep=keep) - - -class DuplicatedUniqueIndex: - - params = ["int", "uint", "float", "string"] - param_names = ["dtype"] + params = [ + [True, False], + ["first", "last", False], + ["int", "uint", "float", "string", "datetime64[ns]", "datetime64[ns, tz]"], + ] + param_names = ["unique", "keep", "dtype"] - def setup(self, dtype): + def setup(self, unique, keep, dtype): N = 10 ** 5 data = { "int": pd.Int64Index(np.arange(N)), "uint": pd.UInt64Index(np.arange(N)), "float": pd.Float64Index(np.random.randn(N)), "string": tm.makeStringIndex(N), - } - self.idx = data[dtype] + "datetime64[ns]": pd.date_range("2011-01-01", freq="H", periods=N), + "datetime64[ns, tz]": pd.date_range( + "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo" + ), + }[dtype] + if not unique: + data = data.repeat(5) + self.idx = data # cache is_unique self.idx.is_unique - def time_duplicated_unique(self, dtype): - self.idx.duplicated() + def time_duplicated(self, unique, keep, dtype): + self.idx.duplicated(keep=keep) class Hashing: diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 2f7ea8b9c0873..e3ed33456ee44 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -91,20 +91,6 @@ def time_reest_datetimeindex(self, tz): 
self.df.reset_index() -class Factorize: - - params = [None, "Asia/Tokyo"] - param_names = "tz" - - def setup(self, tz): - N = 100000 - self.dti = date_range("2011-01-01", freq="H", periods=N, tz=tz) - self.dti = self.dti.repeat(5) - - def time_factorize(self, tz): - self.dti.factorize() - - class InferFreq: params = [None, "D", "B"]
https://api.github.com/repos/pandas-dev/pandas/pulls/32216
2020-02-24T01:13:36Z
2020-02-24T14:32:12Z
2020-02-24T14:32:12Z
2020-02-24T16:13:10Z
REF: include CategoricalIndex in index_cached parametrization
diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 6f43a6fd3fc9b..107b9b9edcd5d 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -258,9 +258,6 @@ def setup(self): def time_get_loc(self): self.index.get_loc(self.category) - def time_shape(self): - self.index.shape - def time_shallow_copy(self): self.index._shallow_copy() diff --git a/asv_bench/benchmarks/index_cached_properties.py b/asv_bench/benchmarks/index_cached_properties.py index 13b33855569c9..16fbc741775e4 100644 --- a/asv_bench/benchmarks/index_cached_properties.py +++ b/asv_bench/benchmarks/index_cached_properties.py @@ -7,6 +7,7 @@ class IndexCache: params = [ [ + "CategoricalIndex", "DatetimeIndex", "Float64Index", "IntervalIndex", @@ -42,6 +43,8 @@ def setup(self, index_type): self.idx = pd.Float64Index(range(N)) elif index_type == "UInt64Index": self.idx = pd.UInt64Index(range(N)) + elif index_type == "CategoricalIndex": + self.idx = pd.CategoricalIndex(range(N), range(N)) else: raise ValueError assert len(self.idx) == N diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index cf51a4d35f805..b242de6a17208 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -55,14 +55,6 @@ def time_datetime_difference_disjoint(self): self.datetime_left.difference(self.datetime_right) -class Datetime: - def setup(self): - self.dr = date_range("20000101", freq="D", periods=10000) - - def time_is_dates_only(self): - self.dr._is_dates_only - - class Range: def setup(self): self.idx_inc = RangeIndex(start=0, stop=10 ** 7, step=3) diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 087fe3916845b..e98d2948e76ea 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -1,3 +1,8 @@ +""" +These benchmarks are for Series and DataFrame indexing methods. For the +lower-level methods directly on Index and subclasses, see index_object.py, +indexing_engine.py, and index_cached.py +""" import warnings import numpy as np diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index b52aa2e55af35..e15d4c66e4fc0 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -85,9 +85,6 @@ def setup(self): def time_get_loc(self): self.index.get_loc(self.period) - def time_shape(self): - self.index.shape - def time_shallow_copy(self): self.index._shallow_copy() diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py index 208c8f9d14a5e..cfe05c3e257b1 100644 --- a/asv_bench/benchmarks/timedelta.py +++ b/asv_bench/benchmarks/timedelta.py @@ -73,9 +73,6 @@ def setup(self): def time_get_loc(self): self.index.get_loc(self.timedelta) - def time_shape(self): - self.index.shape - def time_shallow_copy(self): self.index._shallow_copy() diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index e3ed33456ee44..6c9f8ee77e5ad 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -57,6 +57,9 @@ def time_to_date(self, index_type): def time_to_pydatetime(self, index_type): self.index.to_pydatetime() + def time_is_dates_only(self, index_type): + self.index._is_dates_only + class TzLocalize:
There is a lot of de-duplication to be done, but pytest-based habits don't quite work here. So baby steps; a sketch of the asv convention follows below.
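For context, a minimal sketch of the asv convention these benchmarks follow (class and benchmark names below are illustrative, not from this PR): parameters are declared as class attributes, and every combination is passed positionally to `setup` and to each `time_*` method, so there is no pytest-style `@parametrize` to lean on:

```python
import pandas as pd

class IndexSketch:
    # asv expands the cross-product of params; shared setup logic has to
    # live in setup() itself rather than in decorator-based fixtures.
    params = [["DatetimeIndex", "Float64Index"]]
    param_names = ["index_type"]

    def setup(self, index_type):
        N = 10 ** 5
        if index_type == "DatetimeIndex":
            self.idx = pd.date_range("2011-01-01", freq="H", periods=N)
        else:
            self.idx = pd.Float64Index(range(N))

    def time_is_monotonic(self, index_type):
        self.idx.is_monotonic
```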
https://api.github.com/repos/pandas-dev/pandas/pulls/32215
2020-02-24T01:05:07Z
2020-02-26T02:13:16Z
2020-02-26T02:13:16Z
2020-02-26T02:14:14Z
BUG: Cast pd.NA to pd.NaT in to_datetime
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index f491774991090..d3b1442953e41 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -62,6 +62,7 @@ Bug fixes **Datetimelike** - Bug in :meth:`DataFrame.reindex` and :meth:`Series.reindex` when reindexing with a tz-aware index (:issue:`26683`) +- Bug where :func:`to_datetime` would raise when passed ``pd.NA`` (:issue:`32213`) **Categorical** diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 9f6f401a1a5f5..68a25d0cc481a 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -22,6 +22,8 @@ from pandas._libs.tslibs.util cimport ( get_nat, is_integer_object, is_float_object, is_datetime64_object, is_timedelta64_object) +from pandas._libs.missing cimport C_NA + # ---------------------------------------------------------------------- # Constants @@ -763,7 +765,7 @@ NaT = c_NaT # Python-visible cdef inline bint checknull_with_nat(object val): """ utility to check if a value is a nat or not """ - return val is None or util.is_nan(val) or val is c_NaT + return val is None or util.is_nan(val) or val is c_NaT or val is C_NA cpdef bint is_null_datetimelike(object val, bint inat_is_null=True): diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py index 13723f6455bff..e1f04d3d4489b 100644 --- a/pandas/tests/indexes/datetimes/test_tools.py +++ b/pandas/tests/indexes/datetimes/test_tools.py @@ -2315,3 +2315,10 @@ def test_nullable_integer_to_datetime(): tm.assert_series_equal(res, expected) # Check that ser isn't mutated tm.assert_series_equal(ser, ser_copy) + + +@pytest.mark.parametrize("klass", [np.array, list]) +def test_na_to_datetime(nulls_fixture, klass): + result = pd.to_datetime(klass([nulls_fixture])) + + assert result[0] is pd.NaT
- [x] closes #32213 - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
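A minimal check, mirroring the new test:

```python
import pandas as pd

# pd.NA is now treated as missing by checknull_with_nat, so to_datetime
# converts it to pd.NaT instead of raising.
result = pd.to_datetime([pd.NA])
assert result[0] is pd.NaT
```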
https://api.github.com/repos/pandas-dev/pandas/pulls/32214
2020-02-24T00:30:05Z
2020-02-26T12:39:57Z
2020-02-26T12:39:56Z
2020-02-26T21:25:40Z
DOC: DataFrame.ewm clean-up
diff --git a/pandas/core/window/ewm.py b/pandas/core/window/ewm.py index e045d1c2211d7..9da762d15dadd 100644 --- a/pandas/core/window/ewm.py +++ b/pandas/core/window/ewm.py @@ -29,19 +29,25 @@ class EWM(_Rolling): r""" - Provide exponential weighted functions. + Provide exponential weighted (EW) functions. + + Available EW functions: ``mean()``, ``var()``, ``std()``, ``corr()``, ``cov()``. + + Exactly one parameter: ``com``, ``span``, ``halflife``, or ``alpha`` must be + provided. Parameters ---------- com : float, optional Specify decay in terms of center of mass, - :math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`. + :math:`\alpha = 1 / (1 + com)`, for :math:`com \geq 0`. span : float, optional Specify decay in terms of span, - :math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`. + :math:`\alpha = 2 / (span + 1)`, for :math:`span \geq 1`. halflife : float, optional Specify decay in terms of half-life, - :math:`\alpha = 1 - exp(log(0.5) / halflife),\text{for} halflife > 0`. + :math:`\alpha = 1 - \exp\left(-\ln(2) / halflife\right)`, for + :math:`halflife > 0`. alpha : float, optional Specify smoothing factor :math:`\alpha` directly, :math:`0 < \alpha \leq 1`. @@ -50,11 +56,39 @@ class EWM(_Rolling): (otherwise result is NA). adjust : bool, default True Divide by decaying adjustment factor in beginning periods to account - for imbalance in relative weightings - (viewing EWMA as a moving average). + for imbalance in relative weightings (viewing EWMA as a moving average). + + - When ``adjust=True`` (default), the EW function is calculated using weights + :math:`w_i = (1 - \alpha)^i`. For example, the EW moving average of the series + [:math:`x_0, x_1, ..., x_t`] would be: + + .. math:: + y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ... + (1 - + \alpha)^t x_0}{1 + (1 - \alpha) + (1 - \alpha)^2 + ... + (1 - \alpha)^t} + + - When ``adjust=False``, the exponentially weighted function is calculated + recursively: + + .. math:: + \begin{split} + y_0 &= x_0\\ + y_t &= (1 - \alpha) y_{t-1} + \alpha x_t, + \end{split} ignore_na : bool, default False - Ignore missing values when calculating weights; - specify True to reproduce pre-0.15.0 behavior. + Ignore missing values when calculating weights; specify ``True`` to reproduce + pre-0.15.0 behavior. + + - When ``ignore_na=False`` (default), weights are based on absolute positions. + For example, the weights of :math:`x_0` and :math:`x_2` used in calculating + the final weighted average of [:math:`x_0`, None, :math:`x_2`] are + :math:`(1-\alpha)^2` and :math:`1` if ``adjust=True``, and + :math:`(1-\alpha)^2` and :math:`\alpha` if ``adjust=False``. + + - When ``ignore_na=True`` (reproducing pre-0.15.0 behavior), weights are based + on relative positions. For example, the weights of :math:`x_0` and :math:`x_2` + used in calculating the final weighted average of + [:math:`x_0`, None, :math:`x_2`] are :math:`1-\alpha` and :math:`1` if + ``adjust=True``, and :math:`1-\alpha` and :math:`\alpha` if ``adjust=False``. axis : {0 or 'index', 1 or 'columns'}, default 0 The axis to use. The value 0 identifies the rows, and 1 identifies the columns. @@ -71,30 +105,9 @@ class EWM(_Rolling): Notes ----- - Exactly one of center of mass, span, half-life, and alpha must be provided. - Allowed values and relationship between the parameters are specified in the - parameter descriptions above; see the link at the end of this section for - a detailed explanation. 
- - When adjust is True (default), weighted averages are calculated using - weights (1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1. - - When adjust is False, weighted averages are calculated recursively as: - weighted_average[0] = arg[0]; - weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i]. - - When ignore_na is False (default), weights are based on absolute positions. - For example, the weights of x and y used in calculating the final weighted - average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and - (1-alpha)**2 and alpha (if adjust is False). - - When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based - on relative positions. For example, the weights of x and y used in - calculating the final weighted average of [x, None, y] are 1-alpha and 1 - (if adjust is True), and 1-alpha and alpha (if adjust is False). - - More details can be found at - https://pandas.pydata.org/pandas-docs/stable/user_guide/computation.html#exponentially-weighted-windows + + More details can be found at: + :ref:`Exponentially weighted windows <stats.moments.exponentially_weighted>`. Examples --------
Cosmetic change.
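A small usage sketch of the `adjust` behavior the reworked docstring describes (values here are purely illustrative, to contrast the two weightings):

```python
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])

# adjust=True (default): weights (1 - alpha)^i, normalized by their sum.
print(s.ewm(alpha=0.5, adjust=True).mean())

# adjust=False: the recursive form y_t = (1 - alpha) * y_{t-1} + alpha * x_t.
print(s.ewm(alpha=0.5, adjust=False).mean())
```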
https://api.github.com/repos/pandas-dev/pandas/pulls/32212
2020-02-23T22:56:41Z
2020-03-07T19:59:42Z
2020-03-07T19:59:42Z
2020-03-09T14:25:48Z
CLN/REF: Split up / clean Categorical constructor tests
diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index d5537359d6948..c6b4c4904735c 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -353,9 +353,9 @@ def test_constructor_from_index_series_period(self): result = Categorical(Series(idx)) tm.assert_index_equal(result.categories, idx) - def test_constructor_invariant(self): - # GH 14190 - vals = [ + @pytest.mark.parametrize( + "values", + [ np.array([1.0, 1.2, 1.8, np.nan]), np.array([1, 2, 3], dtype="int64"), ["a", "b", "c", np.nan], @@ -366,11 +366,13 @@ def test_constructor_invariant(self): Timestamp("2014-01-02", tz="US/Eastern"), NaT, ], - ] - for val in vals: - c = Categorical(val) - c2 = Categorical(c) - tm.assert_categorical_equal(c, c2) + ], + ) + def test_constructor_invariant(self, values): + # GH 14190 + c = Categorical(values) + c2 = Categorical(c) + tm.assert_categorical_equal(c, c2) @pytest.mark.parametrize("ordered", [True, False]) def test_constructor_with_dtype(self, ordered): @@ -470,9 +472,14 @@ def test_construction_with_null(self, klass, nulls_fixture): tm.assert_categorical_equal(result, expected) - def test_from_codes(self): + def test_from_codes_empty(self): + cat = ["a", "b", "c"] + result = Categorical.from_codes([], categories=cat) + expected = Categorical([], categories=cat) - # too few categories + tm.assert_categorical_equal(result, expected) + + def test_from_codes_too_few_categories(self): dtype = CategoricalDtype(categories=[1, 2]) msg = "codes need to be between " with pytest.raises(ValueError, match=msg): @@ -480,22 +487,23 @@ def test_from_codes(self): with pytest.raises(ValueError, match=msg): Categorical.from_codes([1, 2], dtype=dtype) - # no int codes + def test_from_codes_non_int_codes(self): + dtype = CategoricalDtype(categories=[1, 2]) msg = "codes need to be array-like integers" with pytest.raises(ValueError, match=msg): Categorical.from_codes(["a"], categories=dtype.categories) with pytest.raises(ValueError, match=msg): Categorical.from_codes(["a"], dtype=dtype) - # no unique categories + def test_from_codes_non_unique_categories(self): with pytest.raises(ValueError, match="Categorical categories must be unique"): Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"]) - # NaN categories included + def test_from_codes_nan_cat_included(self): with pytest.raises(ValueError, match="Categorial categories cannot be null"): Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan]) - # too negative + def test_from_codes_too_negative(self): dtype = CategoricalDtype(categories=["a", "b", "c"]) msg = r"codes need to be between -1 and len\(categories\)-1" with pytest.raises(ValueError, match=msg): @@ -503,6 +511,8 @@ def test_from_codes(self): with pytest.raises(ValueError, match=msg): Categorical.from_codes([-2, 1, 2], dtype=dtype) + def test_from_codes(self): + dtype = CategoricalDtype(categories=["a", "b", "c"]) exp = Categorical(["a", "b", "c"], ordered=False) res = Categorical.from_codes([0, 1, 2], categories=dtype.categories) tm.assert_categorical_equal(exp, res) @@ -510,21 +520,18 @@ def test_from_codes(self): res = Categorical.from_codes([0, 1, 2], dtype=dtype) tm.assert_categorical_equal(exp, res) - def test_from_codes_with_categorical_categories(self): + @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex]) + def test_from_codes_with_categorical_categories(self, klass): # GH17884 expected = Categorical(["a", "b"], 
categories=["a", "b", "c"]) - result = Categorical.from_codes([0, 1], categories=Categorical(["a", "b", "c"])) + result = Categorical.from_codes([0, 1], categories=klass(["a", "b", "c"])) tm.assert_categorical_equal(result, expected) - result = Categorical.from_codes( - [0, 1], categories=CategoricalIndex(["a", "b", "c"]) - ) - tm.assert_categorical_equal(result, expected) - - # non-unique Categorical still raises + @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex]) + def test_from_codes_with_non_unique_categorical_categories(self, klass): with pytest.raises(ValueError, match="Categorical categories must be unique"): - Categorical.from_codes([0, 1], Categorical(["a", "b", "a"])) + Categorical.from_codes([0, 1], klass(["a", "b", "a"])) def test_from_codes_with_nan_code(self): # GH21767 @@ -535,24 +542,16 @@ def test_from_codes_with_nan_code(self): with pytest.raises(ValueError, match="codes need to be array-like integers"): Categorical.from_codes(codes, dtype=dtype) - def test_from_codes_with_float(self): + @pytest.mark.parametrize("codes", [[1.0, 2.0, 0], [1.1, 2.0, 0]]) + def test_from_codes_with_float(self, codes): # GH21767 - codes = [1.0, 2.0, 0] # integer, but in float dtype + # float codes should raise even if values are equal to integers dtype = CategoricalDtype(categories=["a", "b", "c"]) - # empty codes should not raise for floats - Categorical.from_codes([], dtype.categories) - - with pytest.raises(ValueError, match="codes need to be array-like integers"): - Categorical.from_codes(codes, dtype.categories) - - with pytest.raises(ValueError, match="codes need to be array-like integers"): - Categorical.from_codes(codes, dtype=dtype) - - codes = [1.1, 2.0, 0] # non-integer - with pytest.raises(ValueError, match="codes need to be array-like integers"): + msg = "codes need to be array-like integers" + with pytest.raises(ValueError, match=msg): Categorical.from_codes(codes, dtype.categories) - with pytest.raises(ValueError, match="codes need to be array-like integers"): + with pytest.raises(ValueError, match=msg): Categorical.from_codes(codes, dtype=dtype) def test_from_codes_with_dtype_raises(self):
https://api.github.com/repos/pandas-dev/pandas/pulls/32211
2020-02-23T22:50:36Z
2020-02-25T23:44:06Z
2020-02-25T23:44:06Z
2020-02-25T23:44:53Z
REF: share benchmark code
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 0f3b3838de1b2..af4da5a0a1a5a 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -29,87 +29,39 @@ def time_maybe_convert_objects(self): lib.maybe_convert_objects(self.data) -class Factorize: +class IndexAlgos: - params = [[True, False], ["int", "uint", "float", "string"]] - param_names = ["sort", "dtype"] - - def setup(self, sort, dtype): - N = 10 ** 5 - data = { - "int": pd.Int64Index(np.arange(N).repeat(5)), - "uint": pd.UInt64Index(np.arange(N).repeat(5)), - "float": pd.Float64Index(np.random.randn(N).repeat(5)), - "string": tm.makeStringIndex(N).repeat(5), - } - self.idx = data[dtype] - - def time_factorize(self, sort, dtype): - self.idx.factorize(sort=sort) - - -class FactorizeUnique: - - params = [[True, False], ["int", "uint", "float", "string"]] - param_names = ["sort", "dtype"] + params = [ + [True, False], + [True, False], + ["int", "uint", "float", "string", "datetime64[ns]", "datetime64[ns, tz]"], + ] + param_names = ["unique", "sort", "dtype"] - def setup(self, sort, dtype): + def setup(self, unique, sort, dtype): N = 10 ** 5 data = { "int": pd.Int64Index(np.arange(N)), "uint": pd.UInt64Index(np.arange(N)), - "float": pd.Float64Index(np.arange(N)), + "float": pd.Float64Index(np.random.randn(N)), "string": tm.makeStringIndex(N), - } - self.idx = data[dtype] - assert self.idx.is_unique - - def time_factorize(self, sort, dtype): + "datetime64[ns]": pd.date_range("2011-01-01", freq="H", periods=N), + "datetime64[ns, tz]": pd.date_range( + "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo" + ), + }[dtype] + if not unique: + data = data.repeat(5) + data.is_unique # cache is_unique + self.idx = data + + def time_factorize(self, unique, sort, dtype): self.idx.factorize(sort=sort) - -class Duplicated: - - params = [["first", "last", False], ["int", "uint", "float", "string"]] - param_names = ["keep", "dtype"] - - def setup(self, keep, dtype): - N = 10 ** 5 - data = { - "int": pd.Int64Index(np.arange(N).repeat(5)), - "uint": pd.UInt64Index(np.arange(N).repeat(5)), - "float": pd.Float64Index(np.random.randn(N).repeat(5)), - "string": tm.makeStringIndex(N).repeat(5), - } - self.idx = data[dtype] - # cache is_unique - self.idx.is_unique - - def time_duplicated(self, keep, dtype): + def time_duplicated(self, unique, keep, dtype): self.idx.duplicated(keep=keep) -class DuplicatedUniqueIndex: - - params = ["int", "uint", "float", "string"] - param_names = ["dtype"] - - def setup(self, dtype): - N = 10 ** 5 - data = { - "int": pd.Int64Index(np.arange(N)), - "uint": pd.UInt64Index(np.arange(N)), - "float": pd.Float64Index(np.random.randn(N)), - "string": tm.makeStringIndex(N), - } - self.idx = data[dtype] - # cache is_unique - self.idx.is_unique - - def time_duplicated_unique(self, dtype): - self.idx.duplicated() - - class Hashing: def setup_cache(self): N = 10 ** 5 @@ -166,10 +118,10 @@ def setup(self, quantile, interpolation, dtype): "uint": np.arange(N).astype(np.uint64), "float": np.random.randn(N), } - self.idx = pd.Series(data[dtype].repeat(5)) + self.ser = pd.Series(data[dtype].repeat(5)) def time_quantile(self, quantile, interpolation, dtype): - self.idx.quantile(quantile, interpolation=interpolation) + self.ser.quantile(quantile, interpolation=interpolation) class SortIntegerArray: diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 2f7ea8b9c0873..e3ed33456ee44 100644 --- a/asv_bench/benchmarks/timeseries.py +++ 
b/asv_bench/benchmarks/timeseries.py @@ -91,20 +91,6 @@ def time_reest_datetimeindex(self, tz): self.df.reset_index() -class Factorize: - - params = [None, "Asia/Tokyo"] - param_names = "tz" - - def setup(self, tz): - N = 100000 - self.dti = date_range("2011-01-01", freq="H", periods=N, tz=tz) - self.dti = self.dti.repeat(5) - - def time_factorize(self, tz): - self.dti.factorize() - - class InferFreq: params = [None, "D", "B"]
Collapse 5 benchmark classes (`Factorize`, `FactorizeUnique`, `Duplicated`, and `DuplicatedUniqueIndex` in `algorithms.py`, plus the timeseries `Factorize`) down to 1 by parametrizing the shared setup code, as sketched below.
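The consolidation uses the standard ASV pattern: one benchmark class whose `setup` is parametrized, so the unique and non-unique variants share their data construction and every `time_*` method runs once per parameter combination. A minimal sketch of that pattern, with simplified data choices (the dtype set and the string-index construction here are illustrative, not the exact pandas benchmark code):

```python
import numpy as np
import pandas as pd


class IndexAlgos:
    # One parametrized class replaces several near-identical benchmark
    # classes; ASV runs each time_* method for every combination below.
    params = [
        [True, False],               # unique
        [True, False],               # sort
        ["int", "float", "string"],  # dtype
    ]
    param_names = ["unique", "sort", "dtype"]

    def setup(self, unique, sort, dtype):
        N = 10 ** 5
        data = {
            "int": pd.Index(np.arange(N), dtype="int64"),
            "float": pd.Index(np.random.randn(N), dtype="float64"),
            "string": pd.Index([f"s{i}" for i in range(N)]),
        }[dtype]
        if not unique:
            data = data.repeat(5)
        data.is_unique  # warm the cached property so timings exclude it
        self.idx = data

    def time_factorize(self, unique, sort, dtype):
        self.idx.factorize(sort=sort)

    def time_duplicated(self, unique, sort, dtype):
        # keep is fixed here for simplicity; the benchmark in the diff
        # parametrizes it as well
        self.idx.duplicated(keep="first")
```

Because ASV times each `time_*` method across the full cross-product of `params`, collapsing the classes keeps all the old measurements while making them directly comparable across parameter values.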
https://api.github.com/repos/pandas-dev/pandas/pulls/32210
2020-02-23T22:10:27Z
2020-02-24T00:11:18Z
null
2020-04-21T23:02:14Z
CLN: Use defaultdict for minor optimization
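The optimization named in the title is the `_from_nested_dict` change near the end of this diff: a plain `dict` with a `new_data.get(col, {})` lookup-and-rebind on every inner iteration becomes a `collections.defaultdict(dict)`, which materializes each inner dict once, on first access. A standalone sketch of the pattern (the function name and sample data are illustrative, not pandas' internal helper):

```python
import collections


def from_nested_dict(data):
    """Invert {index: {col: value}} into {col: {index: value}}.

    defaultdict(dict) creates a missing column key on first access,
    so there is no per-value existence check or reassignment.
    """
    new_data = collections.defaultdict(dict)
    for index, s in data.items():
        for col, v in s.items():
            new_data[col][index] = v
    return new_data


nested = {"row1": {"a": 1, "b": 2}, "row2": {"a": 3}}
print(dict(from_nested_dict(nested)))
# {'a': {'row1': 1, 'row2': 3}, 'b': {'row1': 2}}
```

The result is identical to the old `dict.get`-based version; the gain is only the skipped lookup-and-rebind per value, hence "minor optimization".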
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 0f3b3838de1b2..1768e682b3db4 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -31,83 +31,62 @@ def time_maybe_convert_objects(self): class Factorize: - params = [[True, False], ["int", "uint", "float", "string"]] - param_names = ["sort", "dtype"] - - def setup(self, sort, dtype): - N = 10 ** 5 - data = { - "int": pd.Int64Index(np.arange(N).repeat(5)), - "uint": pd.UInt64Index(np.arange(N).repeat(5)), - "float": pd.Float64Index(np.random.randn(N).repeat(5)), - "string": tm.makeStringIndex(N).repeat(5), - } - self.idx = data[dtype] - - def time_factorize(self, sort, dtype): - self.idx.factorize(sort=sort) - - -class FactorizeUnique: - - params = [[True, False], ["int", "uint", "float", "string"]] - param_names = ["sort", "dtype"] + params = [ + [True, False], + [True, False], + ["int", "uint", "float", "string", "datetime64[ns]", "datetime64[ns, tz]"], + ] + param_names = ["unique", "sort", "dtype"] - def setup(self, sort, dtype): + def setup(self, unique, sort, dtype): N = 10 ** 5 data = { "int": pd.Int64Index(np.arange(N)), "uint": pd.UInt64Index(np.arange(N)), - "float": pd.Float64Index(np.arange(N)), + "float": pd.Float64Index(np.random.randn(N)), "string": tm.makeStringIndex(N), - } - self.idx = data[dtype] - assert self.idx.is_unique - - def time_factorize(self, sort, dtype): + "datetime64[ns]": pd.date_range("2011-01-01", freq="H", periods=N), + "datetime64[ns, tz]": pd.date_range( + "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo" + ), + }[dtype] + if not unique: + data = data.repeat(5) + self.idx = data + + def time_factorize(self, unique, sort, dtype): self.idx.factorize(sort=sort) class Duplicated: - params = [["first", "last", False], ["int", "uint", "float", "string"]] - param_names = ["keep", "dtype"] - - def setup(self, keep, dtype): - N = 10 ** 5 - data = { - "int": pd.Int64Index(np.arange(N).repeat(5)), - "uint": pd.UInt64Index(np.arange(N).repeat(5)), - "float": pd.Float64Index(np.random.randn(N).repeat(5)), - "string": tm.makeStringIndex(N).repeat(5), - } - self.idx = data[dtype] - # cache is_unique - self.idx.is_unique - - def time_duplicated(self, keep, dtype): - self.idx.duplicated(keep=keep) - - -class DuplicatedUniqueIndex: - - params = ["int", "uint", "float", "string"] - param_names = ["dtype"] + params = [ + [True, False], + ["first", "last", False], + ["int", "uint", "float", "string", "datetime64[ns]", "datetime64[ns, tz]"], + ] + param_names = ["unique", "keep", "dtype"] - def setup(self, dtype): + def setup(self, unique, keep, dtype): N = 10 ** 5 data = { "int": pd.Int64Index(np.arange(N)), "uint": pd.UInt64Index(np.arange(N)), "float": pd.Float64Index(np.random.randn(N)), "string": tm.makeStringIndex(N), - } - self.idx = data[dtype] + "datetime64[ns]": pd.date_range("2011-01-01", freq="H", periods=N), + "datetime64[ns, tz]": pd.date_range( + "2011-01-01", freq="H", periods=N, tz="Asia/Tokyo" + ), + }[dtype] + if not unique: + data = data.repeat(5) + self.idx = data # cache is_unique self.idx.is_unique - def time_duplicated_unique(self, dtype): - self.idx.duplicated() + def time_duplicated(self, unique, keep, dtype): + self.idx.duplicated(keep=keep) class Hashing: diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 6f43a6fd3fc9b..107b9b9edcd5d 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -258,9 +258,6 @@ def setup(self): def 
time_get_loc(self): self.index.get_loc(self.category) - def time_shape(self): - self.index.shape - def time_shallow_copy(self): self.index._shallow_copy() diff --git a/asv_bench/benchmarks/index_cached_properties.py b/asv_bench/benchmarks/index_cached_properties.py index 13b33855569c9..16fbc741775e4 100644 --- a/asv_bench/benchmarks/index_cached_properties.py +++ b/asv_bench/benchmarks/index_cached_properties.py @@ -7,6 +7,7 @@ class IndexCache: params = [ [ + "CategoricalIndex", "DatetimeIndex", "Float64Index", "IntervalIndex", @@ -42,6 +43,8 @@ def setup(self, index_type): self.idx = pd.Float64Index(range(N)) elif index_type == "UInt64Index": self.idx = pd.UInt64Index(range(N)) + elif index_type == "CategoricalIndex": + self.idx = pd.CategoricalIndex(range(N), range(N)) else: raise ValueError assert len(self.idx) == N diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index cf51a4d35f805..b242de6a17208 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -55,14 +55,6 @@ def time_datetime_difference_disjoint(self): self.datetime_left.difference(self.datetime_right) -class Datetime: - def setup(self): - self.dr = date_range("20000101", freq="D", periods=10000) - - def time_is_dates_only(self): - self.dr._is_dates_only - - class Range: def setup(self): self.idx_inc = RangeIndex(start=0, stop=10 ** 7, step=3) diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 087fe3916845b..e98d2948e76ea 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -1,3 +1,8 @@ +""" +These benchmarks are for Series and DataFrame indexing methods. For the +lower-level methods directly on Index and subclasses, see index_object.py, +indexing_engine.py, and index_cached.py +""" import warnings import numpy as np diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index b52aa2e55af35..e15d4c66e4fc0 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -85,9 +85,6 @@ def setup(self): def time_get_loc(self): self.index.get_loc(self.period) - def time_shape(self): - self.index.shape - def time_shallow_copy(self): self.index._shallow_copy() diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py index 208c8f9d14a5e..cfe05c3e257b1 100644 --- a/asv_bench/benchmarks/timedelta.py +++ b/asv_bench/benchmarks/timedelta.py @@ -73,9 +73,6 @@ def setup(self): def time_get_loc(self): self.index.get_loc(self.timedelta) - def time_shape(self): - self.index.shape - def time_shallow_copy(self): self.index._shallow_copy() diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index 2f7ea8b9c0873..6c9f8ee77e5ad 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -57,6 +57,9 @@ def time_to_date(self, index_type): def time_to_pydatetime(self, index_type): self.index.to_pydatetime() + def time_is_dates_only(self, index_type): + self.index._is_dates_only + class TzLocalize: @@ -91,20 +94,6 @@ def time_reest_datetimeindex(self, tz): self.df.reset_index() -class Factorize: - - params = [None, "Asia/Tokyo"] - param_names = "tz" - - def setup(self, tz): - N = 100000 - self.dti = date_range("2011-01-01", freq="H", periods=N, tz=tz) - self.dti = self.dti.repeat(5) - - def time_factorize(self, tz): - self.dti.factorize() - - class InferFreq: params = [None, "D", "B"] diff --git a/ci/code_checks.sh b/ci/code_checks.sh index bb7d8a388e6e2..e2dc543360a62 
100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -269,7 +269,7 @@ if [[ -z "$CHECK" || "$CHECK" == "doctests" ]]; then MSG='Doctests generic.py' ; echo $MSG pytest -q --doctest-modules pandas/core/generic.py \ - -k"-_set_axis_name -_xs -describe -droplevel -groupby -interpolate -pct_change -pipe -reindex -reindex_axis -to_json -transpose -values -xs -to_clipboard" + -k"-_set_axis_name -_xs -describe -groupby -interpolate -pct_change -pipe -reindex -reindex_axis -to_json -transpose -values -xs -to_clipboard" RET=$(($RET + $?)) ; echo $MSG "DONE" MSG='Doctests groupby.py' ; echo $MSG diff --git a/ci/setup_env.sh b/ci/setup_env.sh index e5bee09fe2f79..ae39b0dda5d09 100755 --- a/ci/setup_env.sh +++ b/ci/setup_env.sh @@ -50,7 +50,7 @@ echo echo "update conda" conda config --set ssl_verify false conda config --set quiet true --set always_yes true --set changeps1 false -conda install pip # create conda to create a historical artifact for pip & setuptools +conda install pip conda # create conda to create a historical artifact for pip & setuptools conda update -n base conda echo "conda info -a" diff --git a/doc/source/ecosystem.rst b/doc/source/ecosystem.rst index fb06ee122ae88..b7e53b84f0e02 100644 --- a/doc/source/ecosystem.rst +++ b/doc/source/ecosystem.rst @@ -56,6 +56,11 @@ joining paths, replacing file extensions, and checking if files exist are also a Statistics and machine learning ------------------------------- +`pandas-tfrecords <https://pypi.org/project/pandas-tfrecords/>`__ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Easy saving pandas dataframe to tensorflow tfrecords format and reading tfrecords to pandas. + `Statsmodels <https://www.statsmodels.org/>`__ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/getting_started/basics.rst b/doc/source/getting_started/basics.rst index 277080006cb3c..c6d9a48fcf8ed 100644 --- a/doc/source/getting_started/basics.rst +++ b/doc/source/getting_started/basics.rst @@ -689,6 +689,17 @@ of a 1D array of values. It can also be used as a function on regular arrays: s.value_counts() pd.value_counts(data) +.. versionadded:: 1.1.0 + +The :meth:`~DataFrame.value_counts` method can be used to count combinations across multiple columns. +By default all columns are used but a subset can be selected using the ``subset`` argument. + +.. ipython:: python + + data = {"a": [1, 2, 3, 4], "b": ["x", "x", "y", "y"]} + frame = pd.DataFrame(data) + frame.value_counts() + Similarly, you can get the most frequently occurring value(s) (the mode) of the values in a Series or DataFrame: .. 
ipython:: python diff --git a/doc/source/reference/frame.rst b/doc/source/reference/frame.rst index c7b1cc1c832be..b326bbb5a465e 100644 --- a/doc/source/reference/frame.rst +++ b/doc/source/reference/frame.rst @@ -170,6 +170,7 @@ Computations / descriptive stats DataFrame.std DataFrame.var DataFrame.nunique + DataFrame.value_counts Reindexing / selection / label manipulation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index a4c991dcc166c..888b7d23aeb35 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -43,7 +43,7 @@ Other enhancements - :class:`Styler` may now render CSS more efficiently where multiple cells have the same styling (:issue:`30876`) - When writing directly to a sqlite connection :func:`to_sql` now supports the ``multi`` method (:issue:`29921`) -- +- `OptionError` is now exposed in `pandas.errors` (:issue:`27553`) - .. --------------------------------------------------------------------------- @@ -55,6 +55,7 @@ Other API changes - :meth:`Series.describe` will now show distribution percentiles for ``datetime`` dtypes, statistics ``first`` and ``last`` will now be ``min`` and ``max`` to match with numeric dtypes in :meth:`DataFrame.describe` (:issue:`30164`) +- Added :meth:`DataFrame.value_counts` (:issue:`5377`) - :meth:`Groupby.groups` now returns an abbreviated representation when called on large dataframes (:issue:`1135`) - ``loc`` lookups with an object-dtype :class:`Index` and an integer key will now raise ``KeyError`` instead of ``TypeError`` when key is missing (:issue:`31905`) - @@ -114,6 +115,7 @@ Datetimelike - :meth:`DatetimeArray.searchsorted`, :meth:`TimedeltaArray.searchsorted`, :meth:`PeriodArray.searchsorted` not recognizing non-pandas scalars and incorrectly raising ``ValueError`` instead of ``TypeError`` (:issue:`30950`) - Bug in :class:`Timestamp` where constructing :class:`Timestamp` with dateutil timezone less than 128 nanoseconds before daylight saving time switch from winter to summer would result in nonexistent time (:issue:`31043`) - Bug in :meth:`Period.to_timestamp`, :meth:`Period.start_time` with microsecond frequency returning a timestamp one nanosecond earlier than the correct time (:issue:`31475`) +- :class:`Timestamp` raising confusing error message when year, month or day is missing (:issue:`31200`) Timedelta ^^^^^^^^^ diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index b8c462abe35f1..9f3b4a8a554b5 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -411,10 +411,25 @@ class Timestamp(_Timestamp): ) elif ts_input is _no_input: - # User passed keyword arguments. 
- ts_input = datetime(year, month, day, hour or 0, - minute or 0, second or 0, - microsecond or 0) + # GH 31200 + # When year, month or day is not given, we call the datetime + # constructor to make sure we get the same error message + # since Timestamp inherits datetime + datetime_kwargs = { + "hour": hour or 0, + "minute": minute or 0, + "second": second or 0, + "microsecond": microsecond or 0 + } + if year is not None: + datetime_kwargs["year"] = year + if month is not None: + datetime_kwargs["month"] = month + if day is not None: + datetime_kwargs["day"] = day + + ts_input = datetime(**datetime_kwargs) + elif is_integer_object(freq): # User passed positional arguments: # Timestamp(year, month, day[, hour[, minute[, second[, diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 854075eaa8d09..f637e16caa4c6 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -777,8 +777,10 @@ def searchsorted(self, value, side="left", sorter=None): if isinstance(value, str): try: value = self._scalar_from_string(value) - except ValueError: - raise TypeError("searchsorted requires compatible dtype or scalar") + except ValueError as e: + raise TypeError( + "searchsorted requires compatible dtype or scalar" + ) from e elif is_valid_nat_for_dtype(value, self.dtype): value = NaT @@ -1041,7 +1043,7 @@ def _validate_frequency(cls, index, freq, **kwargs): raise ValueError( f"Inferred frequency {inferred} from passed values " f"does not conform to passed frequency {freq.freqstr}" - ) + ) from e # monotonicity/uniqueness properties are called via frequencies.infer_freq, # see GH#23789 diff --git a/pandas/core/arrays/interval.py b/pandas/core/arrays/interval.py index b11736248c12a..f5167f470b056 100644 --- a/pandas/core/arrays/interval.py +++ b/pandas/core/arrays/interval.py @@ -725,45 +725,18 @@ def _concat_same_type(cls, to_concat): right = np.concatenate([interval.right for interval in to_concat]) return cls._simple_new(left, right, closed=closed, copy=False) - def _shallow_copy(self, left=None, right=None, closed=None): + def _shallow_copy(self, left, right): """ Return a new IntervalArray with the replacement attributes Parameters ---------- - left : array-like + left : Index Values to be used for the left-side of the intervals. - If None, the existing left and right values will be used. - - right : array-like + right : Index Values to be used for the right-side of the intervals. - If None and left is IntervalArray-like, the left and right - of the IntervalArray-like will be used. - - closed : {'left', 'right', 'both', 'neither'}, optional - Whether the intervals are closed on the left-side, right-side, both - or neither. If None, the existing closed will be used. 
""" - if left is None: - - # no values passed - left, right = self.left, self.right - - elif right is None: - - # only single value passed, could be an IntervalArray - # or array of Intervals - if not isinstance(left, (type(self), ABCIntervalIndex)): - left = type(self)(left) - - left, right = left.left, left.right - else: - - # both left and right are values - pass - - closed = closed or self.closed - return self._simple_new(left, right, closed=closed, verify_integrity=False) + return self._simple_new(left, right, closed=self.closed, verify_integrity=False) def copy(self): """ @@ -1035,7 +1008,9 @@ def set_closed(self, closed): msg = f"invalid option for 'closed': {closed}" raise ValueError(msg) - return self._shallow_copy(closed=closed) + return type(self)._simple_new( + left=self.left, right=self.right, closed=closed, verify_integrity=False + ) @property def length(self): diff --git a/pandas/core/base.py b/pandas/core/base.py index 56d3596f71813..85424e35fa0e0 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1196,6 +1196,7 @@ def value_counts( -------- Series.count: Number of non-NA elements in a Series. DataFrame.count: Number of non-NA elements in a DataFrame. + DataFrame.value_counts: Equivalent method on DataFrames. Examples -------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 7efb4fbb878d6..b6b6a4fe74ed5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -111,7 +111,7 @@ from pandas.core.indexes import base as ibase from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.multi import maybe_droplevels +from pandas.core.indexes.multi import MultiIndex, maybe_droplevels from pandas.core.indexes.period import PeriodIndex from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable from pandas.core.internals import BlockManager @@ -4569,6 +4569,10 @@ def drop_duplicates( ------- DataFrame DataFrame with duplicates removed or None if ``inplace=True``. + + See Also + -------- + DataFrame.value_counts: Count unique combinations of columns. """ if self.empty: return self.copy() @@ -4814,6 +4818,102 @@ def sort_index( else: return self._constructor(new_data).__finalize__(self) + def value_counts( + self, + subset: Optional[Sequence[Label]] = None, + normalize: bool = False, + sort: bool = True, + ascending: bool = False, + ): + """ + Return a Series containing counts of unique rows in the DataFrame. + + .. versionadded:: 1.1.0 + + Parameters + ---------- + subset : list-like, optional + Columns to use when counting unique combinations. + normalize : bool, default False + Return proportions rather than frequencies. + sort : bool, default True + Sort by frequencies. + ascending : bool, default False + Sort in ascending order. + + Returns + ------- + Series + + See Also + -------- + Series.value_counts: Equivalent method on Series. + + Notes + ----- + The returned Series will have a MultiIndex with one level per input + column. By default, rows that contain any NA values are omitted from + the result. By default, the resulting Series will be in descending + order so that the first element is the most frequently-occurring row. + + Examples + -------- + >>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6], + ... 'num_wings': [2, 0, 0, 0]}, + ... 
index=['falcon', 'dog', 'cat', 'ant']) + >>> df + num_legs num_wings + falcon 2 2 + dog 4 0 + cat 4 0 + ant 6 0 + + >>> df.value_counts() + num_legs num_wings + 4 0 2 + 6 0 1 + 2 2 1 + dtype: int64 + + >>> df.value_counts(sort=False) + num_legs num_wings + 2 2 1 + 4 0 2 + 6 0 1 + dtype: int64 + + >>> df.value_counts(ascending=True) + num_legs num_wings + 2 2 1 + 6 0 1 + 4 0 2 + dtype: int64 + + >>> df.value_counts(normalize=True) + num_legs num_wings + 4 0 0.50 + 6 0 0.25 + 2 2 0.25 + dtype: float64 + """ + if subset is None: + subset = self.columns.tolist() + + counts = self.groupby(subset).size() + + if sort: + counts = counts.sort_values(ascending=ascending) + if normalize: + counts /= counts.sum() + + # Force MultiIndex for single column + if len(subset) == 1: + counts.index = MultiIndex.from_arrays( + [counts.index], names=[counts.index.name] + ) + + return counts + def nlargest(self, n, columns, keep="first") -> "DataFrame": """ Return the first `n` rows ordered by `columns` in descending order. @@ -8346,9 +8446,8 @@ def isin(self, values) -> "DataFrame": def _from_nested_dict(data): # TODO: this should be seriously cythonized - new_data = {} + new_data = collections.defaultdict(dict) for index, s in data.items(): for col, v in s.items(): - new_data[col] = new_data.get(col, {}) new_data[col][index] = v return new_data diff --git a/pandas/core/generic.py b/pandas/core/generic.py index a6ab0d4034ddb..ff7c481d550d4 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -602,6 +602,10 @@ def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: of levels. axis : {0 or 'index', 1 or 'columns'}, default 0 + Axis along which the level(s) is removed: + + * 0 or 'index': remove level(s) in column. + * 1 or 'columns': remove level(s) in row. Returns ------- @@ -617,7 +621,7 @@ def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: ... ]).set_index([0, 1]).rename_axis(['a', 'b']) >>> df.columns = pd.MultiIndex.from_tuples([ - ... ('c', 'e'), ('d', 'f') + ... ('c', 'e'), ('d', 'f') ... 
], names=['level_1', 'level_2']) >>> df @@ -636,7 +640,7 @@ def droplevel(self: FrameOrSeries, level, axis=0) -> FrameOrSeries: 6 7 8 10 11 12 - >>> df.droplevel('level2', axis=1) + >>> df.droplevel('level_2', axis=1) level_1 c d a b 1 2 3 4 diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index aa22527d8c2d7..67f2f05c8af1e 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -8,7 +8,7 @@ from pandas._libs import algos as libalgos, index as libindex, lib import pandas._libs.join as libjoin -from pandas._libs.lib import is_datetime_array +from pandas._libs.lib import is_datetime_array, no_default from pandas._libs.tslibs import OutOfBoundsDatetime, Timestamp from pandas._libs.tslibs.period import IncompatibleFrequency from pandas._libs.tslibs.timezones import tz_compare @@ -485,7 +485,7 @@ def _get_attributes_dict(self): """ return {k: getattr(self, k, None) for k in self._attributes} - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = no_default): """ Create a new Index with the same class as the caller, don't copy the data, use the same object attributes with passed in attributes taking @@ -496,16 +496,14 @@ def _shallow_copy(self, values=None, **kwargs): Parameters ---------- values : the values to create the new Index, optional - kwargs : updates the default attributes for this Index + name : Label, defaults to self.name """ + name = self.name if name is no_default else name + if values is None: values = self.values - attributes = self._get_attributes_dict() - - attributes.update(kwargs) - - return self._simple_new(values, **attributes) + return self._simple_new(values, name=name) def _shallow_copy_with_infer(self, values, **kwargs): """ diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index caa6a9a93141f..67bed7bd77c7f 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -7,6 +7,8 @@ from pandas._libs import index as libindex from pandas._libs.hashtable import duplicated_int64 +from pandas._libs.lib import no_default +from pandas._typing import Label from pandas.util._decorators import Appender, cache_readonly from pandas.core.dtypes.common import ( @@ -17,7 +19,6 @@ is_scalar, ) from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.dtypes.generic import ABCCategorical, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import accessor @@ -193,7 +194,9 @@ def __new__( raise cls._scalar_data_error(data) data = [] - data = cls._create_categorical(data, dtype=dtype) + assert isinstance(dtype, CategoricalDtype), dtype + if not isinstance(data, Categorical) or data.dtype != dtype: + data = Categorical(data, dtype=dtype) data = data.copy() if copy else data @@ -223,37 +226,11 @@ def _create_from_codes(self, codes, dtype=None, name=None): return CategoricalIndex(cat, name=name) @classmethod - def _create_categorical(cls, data, dtype=None): - """ - *this is an internal non-public method* - - create the correct categorical from data and the properties - - Parameters - ---------- - data : data for new Categorical - dtype : CategoricalDtype, defaults to existing - - Returns - ------- - Categorical - """ - if isinstance(data, (cls, ABCSeries)) and is_categorical_dtype(data): - data = data.values - - if not isinstance(data, ABCCategorical): - return Categorical(data, dtype=dtype) - - if isinstance(dtype, CategoricalDtype) and dtype != data.dtype: - # we want to silently ignore dtype='category' - data = 
data._set_dtype(dtype) - return data - - @classmethod - def _simple_new(cls, values, name=None, dtype=None): + def _simple_new(cls, values: Categorical, name=None, dtype=None): + # GH#32204 dtype is included for compat with Index._simple_new + assert isinstance(values, Categorical), type(values) result = object.__new__(cls) - values = cls._create_categorical(values, dtype=dtype) result._data = values result.name = name @@ -264,13 +241,14 @@ def _simple_new(cls, values, name=None, dtype=None): # -------------------------------------------------------------------- @Appender(Index._shallow_copy.__doc__) - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = no_default): + name = self.name if name is no_default else name + if values is None: values = self.values cat = Categorical(values, dtype=self.dtype) - name = kwargs.get("name", self.name) return type(self)._simple_new(cat, name=name) def _is_dtype_compat(self, other) -> bool: @@ -295,7 +273,8 @@ def _is_dtype_compat(self, other) -> bool: values = other if not is_list_like(values): values = [values] - other = CategoricalIndex(self._create_categorical(other, dtype=self.dtype)) + cat = Categorical(other, dtype=self.dtype) + other = CategoricalIndex(cat) if not other.isin(values).all(): raise TypeError( "cannot append a non-category item to a CategoricalIndex" diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 1b3b6934aa53a..349b582de4358 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -8,6 +8,7 @@ from pandas._libs import NaT, iNaT, join as libjoin, lib from pandas._libs.tslibs import timezones +from pandas._typing import Label from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, cache_readonly @@ -551,14 +552,6 @@ def _summary(self, name=None) -> str: result = result.replace("'", "") return result - def _concat_same_dtype(self, to_concat, name): - """ - Concatenate to_concat which has the same class. - """ - new_data = type(self._data)._concat_same_type(to_concat) - - return self._simple_new(new_data, name=name) - def shift(self, periods=1, freq=None): """ Shift index by desired number of time frequency increments. @@ -649,7 +642,9 @@ def _set_freq(self, freq): self._data._freq = freq - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = lib.no_default): + name = self.name if name is lib.no_default else name + if values is None: values = self._data @@ -657,18 +652,16 @@ def _shallow_copy(self, values=None, **kwargs): values = values._data if isinstance(values, np.ndarray): # TODO: We would rather not get here - if kwargs.get("freq") is not None: - raise ValueError(kwargs) values = type(self._data)(values, dtype=self.dtype) attributes = self._get_attributes_dict() - if "freq" not in kwargs and self.freq is not None: + if self.freq is not None: if isinstance(values, (DatetimeArray, TimedeltaArray)): if values.freq is None: del attributes["freq"] - attributes.update(kwargs) + attributes["name"] = name return type(self)._simple_new(values, **attributes) # -------------------------------------------------------------------- @@ -738,9 +731,7 @@ def intersection(self, other, sort=False): # this point, depending on the values. 
result._set_freq(None) - result = self._shallow_copy( - result._data, name=result.name, dtype=result.dtype, freq=None - ) + result = self._shallow_copy(result._data, name=result.name) if result.freq is None: result._set_freq("infer") return result diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 6ea4250e4acf4..b3923a1298859 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -333,11 +333,12 @@ def from_tuples( # -------------------------------------------------------------------- @Appender(Index._shallow_copy.__doc__) - def _shallow_copy(self, left=None, right=None, **kwargs): - result = self._data._shallow_copy(left=left, right=right) + def _shallow_copy(self, values=None, **kwargs): + if values is None: + values = self._data attributes = self._get_attributes_dict() attributes.update(kwargs) - return self._simple_new(result, **attributes) + return self._simple_new(values, **attributes) @cache_readonly def _isnan(self): @@ -407,7 +408,7 @@ def astype(self, dtype, copy=True): with rewrite_exception("IntervalArray", type(self).__name__): new_values = self.values.astype(dtype, copy=copy) if is_interval_dtype(new_values): - return self._shallow_copy(new_values.left, new_values.right) + return self._shallow_copy(new_values) return Index.astype(self, dtype, copy=copy) @property @@ -881,7 +882,8 @@ def where(self, cond, other=None): if other is None: other = self._na_value values = np.where(cond, self.values, other) - return self._shallow_copy(values) + result = IntervalArray(values) + return self._shallow_copy(result) def delete(self, loc): """ @@ -893,7 +895,8 @@ def delete(self, loc): """ new_left = self.left.delete(loc) new_right = self.right.delete(loc) - return self._shallow_copy(new_left, new_right) + result = self._data._shallow_copy(new_left, new_right) + return self._shallow_copy(result) def insert(self, loc, item): """ @@ -927,7 +930,8 @@ def insert(self, loc, item): new_left = self.left.insert(loc, left_insert) new_right = self.right.insert(loc, right_insert) - return self._shallow_copy(new_left, new_right) + result = self._data._shallow_copy(new_left, new_right) + return self._shallow_copy(result) @Appender(_index_shared_docs["take"] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): diff --git a/pandas/core/indexes/numeric.py b/pandas/core/indexes/numeric.py index 367870f0ee467..06a26cc90555e 100644 --- a/pandas/core/indexes/numeric.py +++ b/pandas/core/indexes/numeric.py @@ -3,7 +3,7 @@ import numpy as np from pandas._libs import index as libindex, lib -from pandas._typing import Dtype +from pandas._typing import Dtype, Label from pandas.util._decorators import Appender, cache_readonly from pandas.core.dtypes.cast import astype_nansafe @@ -103,7 +103,7 @@ def _maybe_cast_slice_bound(self, label, side, kind): return self._maybe_cast_indexer(label) @Appender(Index._shallow_copy.__doc__) - def _shallow_copy(self, values=None, name=lib.no_default): + def _shallow_copy(self, values=None, name: Label = lib.no_default): name = name if name is not lib.no_default else self.name if values is not None and not self._can_hold_na and values.dtype.kind == "f": diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 0b85433b699a8..c7c11c60185b3 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -5,9 +5,11 @@ import numpy as np from pandas._libs import index as libindex +from pandas._libs.lib import no_default from 
pandas._libs.tslibs import frequencies as libfrequencies, resolution from pandas._libs.tslibs.parsing import parse_time_string from pandas._libs.tslibs.period import Period +from pandas._typing import Label from pandas.util._decorators import Appender, cache_readonly from pandas.core.dtypes.common import ( @@ -248,8 +250,10 @@ def _has_complex_internals(self): # used to avoid libreduction code paths, which raise or require conversion return True - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = no_default): # TODO: simplify, figure out type of values + name = name if name is not no_default else self.name + if values is None: values = self._data @@ -263,18 +267,7 @@ def _shallow_copy(self, values=None, **kwargs): # GH#30713 this should never be reached raise TypeError(type(values), getattr(values, "dtype", None)) - # We don't allow changing `freq` in _shallow_copy. - validate_dtype_freq(self.dtype, kwargs.get("freq")) - attributes = self._get_attributes_dict() - - attributes.update(kwargs) - if not len(values) and "dtype" not in kwargs: - attributes["dtype"] = self.dtype - return self._simple_new(values, **attributes) - - def _shallow_copy_with_infer(self, values=None, **kwargs): - """ we always want to return a PeriodIndex """ - return self._shallow_copy(values=values, **kwargs) + return self._simple_new(values, name=name) def _maybe_convert_timedelta(self, other): """ diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py index d6752da6bc58f..fa8551bc646a6 100644 --- a/pandas/core/indexes/range.py +++ b/pandas/core/indexes/range.py @@ -7,6 +7,8 @@ import numpy as np from pandas._libs import index as libindex +from pandas._libs.lib import no_default +from pandas._typing import Label import pandas.compat as compat from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, cache_readonly @@ -385,13 +387,13 @@ def tolist(self): return list(self._range) @Appender(Int64Index._shallow_copy.__doc__) - def _shallow_copy(self, values=None, **kwargs): + def _shallow_copy(self, values=None, name: Label = no_default): + name = self.name if name is no_default else name + if values is None: - name = kwargs.get("name", self.name) return self._simple_new(self._range, name=name) else: - kwargs.setdefault("name", self.name) - return self._int64index._shallow_copy(values, **kwargs) + return Int64Index._simple_new(values, name=name) @Appender(Int64Index.copy.__doc__) def copy(self, name=None, deep=False, dtype=None, **kwargs): diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index d4f9c15a9f73f..329bfdf543c62 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -141,7 +141,7 @@ def __init__( if do_integrity_check: self._verify_integrity() - self._consolidate_check() + self._known_consolidated = False self._rebuild_blknos_and_blklocs() @@ -726,7 +726,6 @@ def get_slice(self, slobj: slice, axis: int = 0): new_axes[axis] = new_axes[axis][slobj] bm = type(self)(new_blocks, new_axes, do_integrity_check=False) - bm._consolidate_inplace() return bm def __contains__(self, item) -> bool: diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index ebe9a3d5bf472..29e69cc5fe509 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -4,6 +4,8 @@ Expose public exceptions & warnings """ +from pandas._config.config import OptionError + from pandas._libs.tslibs import NullFrequencyError, OutOfBoundsDatetime diff --git 
a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index d5537359d6948..c6b4c4904735c 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -353,9 +353,9 @@ def test_constructor_from_index_series_period(self): result = Categorical(Series(idx)) tm.assert_index_equal(result.categories, idx) - def test_constructor_invariant(self): - # GH 14190 - vals = [ + @pytest.mark.parametrize( + "values", + [ np.array([1.0, 1.2, 1.8, np.nan]), np.array([1, 2, 3], dtype="int64"), ["a", "b", "c", np.nan], @@ -366,11 +366,13 @@ def test_constructor_invariant(self): Timestamp("2014-01-02", tz="US/Eastern"), NaT, ], - ] - for val in vals: - c = Categorical(val) - c2 = Categorical(c) - tm.assert_categorical_equal(c, c2) + ], + ) + def test_constructor_invariant(self, values): + # GH 14190 + c = Categorical(values) + c2 = Categorical(c) + tm.assert_categorical_equal(c, c2) @pytest.mark.parametrize("ordered", [True, False]) def test_constructor_with_dtype(self, ordered): @@ -470,9 +472,14 @@ def test_construction_with_null(self, klass, nulls_fixture): tm.assert_categorical_equal(result, expected) - def test_from_codes(self): + def test_from_codes_empty(self): + cat = ["a", "b", "c"] + result = Categorical.from_codes([], categories=cat) + expected = Categorical([], categories=cat) - # too few categories + tm.assert_categorical_equal(result, expected) + + def test_from_codes_too_few_categories(self): dtype = CategoricalDtype(categories=[1, 2]) msg = "codes need to be between " with pytest.raises(ValueError, match=msg): @@ -480,22 +487,23 @@ def test_from_codes(self): with pytest.raises(ValueError, match=msg): Categorical.from_codes([1, 2], dtype=dtype) - # no int codes + def test_from_codes_non_int_codes(self): + dtype = CategoricalDtype(categories=[1, 2]) msg = "codes need to be array-like integers" with pytest.raises(ValueError, match=msg): Categorical.from_codes(["a"], categories=dtype.categories) with pytest.raises(ValueError, match=msg): Categorical.from_codes(["a"], dtype=dtype) - # no unique categories + def test_from_codes_non_unique_categories(self): with pytest.raises(ValueError, match="Categorical categories must be unique"): Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"]) - # NaN categories included + def test_from_codes_nan_cat_included(self): with pytest.raises(ValueError, match="Categorial categories cannot be null"): Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan]) - # too negative + def test_from_codes_too_negative(self): dtype = CategoricalDtype(categories=["a", "b", "c"]) msg = r"codes need to be between -1 and len\(categories\)-1" with pytest.raises(ValueError, match=msg): @@ -503,6 +511,8 @@ def test_from_codes(self): with pytest.raises(ValueError, match=msg): Categorical.from_codes([-2, 1, 2], dtype=dtype) + def test_from_codes(self): + dtype = CategoricalDtype(categories=["a", "b", "c"]) exp = Categorical(["a", "b", "c"], ordered=False) res = Categorical.from_codes([0, 1, 2], categories=dtype.categories) tm.assert_categorical_equal(exp, res) @@ -510,21 +520,18 @@ def test_from_codes(self): res = Categorical.from_codes([0, 1, 2], dtype=dtype) tm.assert_categorical_equal(exp, res) - def test_from_codes_with_categorical_categories(self): + @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex]) + def test_from_codes_with_categorical_categories(self, klass): # GH17884 expected = Categorical(["a", "b"], categories=["a", "b", 
"c"]) - result = Categorical.from_codes([0, 1], categories=Categorical(["a", "b", "c"])) + result = Categorical.from_codes([0, 1], categories=klass(["a", "b", "c"])) tm.assert_categorical_equal(result, expected) - result = Categorical.from_codes( - [0, 1], categories=CategoricalIndex(["a", "b", "c"]) - ) - tm.assert_categorical_equal(result, expected) - - # non-unique Categorical still raises + @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex]) + def test_from_codes_with_non_unique_categorical_categories(self, klass): with pytest.raises(ValueError, match="Categorical categories must be unique"): - Categorical.from_codes([0, 1], Categorical(["a", "b", "a"])) + Categorical.from_codes([0, 1], klass(["a", "b", "a"])) def test_from_codes_with_nan_code(self): # GH21767 @@ -535,24 +542,16 @@ def test_from_codes_with_nan_code(self): with pytest.raises(ValueError, match="codes need to be array-like integers"): Categorical.from_codes(codes, dtype=dtype) - def test_from_codes_with_float(self): + @pytest.mark.parametrize("codes", [[1.0, 2.0, 0], [1.1, 2.0, 0]]) + def test_from_codes_with_float(self, codes): # GH21767 - codes = [1.0, 2.0, 0] # integer, but in float dtype + # float codes should raise even if values are equal to integers dtype = CategoricalDtype(categories=["a", "b", "c"]) - # empty codes should not raise for floats - Categorical.from_codes([], dtype.categories) - - with pytest.raises(ValueError, match="codes need to be array-like integers"): - Categorical.from_codes(codes, dtype.categories) - - with pytest.raises(ValueError, match="codes need to be array-like integers"): - Categorical.from_codes(codes, dtype=dtype) - - codes = [1.1, 2.0, 0] # non-integer - with pytest.raises(ValueError, match="codes need to be array-like integers"): + msg = "codes need to be array-like integers" + with pytest.raises(ValueError, match=msg): Categorical.from_codes(codes, dtype.categories) - with pytest.raises(ValueError, match="codes need to be array-like integers"): + with pytest.raises(ValueError, match=msg): Categorical.from_codes(codes, dtype=dtype) def test_from_codes_with_dtype_raises(self): diff --git a/pandas/tests/base/test_ops.py b/pandas/tests/base/test_ops.py index 625d559001e72..f85d823cb2fac 100644 --- a/pandas/tests/base/test_ops.py +++ b/pandas/tests/base/test_ops.py @@ -277,6 +277,12 @@ def test_value_counts_unique_nunique_null(self, null_obj, index_or_series_obj): pytest.skip(f"values of {klass} cannot be changed") elif isinstance(orig, pd.MultiIndex): pytest.skip("MultiIndex doesn't support isna") + elif orig.duplicated().any(): + pytest.xfail( + "The test implementation isn't flexible enough to deal" + " with duplicated values. This isn't a bug in the" + " application code, but in the test code." 
+ ) # special assign to the numpy array if is_datetime64tz_dtype(obj): diff --git a/pandas/tests/frame/conftest.py b/pandas/tests/frame/conftest.py index 774eb443c45fe..03598b6bb5eca 100644 --- a/pandas/tests/frame/conftest.py +++ b/pandas/tests/frame/conftest.py @@ -1,3 +1,5 @@ +from itertools import product + import numpy as np import pytest @@ -5,6 +7,11 @@ import pandas._testing as tm +@pytest.fixture(params=product([True, False], [True, False])) +def close_open_fixture(request): + return request.param + + @pytest.fixture def float_frame_with_na(): """ diff --git a/pandas/tests/frame/methods/test_asfreq.py b/pandas/tests/frame/methods/test_asfreq.py new file mode 100644 index 0000000000000..40b0ec0c0d811 --- /dev/null +++ b/pandas/tests/frame/methods/test_asfreq.py @@ -0,0 +1,58 @@ +from datetime import datetime + +import numpy as np + +from pandas import DataFrame, DatetimeIndex, Series, date_range +import pandas._testing as tm + +from pandas.tseries import offsets + + +class TestAsFreq: + def test_asfreq(self, datetime_frame): + offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd()) + rule_monthly = datetime_frame.asfreq("BM") + + tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"]) + + filled = rule_monthly.asfreq("B", method="pad") # noqa + # TODO: actually check that this worked. + + # don't forget! + filled_dep = rule_monthly.asfreq("B", method="pad") # noqa + + # test does not blow up on length-0 DataFrame + zero_length = datetime_frame.reindex([]) + result = zero_length.asfreq("BM") + assert result is not zero_length + + def test_asfreq_datetimeindex(self): + df = DataFrame( + {"A": [1, 2, 3]}, + index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)], + ) + df = df.asfreq("B") + assert isinstance(df.index, DatetimeIndex) + + ts = df["A"].asfreq("B") + assert isinstance(ts.index, DatetimeIndex) + + def test_asfreq_fillvalue(self): + # test for fill value during upsampling, related to issue 3715 + + # setup + rng = date_range("1/1/2016", periods=10, freq="2S") + ts = Series(np.arange(len(rng)), index=rng) + df = DataFrame({"one": ts}) + + # insert pre-existing missing value + df.loc["2016-01-01 00:00:08", "one"] = None + + actual_df = df.asfreq(freq="1S", fill_value=9.0) + expected_df = df.asfreq(freq="1S").fillna(9.0) + expected_df.loc["2016-01-01 00:00:08", "one"] = None + tm.assert_frame_equal(expected_df, actual_df) + + expected_series = ts.asfreq(freq="1S").fillna(9.0) + actual_series = ts.asfreq(freq="1S", fill_value=9.0) + tm.assert_series_equal(expected_series, actual_series) diff --git a/pandas/tests/frame/methods/test_at_time.py b/pandas/tests/frame/methods/test_at_time.py new file mode 100644 index 0000000000000..108bbbfa183c4 --- /dev/null +++ b/pandas/tests/frame/methods/test_at_time.py @@ -0,0 +1,86 @@ +from datetime import time + +import numpy as np +import pytest +import pytz + +from pandas import DataFrame, date_range +import pandas._testing as tm + + +class TestAtTime: + def test_at_time(self): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), 2), index=rng) + rs = ts.at_time(rng[1]) + assert (rs.index.hour == rng[1].hour).all() + assert (rs.index.minute == rng[1].minute).all() + assert (rs.index.second == rng[1].second).all() + + result = ts.at_time("9:30") + expected = ts.at_time(time(9, 30)) + tm.assert_frame_equal(result, expected) + + result = ts.loc[time(9, 30)] + expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)] + + tm.assert_frame_equal(result, expected) + + # 
midnight, everything + rng = date_range("1/1/2000", "1/31/2000") + ts = DataFrame(np.random.randn(len(rng), 3), index=rng) + + result = ts.at_time(time(0, 0)) + tm.assert_frame_equal(result, ts) + + # time doesn't exist + rng = date_range("1/1/2012", freq="23Min", periods=384) + ts = DataFrame(np.random.randn(len(rng), 2), rng) + rs = ts.at_time("16:00") + assert len(rs) == 0 + + @pytest.mark.parametrize( + "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)] + ) + def test_at_time_errors(self, hour): + # GH#24043 + dti = date_range("2018", periods=3, freq="H") + df = DataFrame(list(range(len(dti))), index=dti) + if getattr(hour, "tzinfo", None) is None: + result = df.at_time(hour) + expected = df.iloc[1:2] + tm.assert_frame_equal(result, expected) + else: + with pytest.raises(ValueError, match="Index must be timezone"): + df.at_time(hour) + + def test_at_time_tz(self): + # GH#24043 + dti = date_range("2018", periods=3, freq="H", tz="US/Pacific") + df = DataFrame(list(range(len(dti))), index=dti) + result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern"))) + expected = df.iloc[1:2] + tm.assert_frame_equal(result, expected) + + def test_at_time_raises(self): + # GH#20725 + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(TypeError): # index is not a DatetimeIndex + df.at_time("00:00") + + @pytest.mark.parametrize("axis", ["index", "columns", 0, 1]) + def test_at_time_axis(self, axis): + # issue 8839 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), len(rng))) + ts.index, ts.columns = rng, rng + + indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)] + + if axis in ["index", 0]: + expected = ts.loc[indices, :] + elif axis in ["columns", 1]: + expected = ts.loc[:, indices] + + result = ts.at_time("9:30", axis=axis) + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/frame/methods/test_between_time.py b/pandas/tests/frame/methods/test_between_time.py new file mode 100644 index 0000000000000..b40604b4f4a16 --- /dev/null +++ b/pandas/tests/frame/methods/test_between_time.py @@ -0,0 +1,110 @@ +from datetime import time + +import numpy as np +import pytest + +from pandas import DataFrame, date_range +import pandas._testing as tm + + +class TestBetweenTime: + def test_between_time(self, close_open_fixture): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), 2), index=rng) + stime = time(0, 0) + etime = time(1, 0) + inc_start, inc_end = close_open_fixture + + filtered = ts.between_time(stime, etime, inc_start, inc_end) + exp_len = 13 * 4 + 1 + if not inc_start: + exp_len -= 5 + if not inc_end: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inc_start: + assert t >= stime + else: + assert t > stime + + if inc_end: + assert t <= etime + else: + assert t < etime + + result = ts.between_time("00:00", "01:00") + expected = ts.between_time(stime, etime) + tm.assert_frame_equal(result, expected) + + # across midnight + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), 2), index=rng) + stime = time(22, 0) + etime = time(9, 0) + + filtered = ts.between_time(stime, etime, inc_start, inc_end) + exp_len = (12 * 11 + 1) * 4 + 1 + if not inc_start: + exp_len -= 4 + if not inc_end: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inc_start: + assert (t >= stime) or (t <= etime) + else: + assert (t > stime) or (t 
<= etime) + + if inc_end: + assert (t <= etime) or (t >= stime) + else: + assert (t < etime) or (t >= stime) + + def test_between_time_raises(self): + # GH#20725 + df = DataFrame([[1, 2, 3], [4, 5, 6]]) + with pytest.raises(TypeError): # index is not a DatetimeIndex + df.between_time(start_time="00:00", end_time="12:00") + + def test_between_time_axis(self, axis): + # GH#8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + ts = DataFrame(np.random.randn(len(rng), len(rng))) + stime, etime = ("08:00:00", "09:00:00") + exp_len = 7 + + if axis in ["index", 0]: + ts.index = rng + assert len(ts.between_time(stime, etime)) == exp_len + assert len(ts.between_time(stime, etime, axis=0)) == exp_len + + if axis in ["columns", 1]: + ts.columns = rng + selected = ts.between_time(stime, etime, axis=1).columns + assert len(selected) == exp_len + + def test_between_time_axis_raises(self, axis): + # issue 8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + mask = np.arange(0, len(rng)) + rand_data = np.random.randn(len(rng), len(rng)) + ts = DataFrame(rand_data, index=rng, columns=rng) + stime, etime = ("08:00:00", "09:00:00") + + msg = "Index must be DatetimeIndex" + if axis in ["columns", 1]: + ts.index = mask + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime) + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime, axis=0) + + if axis in ["index", 0]: + ts.columns = mask + with pytest.raises(TypeError, match=msg): + ts.between_time(stime, etime, axis=1) diff --git a/pandas/tests/frame/methods/test_combine.py b/pandas/tests/frame/methods/test_combine.py new file mode 100644 index 0000000000000..bc6a67e4e1f32 --- /dev/null +++ b/pandas/tests/frame/methods/test_combine.py @@ -0,0 +1,47 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +class TestCombine: + @pytest.mark.parametrize( + "data", + [ + pd.date_range("2000", periods=4), + pd.date_range("2000", periods=4, tz="US/Central"), + pd.period_range("2000", periods=4), + pd.timedelta_range(0, periods=4), + ], + ) + def test_combine_datetlike_udf(self, data): + # GH#23079 + df = pd.DataFrame({"A": data}) + other = df.copy() + df.iloc[1, 0] = None + + def combiner(a, b): + return b + + result = df.combine(other, combiner) + tm.assert_frame_equal(result, other) + + def test_combine_generic(self, float_frame): + df1 = float_frame + df2 = float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]] + + combined = df1.combine(df2, np.add) + combined2 = df2.combine(df1, np.add) + assert combined["D"].isna().all() + assert combined2["D"].isna().all() + + chunk = combined.loc[combined.index[:-5], ["A", "B", "C"]] + chunk2 = combined2.loc[combined2.index[:-5], ["A", "B", "C"]] + + exp = ( + float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]].reindex_like(chunk) + * 2 + ) + tm.assert_frame_equal(chunk, exp) + tm.assert_frame_equal(chunk2, exp) diff --git a/pandas/tests/frame/methods/test_rename.py b/pandas/tests/frame/methods/test_rename.py new file mode 100644 index 0000000000000..e69a562f8214d --- /dev/null +++ b/pandas/tests/frame/methods/test_rename.py @@ -0,0 +1,353 @@ +from collections import ChainMap + +import numpy as np +import pytest + +from pandas import DataFrame, Index, MultiIndex +import pandas._testing as tm + + +class TestRename: + def test_rename(self, float_frame): + mapping = {"A": "a", "B": "b", "C": "c", "D": "d"} + + renamed = float_frame.rename(columns=mapping) + renamed2 = float_frame.rename(columns=str.lower) + + 
+        tm.assert_frame_equal(renamed, renamed2)
+        tm.assert_frame_equal(
+            renamed2.rename(columns=str.upper), float_frame, check_names=False
+        )
+
+        # index
+        data = {"A": {"foo": 0, "bar": 1}}
+
+        # gets sorted alphabetical
+        df = DataFrame(data)
+        renamed = df.rename(index={"foo": "bar", "bar": "foo"})
+        tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
+
+        renamed = df.rename(index=str.upper)
+        tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
+
+        # have to pass something
+        with pytest.raises(TypeError, match="must pass an index to rename"):
+            float_frame.rename()
+
+        # partial columns
+        renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
+        tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
+
+        # other axis
+        renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
+        tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
+
+        # index with name
+        index = Index(["foo", "bar"], name="name")
+        renamer = DataFrame(data, index=index)
+        renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
+        tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
+        assert renamed.index.name == renamer.index.name
+
+    @pytest.mark.parametrize(
+        "args,kwargs",
+        [
+            ((ChainMap({"A": "a"}, {"B": "b"}),), dict(axis="columns")),
+            ((), dict(columns=ChainMap({"A": "a"}, {"B": "b"}))),
+        ],
+    )
+    def test_rename_chainmap(self, args, kwargs):
+        # see gh-23859
+        colAData = range(1, 11)
+        colBdata = np.random.randn(10)
+
+        df = DataFrame({"A": colAData, "B": colBdata})
+        result = df.rename(*args, **kwargs)
+
+        expected = DataFrame({"a": colAData, "b": colBdata})
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "kwargs, rename_index, rename_columns",
+        [
+            ({"mapper": None, "axis": 0}, True, False),
+            ({"mapper": None, "axis": 1}, False, True),
+            ({"index": None}, True, False),
+            ({"columns": None}, False, True),
+            ({"index": None, "columns": None}, True, True),
+            ({}, False, False),
+        ],
+    )
+    def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
+        # GH 25034
+        index = Index(list("abc"), name="foo")
+        columns = Index(["col1", "col2"], name="bar")
+        data = np.arange(6).reshape(3, 2)
+        df = DataFrame(data, index, columns)
+
+        result = df.rename_axis(**kwargs)
+        expected_index = index.rename(None) if rename_index else index
+        expected_columns = columns.rename(None) if rename_columns else columns
+        expected = DataFrame(data, expected_index, expected_columns)
+        tm.assert_frame_equal(result, expected)
+
+    def test_rename_multiindex(self):
+
+        tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
+        tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
+        index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
+        columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
+        df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
+
+        #
+        # without specifying level -> across all levels
+
+        renamed = df.rename(
+            index={"foo1": "foo3", "bar2": "bar3"},
+            columns={"fizz1": "fizz3", "buzz2": "buzz3"},
+        )
+        new_index = MultiIndex.from_tuples(
+            [("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
+        )
+        new_columns = MultiIndex.from_tuples(
+            [("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
+        )
+        tm.assert_index_equal(renamed.index, new_index)
+        tm.assert_index_equal(renamed.columns, new_columns)
+        assert renamed.index.names == df.index.names
+        assert renamed.columns.names == df.columns.names
+
+        #
+        # with specifying a level (GH13766)
+
+        # dict
+        new_columns = MultiIndex.from_tuples(
+            [("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
+        )
+        renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
+        tm.assert_index_equal(renamed.columns, new_columns)
+        renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
+        tm.assert_index_equal(renamed.columns, new_columns)
+
+        new_columns = MultiIndex.from_tuples(
+            [("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
+        )
+        renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
+        tm.assert_index_equal(renamed.columns, new_columns)
+        renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
+        tm.assert_index_equal(renamed.columns, new_columns)
+
+        # function
+        func = str.upper
+        new_columns = MultiIndex.from_tuples(
+            [("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
+        )
+        renamed = df.rename(columns=func, level=0)
+        tm.assert_index_equal(renamed.columns, new_columns)
+        renamed = df.rename(columns=func, level="fizz")
+        tm.assert_index_equal(renamed.columns, new_columns)
+
+        new_columns = MultiIndex.from_tuples(
+            [("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
+        )
+        renamed = df.rename(columns=func, level=1)
+        tm.assert_index_equal(renamed.columns, new_columns)
+        renamed = df.rename(columns=func, level="buzz")
+        tm.assert_index_equal(renamed.columns, new_columns)
+
+        # index
+        new_index = MultiIndex.from_tuples(
+            [("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
+        )
+        renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
+        tm.assert_index_equal(renamed.index, new_index)
+
+    def test_rename_nocopy(self, float_frame):
+        renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
+        renamed["foo"] = 1.0
+        assert (float_frame["C"] == 1.0).all()
+
+    def test_rename_inplace(self, float_frame):
+        float_frame.rename(columns={"C": "foo"})
+        assert "C" in float_frame
+        assert "foo" not in float_frame
+
+        c_id = id(float_frame["C"])
+        float_frame = float_frame.copy()
+        float_frame.rename(columns={"C": "foo"}, inplace=True)
+
+        assert "C" not in float_frame
+        assert "foo" in float_frame
+        assert id(float_frame["foo"]) != c_id
+
+    def test_rename_bug(self):
+        # GH 5344
+        # rename set ref_locs, and set_index was not resetting
+        df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
+        df = df.rename(columns={0: "a"})
+        df = df.rename(columns={1: "b"})
+        df = df.set_index(["a", "b"])
+        df.columns = ["2001-01-01"]
+        expected = DataFrame(
+            [[1], [2]],
+            index=MultiIndex.from_tuples(
+                [("foo", "bah"), ("bar", "bas")], names=["a", "b"]
+            ),
+            columns=["2001-01-01"],
+        )
+        tm.assert_frame_equal(df, expected)
+
+    def test_rename_bug2(self):
+        # GH 19497
+        # rename was changing Index to MultiIndex if Index contained tuples
+
+        df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
+        df = df.rename({(1, 1): (5, 4)}, axis="index")
+        expected = DataFrame(
+            data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
+        )
+        tm.assert_frame_equal(df, expected)
+
+    def test_rename_errors_raises(self):
+        df = DataFrame(columns=["A", "B", "C", "D"])
+        with pytest.raises(KeyError, match="'E'] not found in axis"):
+            df.rename(columns={"A": "a", "E": "e"}, errors="raise")
+
+    @pytest.mark.parametrize(
+        "mapper, errors, expected_columns",
+        [
+            ({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
+            ({"A": "a"}, "raise", ["a", "B", "C", "D"]),
+            (str.lower, "raise", ["a", "b", "c", "d"]),
+        ],
+    )
+    def test_rename_errors(self, mapper, errors, expected_columns):
+        # GH 13473
+        # rename now works with errors parameter
+        df = DataFrame(columns=["A", "B", "C", "D"])
+        result = df.rename(columns=mapper, errors=errors)
+        expected = DataFrame(columns=expected_columns)
+        tm.assert_frame_equal(result, expected)
+
+    def test_rename_objects(self, float_string_frame):
+        renamed = float_string_frame.rename(columns=str.upper)
+
+        assert "FOO" in renamed
+        assert "foo" not in renamed
+
+    def test_rename_axis_style(self):
+        # https://github.com/pandas-dev/pandas/issues/12392
+        df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"])
+        expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
+
+        result = df.rename(str.lower, axis=1)
+        tm.assert_frame_equal(result, expected)
+
+        result = df.rename(str.lower, axis="columns")
+        tm.assert_frame_equal(result, expected)
+
+        result = df.rename({"A": "a", "B": "b"}, axis=1)
+        tm.assert_frame_equal(result, expected)
+
+        result = df.rename({"A": "a", "B": "b"}, axis="columns")
+        tm.assert_frame_equal(result, expected)
+
+        # Index
+        expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
+        result = df.rename(str.lower, axis=0)
+        tm.assert_frame_equal(result, expected)
+
+        result = df.rename(str.lower, axis="index")
+        tm.assert_frame_equal(result, expected)
+
+        result = df.rename({"X": "x", "Y": "y"}, axis=0)
+        tm.assert_frame_equal(result, expected)
+
+        result = df.rename({"X": "x", "Y": "y"}, axis="index")
+        tm.assert_frame_equal(result, expected)
+
+        result = df.rename(mapper=str.lower, axis="index")
+        tm.assert_frame_equal(result, expected)
+
+    def test_rename_mapper_multi(self):
+        df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index(
+            ["A", "B"]
+        )
+        result = df.rename(str.upper)
+        expected = df.rename(index=str.upper)
+        tm.assert_frame_equal(result, expected)
+
+    def test_rename_positional_named(self):
+        # https://github.com/pandas-dev/pandas/issues/12392
+        df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
+        result = df.rename(index=str.lower, columns=str.upper)
+        expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
+        tm.assert_frame_equal(result, expected)
+
+    def test_rename_axis_style_raises(self):
+        # see gh-12392
+        df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
+
+        # Named target and axis
+        over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
+        with pytest.raises(TypeError, match=over_spec_msg):
+            df.rename(index=str.lower, axis=1)
+
+        with pytest.raises(TypeError, match=over_spec_msg):
+            df.rename(index=str.lower, axis="columns")
+
+        with pytest.raises(TypeError, match=over_spec_msg):
+            df.rename(columns=str.lower, axis="columns")
+
+        with pytest.raises(TypeError, match=over_spec_msg):
+            df.rename(index=str.lower, axis=0)
+
+        # Multiple targets and axis
+        with pytest.raises(TypeError, match=over_spec_msg):
+            df.rename(str.lower, index=str.lower, axis="columns")
+
+        # Too many targets
+        over_spec_msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'"
+        with pytest.raises(TypeError, match=over_spec_msg):
+            df.rename(str.lower, index=str.lower, columns=str.lower)
+
+        # Duplicates
+        with pytest.raises(TypeError, match="multiple values"):
+            df.rename(id, mapper=id)
+
+    def test_rename_positional_raises(self):
+        # GH 29136
+        df = DataFrame(columns=["A", "B"])
+        msg = r"rename\(\) takes from 1 to 2 positional arguments"
+
+        with pytest.raises(TypeError, match=msg):
+            df.rename(None, str.lower)
+
+    def test_rename_no_mappings_raises(self):
+        # GH 29136
+        df = DataFrame([[1]])
+        msg = "must pass an index to rename"
+        with pytest.raises(TypeError, match=msg):
+            df.rename()
+
+        with pytest.raises(TypeError, match=msg):
+            df.rename(None, index=None)
+
+        with pytest.raises(TypeError, match=msg):
+            df.rename(None, columns=None)
+
+        with pytest.raises(TypeError, match=msg):
+            df.rename(None, columns=None, index=None)
+
+    def test_rename_mapper_and_positional_arguments_raises(self):
+        # GH 29136
+        df = DataFrame([[1]])
+        msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'"
+        with pytest.raises(TypeError, match=msg):
+            df.rename({}, index={})
+
+        with pytest.raises(TypeError, match=msg):
+            df.rename({}, columns={})
+
+        with pytest.raises(TypeError, match=msg):
+            df.rename({}, columns={}, index={})
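For orientation alongside the moved tests, here is a minimal sketch of the level-aware rename semantics they pin down (illustrative data, not part of the patch):

    import pandas as pd

    columns = pd.MultiIndex.from_tuples(
        [("fizz1", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
    )
    df = pd.DataFrame([(0, 0), (1, 1)], columns=columns)

    # Without level=, the mapper is applied across all column levels at once.
    df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"})

    # With level=, only that level is renamed; "buzz2" is untouched here.
    df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")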
diff --git a/pandas/tests/frame/methods/test_reset_index.py b/pandas/tests/frame/methods/test_reset_index.py
new file mode 100644
index 0000000000000..6586c19af2539
--- /dev/null
+++ b/pandas/tests/frame/methods/test_reset_index.py
@@ -0,0 +1,299 @@
+from datetime import datetime
+
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    Index,
+    IntervalIndex,
+    MultiIndex,
+    RangeIndex,
+    Series,
+    Timestamp,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestResetIndex:
+    def test_reset_index_tz(self, tz_aware_fixture):
+        # GH 3950
+        # reset_index with single level
+        tz = tz_aware_fixture
+        idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
+        df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
+
+        expected = DataFrame(
+            {
+                "idx": [
+                    datetime(2011, 1, 1),
+                    datetime(2011, 1, 2),
+                    datetime(2011, 1, 3),
+                    datetime(2011, 1, 4),
+                    datetime(2011, 1, 5),
+                ],
+                "a": range(5),
+                "b": ["A", "B", "C", "D", "E"],
+            },
+            columns=["idx", "a", "b"],
+        )
+        expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
+        tm.assert_frame_equal(df.reset_index(), expected)
+
+    def test_reset_index_with_intervals(self):
+        idx = IntervalIndex.from_breaks(np.arange(11), name="x")
+        original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
+
+        result = original.set_index("x")
+        expected = DataFrame({"y": np.arange(10)}, index=idx)
+        tm.assert_frame_equal(result, expected)
+
+        result2 = result.reset_index()
+        tm.assert_frame_equal(result2, original)
+
+    def test_reset_index(self, float_frame):
+        stacked = float_frame.stack()[::2]
+        stacked = DataFrame({"foo": stacked, "bar": stacked})
+
+        names = ["first", "second"]
+        stacked.index.names = names
+        deleveled = stacked.reset_index()
+        for i, (lev, level_codes) in enumerate(
+            zip(stacked.index.levels, stacked.index.codes)
+        ):
+            values = lev.take(level_codes)
+            name = names[i]
+            tm.assert_index_equal(values, Index(deleveled[name]))
+
+        stacked.index.names = [None, None]
+        deleveled2 = stacked.reset_index()
+        tm.assert_series_equal(
+            deleveled["first"], deleveled2["level_0"], check_names=False
+        )
+        tm.assert_series_equal(
+            deleveled["second"], deleveled2["level_1"], check_names=False
+        )
+
+        # default name assigned
+        rdf = float_frame.reset_index()
+        exp = Series(float_frame.index.values, name="index")
+        tm.assert_series_equal(rdf["index"], exp)
+
+        # default name assigned, corner case
+        df = float_frame.copy()
+        df["index"] = "foo"
+        rdf = df.reset_index()
+        exp = Series(float_frame.index.values, name="level_0")
+        tm.assert_series_equal(rdf["level_0"], exp)
+
+        # but this is ok
+        float_frame.index.name = "index"
+        deleveled = float_frame.reset_index()
+        tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
+        tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
+
+        # preserve column names
+        float_frame.columns.name = "columns"
+        resetted = float_frame.reset_index()
+        assert resetted.columns.name == "columns"
+
+        # only remove certain columns
+        df = float_frame.reset_index().set_index(["index", "A", "B"])
+        rs = df.reset_index(["A", "B"])
+
+        # TODO should reset_index check_names ?
+        tm.assert_frame_equal(rs, float_frame, check_names=False)
+
+        rs = df.reset_index(["index", "A", "B"])
+        tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
+
+        rs = df.reset_index(["index", "A", "B"])
+        tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
+
+        rs = df.reset_index("A")
+        xp = float_frame.reset_index().set_index(["index", "B"])
+        tm.assert_frame_equal(rs, xp, check_names=False)
+
+        # test resetting in place
+        df = float_frame.copy()
+        resetted = float_frame.reset_index()
+        df.reset_index(inplace=True)
+        tm.assert_frame_equal(df, resetted, check_names=False)
+
+        df = float_frame.reset_index().set_index(["index", "A", "B"])
+        rs = df.reset_index("A", drop=True)
+        xp = float_frame.copy()
+        del xp["A"]
+        xp = xp.set_index(["B"], append=True)
+        tm.assert_frame_equal(rs, xp, check_names=False)
+
+    def test_reset_index_name(self):
+        df = DataFrame(
+            [[1, 2, 3, 4], [5, 6, 7, 8]],
+            columns=["A", "B", "C", "D"],
+            index=Index(range(2), name="x"),
+        )
+        assert df.reset_index().index.name is None
+        assert df.reset_index(drop=True).index.name is None
+        df.reset_index(inplace=True)
+        assert df.index.name is None
+
+    def test_reset_index_level(self):
+        df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
+
+        for levels in ["A", "B"], [0, 1]:
+            # With MultiIndex
+            result = df.set_index(["A", "B"]).reset_index(level=levels[0])
+            tm.assert_frame_equal(result, df.set_index("B"))
+
+            result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
+            tm.assert_frame_equal(result, df.set_index("B"))
+
+            result = df.set_index(["A", "B"]).reset_index(level=levels)
+            tm.assert_frame_equal(result, df)
+
+            result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
+            tm.assert_frame_equal(result, df[["C", "D"]])
+
+            # With single-level Index (GH 16263)
+            result = df.set_index("A").reset_index(level=levels[0])
+            tm.assert_frame_equal(result, df)
+
+            result = df.set_index("A").reset_index(level=levels[:1])
+            tm.assert_frame_equal(result, df)
+
+            result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
+            tm.assert_frame_equal(result, df[["B", "C", "D"]])
+
+        # Missing levels - for both MultiIndex and single-level Index:
+        for idx_lev in ["A", "B"], ["A"]:
+            with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"):
+                df.set_index(idx_lev).reset_index(level=["A", "E"])
+            with pytest.raises(IndexError, match="Too many levels"):
+                df.set_index(idx_lev).reset_index(level=[0, 1, 2])
+
+    def test_reset_index_right_dtype(self):
+        time = np.arange(0.0, 10, np.sqrt(2) / 2)
+        s1 = Series(
+            (9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
+        )
+        df = DataFrame(s1)
+
+        resetted = s1.reset_index()
+        assert resetted["time"].dtype == np.float64
+
+        resetted = df.reset_index()
+        assert resetted["time"].dtype == np.float64
+
+    def test_reset_index_multiindex_col(self):
+        vals = np.random.randn(3, 3).astype(object)
+        idx = ["x", "y", "z"]
+        full = np.hstack(([[x] for x in idx], vals))
+        df = DataFrame(
+            vals,
+            Index(idx, name="a"),
+            columns=[["b", "b", "c"], ["mean", "median", "mean"]],
+        )
+        rs = df.reset_index()
+        xp = DataFrame(
+            full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]]
+        )
+        tm.assert_frame_equal(rs, xp)
+
+        rs = df.reset_index(col_fill=None)
+        xp = DataFrame(
+            full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]]
+        )
+        tm.assert_frame_equal(rs, xp)
+
+        rs = df.reset_index(col_level=1, col_fill="blah")
+        xp = DataFrame(
+            full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]]
+        )
+        tm.assert_frame_equal(rs, xp)
+
+        df = DataFrame(
+            vals,
+            MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]),
+            columns=[["b", "b", "c"], ["mean", "median", "mean"]],
+        )
+        rs = df.reset_index("a")
+        xp = DataFrame(
+            full,
+            Index([0, 1, 2], name="d"),
+            columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]],
+        )
+        tm.assert_frame_equal(rs, xp)
+
+        rs = df.reset_index("a", col_fill=None)
+        xp = DataFrame(
+            full,
+            Index(range(3), name="d"),
+            columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]],
+        )
+        tm.assert_frame_equal(rs, xp)
+
+        rs = df.reset_index("a", col_fill="blah", col_level=1)
+        xp = DataFrame(
+            full,
+            Index(range(3), name="d"),
+            columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]],
+        )
+        tm.assert_frame_equal(rs, xp)
+
+    def test_reset_index_multiindex_nan(self):
+        # GH#6322, testing reset_index on MultiIndexes
+        # when we have a nan or all nan
+        df = DataFrame(
+            {"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)}
+        )
+        rs = df.set_index(["A", "B"]).reset_index()
+        tm.assert_frame_equal(rs, df)
+
+        df = DataFrame(
+            {"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)}
+        )
+        rs = df.set_index(["A", "B"]).reset_index()
+        tm.assert_frame_equal(rs, df)
+
+        df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]})
+        rs = df.set_index(["A", "B"]).reset_index()
+        tm.assert_frame_equal(rs, df)
+
+        df = DataFrame(
+            {
+                "A": ["a", "b", "c"],
+                "B": [np.nan, np.nan, np.nan],
+                "C": np.random.rand(3),
+            }
+        )
+        rs = df.set_index(["A", "B"]).reset_index()
+        tm.assert_frame_equal(rs, df)
+
+    def test_reset_index_with_datetimeindex_cols(self):
+        # GH#5818
+        df = DataFrame(
+            [[1, 2], [3, 4]],
+            columns=date_range("1/1/2013", "1/2/2013"),
+            index=["A", "B"],
+        )
+
+        result = df.reset_index()
+        expected = DataFrame(
+            [["A", 1, 2], ["B", 3, 4]],
+            columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)],
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_reset_index_range(self):
+        # GH#12071
+        df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2))
+        result = df.reset_index()
+        assert isinstance(result.index, RangeIndex)
+        expected = DataFrame(
+            [[0, 0, 0], [1, 1, 1]],
+            columns=["index", "A", "B"],
+            index=RangeIndex(stop=2),
+        )
+        tm.assert_frame_equal(result, expected)
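The col_level/col_fill cases above are the least obvious part of reset_index; a small sketch of what they control when the columns are a MultiIndex (illustrative values, not from the patch):

    import numpy as np
    import pandas as pd

    df = pd.DataFrame(
        np.arange(6).reshape(3, 2),
        index=pd.Index(["x", "y", "z"], name="a"),
        columns=pd.MultiIndex.from_tuples([("b", "mean"), ("c", "mean")]),
    )

    # The old index becomes a column; col_level chooses which column level
    # receives the name "a", and col_fill pads the remaining levels.
    df.reset_index()                              # column label ("a", "")
    df.reset_index(col_level=1, col_fill="blah")  # column label ("blah", "a")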
pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=[np.number]) + ei = df[["b", "c", "d", "k"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number], exclude=["timedelta"]) + ei = df[["b", "c", "d"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"]) + ei = df[["b", "c", "d", "f"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetime"]) + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetime64"]) + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=["datetimetz"]) + ei = df[["h", "i"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(include=["period"]) + + def test_select_dtypes_exclude_using_list_like(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + } + ) + re = df.select_dtypes(exclude=[np.number]) + ee = df[["a", "e"]] + tm.assert_frame_equal(re, ee) + + def test_select_dtypes_exclude_include_using_list_like(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + exclude = (np.datetime64,) + include = np.bool_, "integer" + r = df.select_dtypes(include=include, exclude=exclude) + e = df[["b", "c", "e"]] + tm.assert_frame_equal(r, e) + + exclude = ("datetime",) + include = "bool", "int64", "int32" + r = df.select_dtypes(include=include, exclude=exclude) + e = df[["b", "e"]] + tm.assert_frame_equal(r, e) + + def test_select_dtypes_include_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number) + ei = df[["b", "c", "d", "k"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include="datetime") + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include="datetime64") + ei = df[["g"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include="category") + ei = df[["f"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(include="period") + + def test_select_dtypes_exclude_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, 
freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(exclude=np.number) + ei = df[["a", "e", "f", "g", "h", "i", "j"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(exclude="category") + ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]] + tm.assert_frame_equal(ri, ei) + + with pytest.raises(NotImplementedError, match=r"^$"): + df.select_dtypes(exclude="period") + + def test_select_dtypes_include_exclude_using_scalars(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number, exclude="floating") + ei = df[["b", "c", "k"]] + tm.assert_frame_equal(ri, ei) + + def test_select_dtypes_include_exclude_mixed_scalars_lists(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.Categorical(list("abc")), + "g": pd.date_range("20130101", periods=3), + "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), + "i": pd.date_range("20130101", periods=3, tz="CET"), + "j": pd.period_range("2013-01", periods=3, freq="M"), + "k": pd.timedelta_range("1 day", periods=3), + } + ) + + ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"]) + ei = df[["b", "c"]] + tm.assert_frame_equal(ri, ei) + + ri = df.select_dtypes(include=[np.number, "category"], exclude="floating") + ei = df[["b", "c", "f", "k"]] + tm.assert_frame_equal(ri, ei) + + def test_select_dtypes_duplicate_columns(self): + # GH20839 + odict = OrderedDict + df = DataFrame( + odict( + [ + ("a", list("abc")), + ("b", list(range(1, 4))), + ("c", np.arange(3, 6).astype("u1")), + ("d", np.arange(4.0, 7.0, dtype="float64")), + ("e", [True, False, True]), + ("f", pd.date_range("now", periods=3).values), + ] + ) + ) + df.columns = ["a", "a", "b", "b", "b", "c"] + + expected = DataFrame( + {"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")} + ) + + result = df.select_dtypes(include=[np.number], exclude=["floating"]) + tm.assert_frame_equal(result, expected) + + def test_select_dtypes_not_an_attr_but_still_valid_dtype(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + df["g"] = df.f.diff() + assert not hasattr(np, "u8") + r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"]) + e = df[["a", "b"]] + tm.assert_frame_equal(r, e) + + r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"]) + e = df[["a", "b", "g"]] + tm.assert_frame_equal(r, e) + + def test_select_dtypes_empty(self): + df = DataFrame({"a": list("abc"), "b": list(range(1, 4))}) + msg = "at least one of include or exclude must be nonempty" + with pytest.raises(ValueError, match=msg): + df.select_dtypes() + + def test_select_dtypes_bad_datetime64(self): + df = DataFrame( + { + "a": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), 
+ "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + with pytest.raises(ValueError, match=".+ is too specific"): + df.select_dtypes(include=["datetime64[D]"]) + + with pytest.raises(ValueError, match=".+ is too specific"): + df.select_dtypes(exclude=["datetime64[as]"]) + + def test_select_dtypes_datetime_with_tz(self): + + df2 = DataFrame( + dict( + A=Timestamp("20130102", tz="US/Eastern"), + B=Timestamp("20130603", tz="CET"), + ), + index=range(5), + ) + df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1) + result = df3.select_dtypes(include=["datetime64[ns]"]) + expected = df3.reindex(columns=[]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"] + ) + @pytest.mark.parametrize("arg", ["include", "exclude"]) + def test_select_dtypes_str_raises(self, dtype, arg): + df = DataFrame( + { + "a": list("abc"), + "g": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + msg = "string dtypes are not allowed" + kwargs = {arg: [dtype]} + + with pytest.raises(TypeError, match=msg): + df.select_dtypes(**kwargs) + + def test_select_dtypes_bad_arg_raises(self): + df = DataFrame( + { + "a": list("abc"), + "g": list("abc"), + "b": list(range(1, 4)), + "c": np.arange(3, 6).astype("u1"), + "d": np.arange(4.0, 7.0, dtype="float64"), + "e": [True, False, True], + "f": pd.date_range("now", periods=3).values, + } + ) + + msg = "data type.*not understood" + with pytest.raises(TypeError, match=msg): + df.select_dtypes(["blargy, blarg, blarg"]) + + def test_select_dtypes_typecodes(self): + # GH 11990 + df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random()) + expected = df + FLOAT_TYPES = list(np.typecodes["AllFloat"]) + tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected) diff --git a/pandas/tests/frame/methods/test_to_period.py b/pandas/tests/frame/methods/test_to_period.py new file mode 100644 index 0000000000000..eac78e611b008 --- /dev/null +++ b/pandas/tests/frame/methods/test_to_period.py @@ -0,0 +1,36 @@ +import numpy as np +import pytest + +from pandas import DataFrame, date_range, period_range +import pandas._testing as tm + + +class TestToPeriod: + def test_frame_to_period(self): + K = 5 + + dr = date_range("1/1/2000", "1/1/2001") + pr = period_range("1/1/2000", "1/1/2001") + df = DataFrame(np.random.randn(len(dr), K), index=dr) + df["mix"] = "a" + + pts = df.to_period() + exp = df.copy() + exp.index = pr + tm.assert_frame_equal(pts, exp) + + pts = df.to_period("M") + tm.assert_index_equal(pts.index, exp.index.asfreq("M")) + + df = df.T + pts = df.to_period(axis=1) + exp = df.copy() + exp.columns = pr + tm.assert_frame_equal(pts, exp) + + pts = df.to_period("M", axis=1) + tm.assert_index_equal(pts.columns, exp.columns.asfreq("M")) + + msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" + with pytest.raises(ValueError, match=msg): + df.to_period(axis=2) diff --git a/pandas/tests/frame/methods/test_to_timestamp.py b/pandas/tests/frame/methods/test_to_timestamp.py new file mode 100644 index 0000000000000..ae7d2827e05a6 --- /dev/null +++ b/pandas/tests/frame/methods/test_to_timestamp.py @@ -0,0 +1,103 @@ +from datetime import timedelta + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + DatetimeIndex, + Timedelta, + date_range, + period_range, 
diff --git a/pandas/tests/frame/methods/test_to_timestamp.py b/pandas/tests/frame/methods/test_to_timestamp.py
new file mode 100644
index 0000000000000..ae7d2827e05a6
--- /dev/null
+++ b/pandas/tests/frame/methods/test_to_timestamp.py
@@ -0,0 +1,103 @@
+from datetime import timedelta
+
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    DatetimeIndex,
+    Timedelta,
+    date_range,
+    period_range,
+    to_datetime,
+)
+import pandas._testing as tm
+
+
+class TestToTimestamp:
+    def test_frame_to_time_stamp(self):
+        K = 5
+        index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
+        df = DataFrame(np.random.randn(len(index), K), index=index)
+        df["mix"] = "a"
+
+        exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
+        exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
+        result = df.to_timestamp("D", "end")
+        tm.assert_index_equal(result.index, exp_index)
+        tm.assert_numpy_array_equal(result.values, df.values)
+
+        exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
+        result = df.to_timestamp("D", "start")
+        tm.assert_index_equal(result.index, exp_index)
+
+        def _get_with_delta(delta, freq="A-DEC"):
+            return date_range(
+                to_datetime("1/1/2001") + delta,
+                to_datetime("12/31/2009") + delta,
+                freq=freq,
+            )
+
+        delta = timedelta(hours=23)
+        result = df.to_timestamp("H", "end")
+        exp_index = _get_with_delta(delta)
+        exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
+        tm.assert_index_equal(result.index, exp_index)
+
+        delta = timedelta(hours=23, minutes=59)
+        result = df.to_timestamp("T", "end")
+        exp_index = _get_with_delta(delta)
+        exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
+        tm.assert_index_equal(result.index, exp_index)
+
+        result = df.to_timestamp("S", "end")
+        delta = timedelta(hours=23, minutes=59, seconds=59)
+        exp_index = _get_with_delta(delta)
+        exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
+        tm.assert_index_equal(result.index, exp_index)
+
+        # columns
+        df = df.T
+
+        exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
+        exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
+        result = df.to_timestamp("D", "end", axis=1)
+        tm.assert_index_equal(result.columns, exp_index)
+        tm.assert_numpy_array_equal(result.values, df.values)
+
+        exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
+        result = df.to_timestamp("D", "start", axis=1)
+        tm.assert_index_equal(result.columns, exp_index)
+
+        delta = timedelta(hours=23)
+        result = df.to_timestamp("H", "end", axis=1)
+        exp_index = _get_with_delta(delta)
+        exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
+        tm.assert_index_equal(result.columns, exp_index)
+
+        delta = timedelta(hours=23, minutes=59)
+        result = df.to_timestamp("T", "end", axis=1)
+        exp_index = _get_with_delta(delta)
+        exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
+        tm.assert_index_equal(result.columns, exp_index)
+
+        result = df.to_timestamp("S", "end", axis=1)
+        delta = timedelta(hours=23, minutes=59, seconds=59)
+        exp_index = _get_with_delta(delta)
+        exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
+        tm.assert_index_equal(result.columns, exp_index)
+
+        # invalid axis
+        with pytest.raises(ValueError, match="axis"):
+            df.to_timestamp(axis=2)
+
+        result1 = df.to_timestamp("5t", axis=1)
+        result2 = df.to_timestamp("t", axis=1)
+        expected = date_range("2001-01-01", "2009-01-01", freq="AS")
+        assert isinstance(result1.columns, DatetimeIndex)
+        assert isinstance(result2.columns, DatetimeIndex)
+        tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8)
+        tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8)
+        # PeriodIndex.to_timestamp always use 'infer'
+        assert result1.columns.freqstr == "AS-JAN"
+        assert result2.columns.freqstr == "AS-JAN"
diff --git a/pandas/tests/frame/methods/test_tz_convert.py b/pandas/tests/frame/methods/test_tz_convert.py
new file mode 100644
index 0000000000000..ea8c4b88538d4
--- /dev/null
+++ b/pandas/tests/frame/methods/test_tz_convert.py
@@ -0,0 +1,84 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Index, MultiIndex, date_range
+import pandas._testing as tm
+
+
+class TestTZConvert:
+    def test_frame_tz_convert(self):
+        rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern")
+
+        df = DataFrame({"a": 1}, index=rng)
+        result = df.tz_convert("Europe/Berlin")
+        expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin"))
+        assert result.index.tz.zone == "Europe/Berlin"
+        tm.assert_frame_equal(result, expected)
+
+        df = df.T
+        result = df.tz_convert("Europe/Berlin", axis=1)
+        assert result.columns.tz.zone == "Europe/Berlin"
+        tm.assert_frame_equal(result, expected.T)
+
+    @pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"])
+    def test_tz_convert_and_localize(self, fn):
+        l0 = date_range("20140701", periods=5, freq="D")
+        l1 = date_range("20140701", periods=5, freq="D")
+
+        int_idx = Index(range(5))
+
+        if fn == "tz_convert":
+            l0 = l0.tz_localize("UTC")
+            l1 = l1.tz_localize("UTC")
+
+        for idx in [l0, l1]:
+
+            l0_expected = getattr(idx, fn)("US/Pacific")
+            l1_expected = getattr(idx, fn)("US/Pacific")
+
+            df1 = DataFrame(np.ones(5), index=l0)
+            df1 = getattr(df1, fn)("US/Pacific")
+            tm.assert_index_equal(df1.index, l0_expected)
+
+            # MultiIndex
+            # GH7846
+            df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
+
+            df3 = getattr(df2, fn)("US/Pacific", level=0)
+            assert not df3.index.levels[0].equals(l0)
+            tm.assert_index_equal(df3.index.levels[0], l0_expected)
+            tm.assert_index_equal(df3.index.levels[1], l1)
+            assert not df3.index.levels[1].equals(l1_expected)
+
+            df3 = getattr(df2, fn)("US/Pacific", level=1)
+            tm.assert_index_equal(df3.index.levels[0], l0)
+            assert not df3.index.levels[0].equals(l0_expected)
+            tm.assert_index_equal(df3.index.levels[1], l1_expected)
+            assert not df3.index.levels[1].equals(l1)
+
+            df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
+
+            # TODO: untested
+            df5 = getattr(df4, fn)("US/Pacific", level=1)  # noqa
+
+            tm.assert_index_equal(df3.index.levels[0], l0)
+            assert not df3.index.levels[0].equals(l0_expected)
+            tm.assert_index_equal(df3.index.levels[1], l1_expected)
+            assert not df3.index.levels[1].equals(l1)
+
+        # Bad Inputs
+
+        # Not DatetimeIndex / PeriodIndex
+        with pytest.raises(TypeError, match="DatetimeIndex"):
+            df = DataFrame(index=int_idx)
+            df = getattr(df, fn)("US/Pacific")
+
+        # Not DatetimeIndex / PeriodIndex
+        with pytest.raises(TypeError, match="DatetimeIndex"):
+            df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
+            df = getattr(df, fn)("US/Pacific", level=0)
+
+        # Invalid level
+        with pytest.raises(ValueError, match="not valid"):
+            df = DataFrame(index=l0)
+            df = getattr(df, fn)("US/Pacific", level=1)
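In brief, the distinction the two tz test modules rely on (sketch, illustrative only):

    import pandas as pd

    naive = pd.date_range("2011-01-01", periods=3, freq="D")
    df = pd.DataFrame({"a": 1}, index=naive)

    # tz_localize attaches a zone to naive timestamps ...
    localized = df.tz_localize("UTC")

    # ... while tz_convert re-expresses timestamps that are already aware.
    converted = localized.tz_convert("US/Eastern")

    # Both accept level= to restrict the operation to one MultiIndex level.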
diff --git a/pandas/tests/frame/methods/test_tz_localize.py b/pandas/tests/frame/methods/test_tz_localize.py
new file mode 100644
index 0000000000000..1d4e26a6999b7
--- /dev/null
+++ b/pandas/tests/frame/methods/test_tz_localize.py
@@ -0,0 +1,21 @@
+from pandas import DataFrame, date_range
+import pandas._testing as tm
+
+
+class TestTZLocalize:
+    # See also:
+    # test_tz_convert_and_localize in test_tz_convert
+
+    def test_frame_tz_localize(self):
+        rng = date_range("1/1/2011", periods=100, freq="H")
+
+        df = DataFrame({"a": 1}, index=rng)
+        result = df.tz_localize("utc")
+        expected = DataFrame({"a": 1}, rng.tz_localize("UTC"))
+        assert result.index.tz.zone == "UTC"
+        tm.assert_frame_equal(result, expected)
+
+        df = df.T
+        result = df.tz_localize("utc", axis=1)
+        assert result.columns.tz.zone == "UTC"
+        tm.assert_frame_equal(result, expected.T)
diff --git a/pandas/tests/frame/methods/test_value_counts.py b/pandas/tests/frame/methods/test_value_counts.py
new file mode 100644
index 0000000000000..c409b0bbe6fa9
--- /dev/null
+++ b/pandas/tests/frame/methods/test_value_counts.py
@@ -0,0 +1,102 @@
+import numpy as np
+
+import pandas as pd
+import pandas._testing as tm
+
+
+def test_data_frame_value_counts_unsorted():
+    df = pd.DataFrame(
+        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
+        index=["falcon", "dog", "cat", "ant"],
+    )
+
+    result = df.value_counts(sort=False)
+    expected = pd.Series(
+        data=[1, 2, 1],
+        index=pd.MultiIndex.from_arrays(
+            [(2, 4, 6), (2, 0, 0)], names=["num_legs", "num_wings"]
+        ),
+    )
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_data_frame_value_counts_ascending():
+    df = pd.DataFrame(
+        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
+        index=["falcon", "dog", "cat", "ant"],
+    )
+
+    result = df.value_counts(ascending=True)
+    expected = pd.Series(
+        data=[1, 1, 2],
+        index=pd.MultiIndex.from_arrays(
+            [(2, 6, 4), (2, 0, 0)], names=["num_legs", "num_wings"]
+        ),
+    )
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_data_frame_value_counts_default():
+    df = pd.DataFrame(
+        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
+        index=["falcon", "dog", "cat", "ant"],
+    )
+
+    result = df.value_counts()
+    expected = pd.Series(
+        data=[2, 1, 1],
+        index=pd.MultiIndex.from_arrays(
+            [(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"]
+        ),
+    )
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_data_frame_value_counts_normalize():
+    df = pd.DataFrame(
+        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
+        index=["falcon", "dog", "cat", "ant"],
+    )
+
+    result = df.value_counts(normalize=True)
+    expected = pd.Series(
+        data=[0.5, 0.25, 0.25],
+        index=pd.MultiIndex.from_arrays(
+            [(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"]
+        ),
+    )
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_data_frame_value_counts_single_col_default():
+    df = pd.DataFrame({"num_legs": [2, 4, 4, 6]})
+
+    result = df.value_counts()
+    expected = pd.Series(
+        data=[2, 1, 1],
+        index=pd.MultiIndex.from_arrays([[4, 6, 2]], names=["num_legs"]),
+    )
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_data_frame_value_counts_empty():
+    df_no_cols = pd.DataFrame()
+
+    result = df_no_cols.value_counts()
+    expected = pd.Series([], dtype=np.int64)
+
+    tm.assert_series_equal(result, expected)
+
+
+def test_data_frame_value_counts_empty_normalize():
+    df_no_cols = pd.DataFrame()
+
+    result = df_no_cols.value_counts(normalize=True)
+    expected = pd.Series([], dtype=np.float64)
+
+    tm.assert_series_equal(result, expected)
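For orientation: DataFrame.value_counts counts distinct rows and returns a Series indexed by a MultiIndex over the columns, which is what the expectations above encode (sketch; data borrowed from the tests):

    import pandas as pd

    df = pd.DataFrame(
        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
        index=["falcon", "dog", "cat", "ant"],
    )

    df.value_counts()
    # (num_legs=4, num_wings=0) -> 2; the other two rows -> 1 each,
    # sorted by count descending unless sort=False is passed.

    df.value_counts(normalize=True)  # relative frequencies instead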
diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py
index 0c19a38bb5fa2..751ed1dfdd847 100644
--- a/pandas/tests/frame/test_alter_axes.py
+++ b/pandas/tests/frame/test_alter_axes.py
@@ -1,4 +1,3 @@
-from collections import ChainMap
 from datetime import datetime, timedelta
 import inspect
 
@@ -18,7 +17,6 @@
     Index,
     IntervalIndex,
     MultiIndex,
-    RangeIndex,
     Series,
     Timestamp,
     cut,
@@ -533,30 +531,6 @@ def test_convert_dti_to_series(self):
         df.pop("ts")
         tm.assert_frame_equal(df, expected)
 
-    def test_reset_index_tz(self, tz_aware_fixture):
-        # GH 3950
-        # reset_index with single level
-        tz = tz_aware_fixture
-        idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
-        df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
-
-        expected = DataFrame(
-            {
-                "idx": [
-                    datetime(2011, 1, 1),
-                    datetime(2011, 1, 2),
-                    datetime(2011, 1, 3),
-                    datetime(2011, 1, 4),
-                    datetime(2011, 1, 5),
-                ],
-                "a": range(5),
-                "b": ["A", "B", "C", "D", "E"],
-            },
-            columns=["idx", "a", "b"],
-        )
-        expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
-        tm.assert_frame_equal(df.reset_index(), expected)
-
     def test_set_index_timezone(self):
         # GH 12358
         # tz-aware Series should retain the tz
@@ -583,17 +557,6 @@ def test_set_index_dst(self):
         exp = DataFrame({"b": [3, 4, 5]}, index=exp_index)
         tm.assert_frame_equal(res, exp)
 
-    def test_reset_index_with_intervals(self):
-        idx = IntervalIndex.from_breaks(np.arange(11), name="x")
-        original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
-
-        result = original.set_index("x")
-        expected = DataFrame({"y": np.arange(10)}, index=idx)
-        tm.assert_frame_equal(result, expected)
-
-        result2 = result.reset_index()
-        tm.assert_frame_equal(result2, original)
-
     def test_set_index_multiindexcolumns(self):
         columns = MultiIndex.from_tuples([("foo", 1), ("foo", 2), ("bar", 1)])
         df = DataFrame(np.random.randn(3, 3), columns=columns)
@@ -652,65 +615,6 @@ def test_dti_set_index_reindex(self):
 
     # Renaming
 
-    def test_rename(self, float_frame):
-        mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
-
-        renamed = float_frame.rename(columns=mapping)
-        renamed2 = float_frame.rename(columns=str.lower)
-
-        tm.assert_frame_equal(renamed, renamed2)
-        tm.assert_frame_equal(
-            renamed2.rename(columns=str.upper), float_frame, check_names=False
-        )
-
-        # index
-        data = {"A": {"foo": 0, "bar": 1}}
-
-        # gets sorted alphabetical
-        df = DataFrame(data)
-        renamed = df.rename(index={"foo": "bar", "bar": "foo"})
-        tm.assert_index_equal(renamed.index, Index(["foo", "bar"]))
-
-        renamed = df.rename(index=str.upper)
-        tm.assert_index_equal(renamed.index, Index(["BAR", "FOO"]))
-
-        # have to pass something
-        with pytest.raises(TypeError, match="must pass an index to rename"):
-            float_frame.rename()
-
-        # partial columns
-        renamed = float_frame.rename(columns={"C": "foo", "D": "bar"})
-        tm.assert_index_equal(renamed.columns, Index(["A", "B", "foo", "bar"]))
-
-        # other axis
-        renamed = float_frame.T.rename(index={"C": "foo", "D": "bar"})
-        tm.assert_index_equal(renamed.index, Index(["A", "B", "foo", "bar"]))
-
-        # index with name
-        index = Index(["foo", "bar"], name="name")
-        renamer = DataFrame(data, index=index)
-        renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
-        tm.assert_index_equal(renamed.index, Index(["bar", "foo"], name="name"))
-        assert renamed.index.name == renamer.index.name
-
-    @pytest.mark.parametrize(
-        "args,kwargs",
-        [
-            ((ChainMap({"A": "a"}, {"B": "b"}),), dict(axis="columns")),
-            ((), dict(columns=ChainMap({"A": "a"}, {"B": "b"}))),
-        ],
-    )
-    def test_rename_chainmap(self, args, kwargs):
-        # see gh-23859
-        colAData = range(1, 11)
-        colBdata = np.random.randn(10)
-
-        df = DataFrame({"A": colAData, "B": colBdata})
-        result = df.rename(*args, **kwargs)
-
-        expected = DataFrame({"a": colAData, "b": colBdata})
-        tm.assert_frame_equal(result, expected)
-
     def test_rename_axis_inplace(self, float_frame):
         # GH 15704
         expected = float_frame.rename_axis("foo")
@@ -785,168 +689,6 @@ def test_rename_axis_mapper(self):
         with pytest.raises(TypeError, match="bogus"):
             df.rename_axis(bogus=None)
 
-    @pytest.mark.parametrize(
-        "kwargs, rename_index, rename_columns",
-        [
-            ({"mapper": None, "axis": 0}, True, False),
-            ({"mapper": None, "axis": 1}, False, True),
-            ({"index": None}, True, False),
-            ({"columns": None}, False, True),
-            ({"index": None, "columns": None}, True, True),
-            ({}, False, False),
-        ],
-    )
-    def test_rename_axis_none(self, kwargs, rename_index, rename_columns):
-        # GH 25034
-        index = Index(list("abc"), name="foo")
-        columns = Index(["col1", "col2"], name="bar")
-        data = np.arange(6).reshape(3, 2)
-        df = DataFrame(data, index, columns)
-
-        result = df.rename_axis(**kwargs)
-        expected_index = index.rename(None) if rename_index else index
-        expected_columns = columns.rename(None) if rename_columns else columns
-        expected = DataFrame(data, expected_index, expected_columns)
-        tm.assert_frame_equal(result, expected)
-
-    def test_rename_multiindex(self):
-
-        tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
-        tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
-        index = MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
-        columns = MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
-        df = DataFrame([(0, 0), (1, 1)], index=index, columns=columns)
-
-        #
-        # without specifying level -> across all levels
-
-        renamed = df.rename(
-            index={"foo1": "foo3", "bar2": "bar3"},
-            columns={"fizz1": "fizz3", "buzz2": "buzz3"},
-        )
-        new_index = MultiIndex.from_tuples(
-            [("foo3", "bar1"), ("foo2", "bar3")], names=["foo", "bar"]
-        )
-        new_columns = MultiIndex.from_tuples(
-            [("fizz3", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
-        )
-        tm.assert_index_equal(renamed.index, new_index)
-        tm.assert_index_equal(renamed.columns, new_columns)
-        assert renamed.index.names == df.index.names
-        assert renamed.columns.names == df.columns.names
-
-        #
-        # with specifying a level (GH13766)
-
-        # dict
-        new_columns = MultiIndex.from_tuples(
-            [("fizz3", "buzz1"), ("fizz2", "buzz2")], names=["fizz", "buzz"]
-        )
-        renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
-        tm.assert_index_equal(renamed.columns, new_columns)
-        renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
-        tm.assert_index_equal(renamed.columns, new_columns)
-
-        new_columns = MultiIndex.from_tuples(
-            [("fizz1", "buzz1"), ("fizz2", "buzz3")], names=["fizz", "buzz"]
-        )
-        renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
-        tm.assert_index_equal(renamed.columns, new_columns)
-        renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
-        tm.assert_index_equal(renamed.columns, new_columns)
-
-        # function
-        func = str.upper
-        new_columns = MultiIndex.from_tuples(
-            [("FIZZ1", "buzz1"), ("FIZZ2", "buzz2")], names=["fizz", "buzz"]
-        )
-        renamed = df.rename(columns=func, level=0)
-        tm.assert_index_equal(renamed.columns, new_columns)
-        renamed = df.rename(columns=func, level="fizz")
-        tm.assert_index_equal(renamed.columns, new_columns)
-
-        new_columns = MultiIndex.from_tuples(
-            [("fizz1", "BUZZ1"), ("fizz2", "BUZZ2")], names=["fizz", "buzz"]
-        )
-        renamed = df.rename(columns=func, level=1)
-        tm.assert_index_equal(renamed.columns, new_columns)
-        renamed = df.rename(columns=func, level="buzz")
-        tm.assert_index_equal(renamed.columns, new_columns)
-
-        # index
-        new_index = MultiIndex.from_tuples(
-            [("foo3", "bar1"), ("foo2", "bar2")], names=["foo", "bar"]
-        )
-        renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
-        tm.assert_index_equal(renamed.index, new_index)
-
-    def test_rename_nocopy(self, float_frame):
-        renamed = float_frame.rename(columns={"C": "foo"}, copy=False)
-        renamed["foo"] = 1.0
-        assert (float_frame["C"] == 1.0).all()
-
-    def test_rename_inplace(self, float_frame):
-        float_frame.rename(columns={"C": "foo"})
-        assert "C" in float_frame
-        assert "foo" not in float_frame
-
-        c_id = id(float_frame["C"])
-        float_frame = float_frame.copy()
-        float_frame.rename(columns={"C": "foo"}, inplace=True)
-
-        assert "C" not in float_frame
-        assert "foo" in float_frame
-        assert id(float_frame["foo"]) != c_id
-
-    def test_rename_bug(self):
-        # GH 5344
-        # rename set ref_locs, and set_index was not resetting
-        df = DataFrame({0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]})
-        df = df.rename(columns={0: "a"})
-        df = df.rename(columns={1: "b"})
-        df = df.set_index(["a", "b"])
-        df.columns = ["2001-01-01"]
-        expected = DataFrame(
-            [[1], [2]],
-            index=MultiIndex.from_tuples(
-                [("foo", "bah"), ("bar", "bas")], names=["a", "b"]
-            ),
-            columns=["2001-01-01"],
-        )
-        tm.assert_frame_equal(df, expected)
-
-    def test_rename_bug2(self):
-        # GH 19497
-        # rename was changing Index to MultiIndex if Index contained tuples
-
-        df = DataFrame(data=np.arange(3), index=[(0, 0), (1, 1), (2, 2)], columns=["a"])
-        df = df.rename({(1, 1): (5, 4)}, axis="index")
-        expected = DataFrame(
-            data=np.arange(3), index=[(0, 0), (5, 4), (2, 2)], columns=["a"]
-        )
-        tm.assert_frame_equal(df, expected)
-
-    def test_rename_errors_raises(self):
-        df = DataFrame(columns=["A", "B", "C", "D"])
-        with pytest.raises(KeyError, match="'E'] not found in axis"):
-            df.rename(columns={"A": "a", "E": "e"}, errors="raise")
-
-    @pytest.mark.parametrize(
-        "mapper, errors, expected_columns",
-        [
-            ({"A": "a", "E": "e"}, "ignore", ["a", "B", "C", "D"]),
-            ({"A": "a"}, "raise", ["a", "B", "C", "D"]),
-            (str.lower, "raise", ["a", "b", "c", "d"]),
-        ],
-    )
-    def test_rename_errors(self, mapper, errors, expected_columns):
-        # GH 13473
-        # rename now works with errors parameter
-        df = DataFrame(columns=["A", "B", "C", "D"])
-        result = df.rename(columns=mapper, errors=errors)
-        expected = DataFrame(columns=expected_columns)
-        tm.assert_frame_equal(result, expected)
-
     def test_reorder_levels(self):
         index = MultiIndex(
             levels=[["bar"], ["one", "two", "three"], [0, 1]],
@@ -985,253 +727,6 @@ def test_reorder_levels(self):
         result = df.reorder_levels(["L0", "L0", "L0"])
         tm.assert_frame_equal(result, expected)
 
-    def test_reset_index(self, float_frame):
-        stacked = float_frame.stack()[::2]
-        stacked = DataFrame({"foo": stacked, "bar": stacked})
-
-        names = ["first", "second"]
-        stacked.index.names = names
-        deleveled = stacked.reset_index()
-        for i, (lev, level_codes) in enumerate(
-            zip(stacked.index.levels, stacked.index.codes)
-        ):
-            values = lev.take(level_codes)
-            name = names[i]
-            tm.assert_index_equal(values, Index(deleveled[name]))
-
-        stacked.index.names = [None, None]
-        deleveled2 = stacked.reset_index()
-        tm.assert_series_equal(
-            deleveled["first"], deleveled2["level_0"], check_names=False
-        )
-        tm.assert_series_equal(
-            deleveled["second"], deleveled2["level_1"], check_names=False
-        )
-
-        # default name assigned
-        rdf = float_frame.reset_index()
-        exp = Series(float_frame.index.values, name="index")
-        tm.assert_series_equal(rdf["index"], exp)
-
-        # default name assigned, corner case
-        df = float_frame.copy()
-        df["index"] = "foo"
-        rdf = df.reset_index()
-        exp = Series(float_frame.index.values, name="level_0")
-        tm.assert_series_equal(rdf["level_0"], exp)
-
-        # but this is ok
-        float_frame.index.name = "index"
-        deleveled = float_frame.reset_index()
-        tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
-        tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
-
-        # preserve column names
-        float_frame.columns.name = "columns"
-        resetted = float_frame.reset_index()
-        assert resetted.columns.name == "columns"
-
-        # only remove certain columns
-        df = float_frame.reset_index().set_index(["index", "A", "B"])
-        rs = df.reset_index(["A", "B"])
-
-        # TODO should reset_index check_names ?
-        tm.assert_frame_equal(rs, float_frame, check_names=False)
-
-        rs = df.reset_index(["index", "A", "B"])
-        tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
-
-        rs = df.reset_index(["index", "A", "B"])
-        tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
-
-        rs = df.reset_index("A")
-        xp = float_frame.reset_index().set_index(["index", "B"])
-        tm.assert_frame_equal(rs, xp, check_names=False)
-
-        # test resetting in place
-        df = float_frame.copy()
-        resetted = float_frame.reset_index()
-        df.reset_index(inplace=True)
-        tm.assert_frame_equal(df, resetted, check_names=False)
-
-        df = float_frame.reset_index().set_index(["index", "A", "B"])
-        rs = df.reset_index("A", drop=True)
-        xp = float_frame.copy()
-        del xp["A"]
-        xp = xp.set_index(["B"], append=True)
-        tm.assert_frame_equal(rs, xp, check_names=False)
-
-    def test_reset_index_name(self):
-        df = DataFrame(
-            [[1, 2, 3, 4], [5, 6, 7, 8]],
-            columns=["A", "B", "C", "D"],
-            index=Index(range(2), name="x"),
-        )
-        assert df.reset_index().index.name is None
-        assert df.reset_index(drop=True).index.name is None
-        df.reset_index(inplace=True)
-        assert df.index.name is None
-
-    def test_reset_index_level(self):
-        df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
-
-        for levels in ["A", "B"], [0, 1]:
-            # With MultiIndex
-            result = df.set_index(["A", "B"]).reset_index(level=levels[0])
-            tm.assert_frame_equal(result, df.set_index("B"))
-
-            result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
-            tm.assert_frame_equal(result, df.set_index("B"))
-
-            result = df.set_index(["A", "B"]).reset_index(level=levels)
-            tm.assert_frame_equal(result, df)
-
-            result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
-            tm.assert_frame_equal(result, df[["C", "D"]])
-
-            # With single-level Index (GH 16263)
-            result = df.set_index("A").reset_index(level=levels[0])
-            tm.assert_frame_equal(result, df)
-
-            result = df.set_index("A").reset_index(level=levels[:1])
-            tm.assert_frame_equal(result, df)
-
-            result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
-            tm.assert_frame_equal(result, df[["B", "C", "D"]])
-
-        # Missing levels - for both MultiIndex and single-level Index:
-        for idx_lev in ["A", "B"], ["A"]:
-            with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"):
-                df.set_index(idx_lev).reset_index(level=["A", "E"])
-            with pytest.raises(IndexError, match="Too many levels"):
-                df.set_index(idx_lev).reset_index(level=[0, 1, 2])
-
-    def test_reset_index_right_dtype(self):
-        time = np.arange(0.0, 10, np.sqrt(2) / 2)
-        s1 = Series(
-            (9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
-        )
-        df = DataFrame(s1)
-
-        resetted = s1.reset_index()
-        assert resetted["time"].dtype == np.float64
-
-        resetted = df.reset_index()
-        assert resetted["time"].dtype == np.float64
-
-    def test_reset_index_multiindex_col(self):
-        vals = np.random.randn(3, 3).astype(object)
-        idx = ["x", "y", "z"]
-        full = np.hstack(([[x] for x in idx], vals))
-        df = DataFrame(
-            vals,
-            Index(idx, name="a"),
-            columns=[["b", "b", "c"], ["mean", "median", "mean"]],
-        )
-        rs = df.reset_index()
-        xp = DataFrame(
-            full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]]
-        )
-        tm.assert_frame_equal(rs, xp)
-
-        rs = df.reset_index(col_fill=None)
-        xp = DataFrame(
-            full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]]
-        )
-        tm.assert_frame_equal(rs, xp)
-
-        rs = df.reset_index(col_level=1, col_fill="blah")
-        xp = DataFrame(
-            full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]]
-        )
-        tm.assert_frame_equal(rs, xp)
-
-        df = DataFrame(
-            vals,
-            MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]),
-            columns=[["b", "b", "c"], ["mean", "median", "mean"]],
-        )
-        rs = df.reset_index("a")
-        xp = DataFrame(
-            full,
-            Index([0, 1, 2], name="d"),
-            columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]],
-        )
-        tm.assert_frame_equal(rs, xp)
-
-        rs = df.reset_index("a", col_fill=None)
-        xp = DataFrame(
-            full,
-            Index(range(3), name="d"),
-            columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]],
-        )
-        tm.assert_frame_equal(rs, xp)
-
-        rs = df.reset_index("a", col_fill="blah", col_level=1)
-        xp = DataFrame(
-            full,
-            Index(range(3), name="d"),
-            columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]],
-        )
-        tm.assert_frame_equal(rs, xp)
-
-    def test_reset_index_multiindex_nan(self):
-        # GH6322, testing reset_index on MultiIndexes
-        # when we have a nan or all nan
-        df = DataFrame(
-            {"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)}
-        )
-        rs = df.set_index(["A", "B"]).reset_index()
-        tm.assert_frame_equal(rs, df)
-
-        df = DataFrame(
-            {"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)}
-        )
-        rs = df.set_index(["A", "B"]).reset_index()
-        tm.assert_frame_equal(rs, df)
-
-        df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]})
-        rs = df.set_index(["A", "B"]).reset_index()
-        tm.assert_frame_equal(rs, df)
-
-        df = DataFrame(
-            {
-                "A": ["a", "b", "c"],
-                "B": [np.nan, np.nan, np.nan],
-                "C": np.random.rand(3),
-            }
-        )
-        rs = df.set_index(["A", "B"]).reset_index()
-        tm.assert_frame_equal(rs, df)
-
-    def test_reset_index_with_datetimeindex_cols(self):
-        # GH5818
-        #
-        df = DataFrame(
-            [[1, 2], [3, 4]],
-            columns=date_range("1/1/2013", "1/2/2013"),
-            index=["A", "B"],
-        )
-
-        result = df.reset_index()
-        expected = DataFrame(
-            [["A", 1, 2], ["B", 3, 4]],
-            columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)],
-        )
-        tm.assert_frame_equal(result, expected)
-
-    def test_reset_index_range(self):
-        # GH 12071
-        df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2))
-        result = df.reset_index()
-        assert isinstance(result.index, RangeIndex)
-        expected = DataFrame(
-            [[0, 0, 0], [1, 1, 1]],
-            columns=["index", "A", "B"],
-            index=RangeIndex(stop=2),
-        )
-        tm.assert_frame_equal(result, expected)
-
     def test_set_index_names(self):
         df = tm.makeDataFrame()
         df.index.name = "name"
@@ -1262,92 +757,6 @@ def test_set_index_names(self):
         # Check equality
         tm.assert_index_equal(df.set_index([df.index, idx2]).index, mi2)
 
-    def test_rename_objects(self, float_string_frame):
-        renamed = float_string_frame.rename(columns=str.upper)
-
-        assert "FOO" in renamed
-        assert "foo" not in renamed
-
-    def test_rename_axis_style(self):
-        # https://github.com/pandas-dev/pandas/issues/12392
-        df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["X", "Y"])
-        expected = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
-
-        result = df.rename(str.lower, axis=1)
-        tm.assert_frame_equal(result, expected)
-
-        result = df.rename(str.lower, axis="columns")
-        tm.assert_frame_equal(result, expected)
-
-        result = df.rename({"A": "a", "B": "b"}, axis=1)
-        tm.assert_frame_equal(result, expected)
-
-        result = df.rename({"A": "a", "B": "b"}, axis="columns")
-        tm.assert_frame_equal(result, expected)
-
-        # Index
-        expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
-        result = df.rename(str.lower, axis=0)
-        tm.assert_frame_equal(result, expected)
-
-        result = df.rename(str.lower, axis="index")
-        tm.assert_frame_equal(result, expected)
-
-        result = df.rename({"X": "x", "Y": "y"}, axis=0)
-        tm.assert_frame_equal(result, expected)
-
-        result = df.rename({"X": "x", "Y": "y"}, axis="index")
-        tm.assert_frame_equal(result, expected)
-
-        result = df.rename(mapper=str.lower, axis="index")
-        tm.assert_frame_equal(result, expected)
-
-    def test_rename_mapper_multi(self):
-        df = DataFrame({"A": ["a", "b"], "B": ["c", "d"], "C": [1, 2]}).set_index(
-            ["A", "B"]
-        )
-        result = df.rename(str.upper)
-        expected = df.rename(index=str.upper)
-        tm.assert_frame_equal(result, expected)
-
-    def test_rename_positional_named(self):
-        # https://github.com/pandas-dev/pandas/issues/12392
-        df = DataFrame({"a": [1, 2], "b": [1, 2]}, index=["X", "Y"])
-        result = df.rename(index=str.lower, columns=str.upper)
-        expected = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["x", "y"])
-        tm.assert_frame_equal(result, expected)
-
-    def test_rename_axis_style_raises(self):
-        # see gh-12392
-        df = DataFrame({"A": [1, 2], "B": [1, 2]}, index=["0", "1"])
-
-        # Named target and axis
-        over_spec_msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
-        with pytest.raises(TypeError, match=over_spec_msg):
-            df.rename(index=str.lower, axis=1)
-
-        with pytest.raises(TypeError, match=over_spec_msg):
-            df.rename(index=str.lower, axis="columns")
-
-        with pytest.raises(TypeError, match=over_spec_msg):
-            df.rename(columns=str.lower, axis="columns")
-
-        with pytest.raises(TypeError, match=over_spec_msg):
-            df.rename(index=str.lower, axis=0)
-
-        # Multiple targets and axis
-        with pytest.raises(TypeError, match=over_spec_msg):
-            df.rename(str.lower, index=str.lower, axis="columns")
-
-        # Too many targets
-        over_spec_msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'"
-        with pytest.raises(TypeError, match=over_spec_msg):
-            df.rename(str.lower, index=str.lower, columns=str.lower)
-
-        # Duplicates
-        with pytest.raises(TypeError, match="multiple values"):
-            df.rename(id, mapper=id)
-
     def test_reindex_api_equivalence(self):
         # equivalence of the labels/axis and index/columns API's
         df = DataFrame(
@@ -1376,43 +785,6 @@ def test_reindex_api_equivalence(self):
         for res in [res2, res3]:
             tm.assert_frame_equal(res1, res)
 
-    def test_rename_positional_raises(self):
-        # GH 29136
-        df = DataFrame(columns=["A", "B"])
-        msg = r"rename\(\) takes from 1 to 2 positional arguments"
-
-        with pytest.raises(TypeError, match=msg):
-            df.rename(None, str.lower)
-
-    def test_rename_no_mappings_raises(self):
-        # GH 29136
-        df = DataFrame([[1]])
-        msg = "must pass an index to rename"
-        with pytest.raises(TypeError, match=msg):
-            df.rename()
-
-        with pytest.raises(TypeError, match=msg):
-            df.rename(None, index=None)
-
-        with pytest.raises(TypeError, match=msg):
-            df.rename(None, columns=None)
-
-        with pytest.raises(TypeError, match=msg):
-            df.rename(None, columns=None, index=None)
-
-    def test_rename_mapper_and_positional_arguments_raises(self):
-        # GH 29136
-        df = DataFrame([[1]])
-        msg = "Cannot specify both 'mapper' and any of 'index' or 'columns'"
-        with pytest.raises(TypeError, match=msg):
-            df.rename({}, index={})
-
-        with pytest.raises(TypeError, match=msg):
-            df.rename({}, columns={})
-
-        with pytest.raises(TypeError, match=msg):
-            df.rename({}, columns={}, index={})
-
     def test_assign_columns(self, float_frame):
         float_frame["hi"] = "there"
diff --git a/pandas/tests/frame/test_combine_concat.py b/pandas/tests/frame/test_combine_concat.py
index 36a476d195fe5..321eb5fe94daf 100644
--- a/pandas/tests/frame/test_combine_concat.py
+++ b/pandas/tests/frame/test_combine_concat.py
@@ -21,27 +21,6 @@ def test_concat_multiple_frames_dtypes(self):
         )
         tm.assert_series_equal(results, expected)
 
-    @pytest.mark.parametrize(
-        "data",
-        [
-            pd.date_range("2000", periods=4),
-            pd.date_range("2000", periods=4, tz="US/Central"),
-            pd.period_range("2000", periods=4),
-            pd.timedelta_range(0, periods=4),
-        ],
-    )
-    def test_combine_datetlike_udf(self, data):
-        # https://github.com/pandas-dev/pandas/issues/23079
-        df = pd.DataFrame({"A": data})
-        other = df.copy()
-        df.iloc[1, 0] = None
-
-        def combiner(a, b):
-            return b
-
-        result = df.combine(other, combiner)
-        tm.assert_frame_equal(result, other)
-
     def test_concat_multiple_tzs(self):
         # GH 12467
         # combining datetime tz-aware and naive DataFrames
df.select_dtypes(include=include, exclude=exclude) - e = df[["b", "c", "e"]] - tm.assert_frame_equal(r, e) - - exclude = ("datetime",) - include = "bool", "int64", "int32" - r = df.select_dtypes(include=include, exclude=exclude) - e = df[["b", "e"]] - tm.assert_frame_equal(r, e) - - def test_select_dtypes_include_using_scalars(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(include=np.number) - ei = df[["b", "c", "d", "k"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include="datetime") - ei = df[["g"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include="datetime64") - ei = df[["g"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include="category") - ei = df[["f"]] - tm.assert_frame_equal(ri, ei) - - with pytest.raises(NotImplementedError, match=r"^$"): - df.select_dtypes(include="period") - - def test_select_dtypes_exclude_using_scalars(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(exclude=np.number) - ei = df[["a", "e", "f", "g", "h", "i", "j"]] - tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(exclude="category") - ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]] - tm.assert_frame_equal(ri, ei) - - with pytest.raises(NotImplementedError, match=r"^$"): - df.select_dtypes(exclude="period") - - def test_select_dtypes_include_exclude_using_scalars(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(include=np.number, exclude="floating") - ei = df[["b", "c", "k"]] - tm.assert_frame_equal(ri, ei) - - def test_select_dtypes_include_exclude_mixed_scalars_lists(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.Categorical(list("abc")), - "g": pd.date_range("20130101", periods=3), - "h": pd.date_range("20130101", periods=3, tz="US/Eastern"), - "i": pd.date_range("20130101", periods=3, tz="CET"), - "j": pd.period_range("2013-01", periods=3, freq="M"), - "k": pd.timedelta_range("1 day", periods=3), - } - ) - - ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"]) - ei = df[["b", "c"]] - 
tm.assert_frame_equal(ri, ei) - - ri = df.select_dtypes(include=[np.number, "category"], exclude="floating") - ei = df[["b", "c", "f", "k"]] - tm.assert_frame_equal(ri, ei) - - def test_select_dtypes_duplicate_columns(self): - # GH20839 - odict = OrderedDict - df = DataFrame( - odict( - [ - ("a", list("abc")), - ("b", list(range(1, 4))), - ("c", np.arange(3, 6).astype("u1")), - ("d", np.arange(4.0, 7.0, dtype="float64")), - ("e", [True, False, True]), - ("f", pd.date_range("now", periods=3).values), - ] - ) - ) - df.columns = ["a", "a", "b", "b", "b", "c"] - - expected = DataFrame( - {"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")} - ) - - result = df.select_dtypes(include=[np.number], exclude=["floating"]) - tm.assert_frame_equal(result, expected) - - def test_select_dtypes_not_an_attr_but_still_valid_dtype(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - df["g"] = df.f.diff() - assert not hasattr(np, "u8") - r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"]) - e = df[["a", "b"]] - tm.assert_frame_equal(r, e) - - r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"]) - e = df[["a", "b", "g"]] - tm.assert_frame_equal(r, e) - - def test_select_dtypes_empty(self): - df = DataFrame({"a": list("abc"), "b": list(range(1, 4))}) - msg = "at least one of include or exclude must be nonempty" - with pytest.raises(ValueError, match=msg): - df.select_dtypes() - - def test_select_dtypes_bad_datetime64(self): - df = DataFrame( - { - "a": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - with pytest.raises(ValueError, match=".+ is too specific"): - df.select_dtypes(include=["datetime64[D]"]) - - with pytest.raises(ValueError, match=".+ is too specific"): - df.select_dtypes(exclude=["datetime64[as]"]) - - def test_select_dtypes_datetime_with_tz(self): - - df2 = DataFrame( - dict( - A=Timestamp("20130102", tz="US/Eastern"), - B=Timestamp("20130603", tz="CET"), - ), - index=range(5), - ) - df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1) - result = df3.select_dtypes(include=["datetime64[ns]"]) - expected = df3.reindex(columns=[]) - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize( - "dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"] - ) - @pytest.mark.parametrize("arg", ["include", "exclude"]) - def test_select_dtypes_str_raises(self, dtype, arg): - df = DataFrame( - { - "a": list("abc"), - "g": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - msg = "string dtypes are not allowed" - kwargs = {arg: [dtype]} - - with pytest.raises(TypeError, match=msg): - df.select_dtypes(**kwargs) - - def test_select_dtypes_bad_arg_raises(self): - df = DataFrame( - { - "a": list("abc"), - "g": list("abc"), - "b": list(range(1, 4)), - "c": np.arange(3, 6).astype("u1"), - "d": np.arange(4.0, 7.0, dtype="float64"), - "e": [True, False, True], - "f": pd.date_range("now", periods=3).values, - } - ) - - msg = "data type.*not understood" - with pytest.raises(TypeError, match=msg): - df.select_dtypes(["blargy, blarg, blarg"]) - - def 
test_select_dtypes_typecodes(self): - # GH 11990 - df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random()) - expected = df - FLOAT_TYPES = list(np.typecodes["AllFloat"]) - tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected) - def test_dtypes_gh8722(self, float_string_frame): float_string_frame["bool"] = float_string_frame["A"] > 0 result = float_string_frame.dtypes diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index df40c2e7e2a11..542d9835bb5d3 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -685,25 +685,6 @@ def test_boolean_comparison(self): with pytest.raises(ValueError, match=msg1d): result = df == tup - def test_combine_generic(self, float_frame): - df1 = float_frame - df2 = float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]] - - combined = df1.combine(df2, np.add) - combined2 = df2.combine(df1, np.add) - assert combined["D"].isna().all() - assert combined2["D"].isna().all() - - chunk = combined.loc[combined.index[:-5], ["A", "B", "C"]] - chunk2 = combined2.loc[combined2.index[:-5], ["A", "B", "C"]] - - exp = ( - float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]].reindex_like(chunk) - * 2 - ) - tm.assert_frame_equal(chunk, exp) - tm.assert_frame_equal(chunk2, exp) - def test_inplace_ops_alignment(self): # inplace ops / ops alignment diff --git a/pandas/tests/frame/test_period.py b/pandas/tests/frame/test_period.py index a6b2b334d3ec8..1ce13fd31ba88 100644 --- a/pandas/tests/frame/test_period.py +++ b/pandas/tests/frame/test_period.py @@ -1,19 +1,6 @@ -from datetime import timedelta - import numpy as np -import pytest - -import pandas as pd -from pandas import ( - DataFrame, - DatetimeIndex, - Index, - PeriodIndex, - Timedelta, - date_range, - period_range, - to_datetime, -) + +from pandas import DataFrame, Index, PeriodIndex, period_range import pandas._testing as tm @@ -49,93 +36,6 @@ def test_frame_setitem(self): assert isinstance(rs.index, PeriodIndex) tm.assert_index_equal(rs.index, rng) - def test_frame_to_time_stamp(self): - K = 5 - index = period_range(freq="A", start="1/1/2001", end="12/1/2009") - df = DataFrame(np.random.randn(len(index), K), index=index) - df["mix"] = "a" - - exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") - exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") - result = df.to_timestamp("D", "end") - tm.assert_index_equal(result.index, exp_index) - tm.assert_numpy_array_equal(result.values, df.values) - - exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") - result = df.to_timestamp("D", "start") - tm.assert_index_equal(result.index, exp_index) - - def _get_with_delta(delta, freq="A-DEC"): - return date_range( - to_datetime("1/1/2001") + delta, - to_datetime("12/31/2009") + delta, - freq=freq, - ) - - delta = timedelta(hours=23) - result = df.to_timestamp("H", "end") - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") - tm.assert_index_equal(result.index, exp_index) - - delta = timedelta(hours=23, minutes=59) - result = df.to_timestamp("T", "end") - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") - tm.assert_index_equal(result.index, exp_index) - - result = df.to_timestamp("S", "end") - delta = timedelta(hours=23, minutes=59, seconds=59) - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") - tm.assert_index_equal(result.index, 
exp_index) - - # columns - df = df.T - - exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC") - exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns") - result = df.to_timestamp("D", "end", axis=1) - tm.assert_index_equal(result.columns, exp_index) - tm.assert_numpy_array_equal(result.values, df.values) - - exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN") - result = df.to_timestamp("D", "start", axis=1) - tm.assert_index_equal(result.columns, exp_index) - - delta = timedelta(hours=23) - result = df.to_timestamp("H", "end", axis=1) - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns") - tm.assert_index_equal(result.columns, exp_index) - - delta = timedelta(hours=23, minutes=59) - result = df.to_timestamp("T", "end", axis=1) - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns") - tm.assert_index_equal(result.columns, exp_index) - - result = df.to_timestamp("S", "end", axis=1) - delta = timedelta(hours=23, minutes=59, seconds=59) - exp_index = _get_with_delta(delta) - exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns") - tm.assert_index_equal(result.columns, exp_index) - - # invalid axis - with pytest.raises(ValueError, match="axis"): - df.to_timestamp(axis=2) - - result1 = df.to_timestamp("5t", axis=1) - result2 = df.to_timestamp("t", axis=1) - expected = pd.date_range("2001-01-01", "2009-01-01", freq="AS") - assert isinstance(result1.columns, DatetimeIndex) - assert isinstance(result2.columns, DatetimeIndex) - tm.assert_numpy_array_equal(result1.columns.asi8, expected.asi8) - tm.assert_numpy_array_equal(result2.columns.asi8, expected.asi8) - # PeriodIndex.to_timestamp always use 'infer' - assert result1.columns.freqstr == "AS-JAN" - assert result2.columns.freqstr == "AS-JAN" - def test_frame_index_to_string(self): index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M") frame = DataFrame(np.random.randn(3, 4), index=index) diff --git a/pandas/tests/frame/test_timeseries.py b/pandas/tests/frame/test_timeseries.py index 5e06b6402c34f..b713af92eac27 100644 --- a/pandas/tests/frame/test_timeseries.py +++ b/pandas/tests/frame/test_timeseries.py @@ -1,30 +1,10 @@ -from datetime import datetime, time -from itertools import product - import numpy as np import pytest -import pytz import pandas as pd -from pandas import ( - DataFrame, - DatetimeIndex, - Index, - MultiIndex, - Series, - date_range, - period_range, - to_datetime, -) +from pandas import DataFrame, Series, date_range, to_datetime import pandas._testing as tm -import pandas.tseries.offsets as offsets - - -@pytest.fixture(params=product([True, False], [True, False])) -def close_open_fixture(request): - return request.param - class TestDataFrameTimeSeriesMethods: def test_frame_ctor_datetime64_column(self): @@ -80,54 +60,6 @@ def test_frame_append_datetime64_col_other_units(self): assert (tmp["dates"].values == ex_vals).all() - def test_asfreq(self, datetime_frame): - offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd()) - rule_monthly = datetime_frame.asfreq("BM") - - tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"]) - - filled = rule_monthly.asfreq("B", method="pad") # noqa - # TODO: actually check that this worked. - - # don't forget! 
- filled_dep = rule_monthly.asfreq("B", method="pad") # noqa - - # test does not blow up on length-0 DataFrame - zero_length = datetime_frame.reindex([]) - result = zero_length.asfreq("BM") - assert result is not zero_length - - def test_asfreq_datetimeindex(self): - df = DataFrame( - {"A": [1, 2, 3]}, - index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)], - ) - df = df.asfreq("B") - assert isinstance(df.index, DatetimeIndex) - - ts = df["A"].asfreq("B") - assert isinstance(ts.index, DatetimeIndex) - - def test_asfreq_fillvalue(self): - # test for fill value during upsampling, related to issue 3715 - - # setup - rng = pd.date_range("1/1/2016", periods=10, freq="2S") - ts = pd.Series(np.arange(len(rng)), index=rng) - df = pd.DataFrame({"one": ts}) - - # insert pre-existing missing value - df.loc["2016-01-01 00:00:08", "one"] = None - - actual_df = df.asfreq(freq="1S", fill_value=9.0) - expected_df = df.asfreq(freq="1S").fillna(9.0) - expected_df.loc["2016-01-01 00:00:08", "one"] = None - tm.assert_frame_equal(expected_df, actual_df) - - expected_series = ts.asfreq(freq="1S").fillna(9.0) - actual_series = ts.asfreq(freq="1S", fill_value=9.0) - tm.assert_series_equal(expected_series, actual_series) - @pytest.mark.parametrize( "data,idx,expected_first,expected_last", [ @@ -239,183 +171,6 @@ def test_last_raises(self): with pytest.raises(TypeError): # index is not a DatetimeIndex df.last("1D") - def test_at_time(self): - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - rs = ts.at_time(rng[1]) - assert (rs.index.hour == rng[1].hour).all() - assert (rs.index.minute == rng[1].minute).all() - assert (rs.index.second == rng[1].second).all() - - result = ts.at_time("9:30") - expected = ts.at_time(time(9, 30)) - tm.assert_frame_equal(result, expected) - - result = ts.loc[time(9, 30)] - expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)] - - tm.assert_frame_equal(result, expected) - - # midnight, everything - rng = date_range("1/1/2000", "1/31/2000") - ts = DataFrame(np.random.randn(len(rng), 3), index=rng) - - result = ts.at_time(time(0, 0)) - tm.assert_frame_equal(result, ts) - - # time doesn't exist - rng = date_range("1/1/2012", freq="23Min", periods=384) - ts = DataFrame(np.random.randn(len(rng), 2), rng) - rs = ts.at_time("16:00") - assert len(rs) == 0 - - @pytest.mark.parametrize( - "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)] - ) - def test_at_time_errors(self, hour): - # GH 24043 - dti = pd.date_range("2018", periods=3, freq="H") - df = pd.DataFrame(list(range(len(dti))), index=dti) - if getattr(hour, "tzinfo", None) is None: - result = df.at_time(hour) - expected = df.iloc[1:2] - tm.assert_frame_equal(result, expected) - else: - with pytest.raises(ValueError, match="Index must be timezone"): - df.at_time(hour) - - def test_at_time_tz(self): - # GH 24043 - dti = pd.date_range("2018", periods=3, freq="H", tz="US/Pacific") - df = pd.DataFrame(list(range(len(dti))), index=dti) - result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern"))) - expected = df.iloc[1:2] - tm.assert_frame_equal(result, expected) - - def test_at_time_raises(self): - # GH20725 - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) - with pytest.raises(TypeError): # index is not a DatetimeIndex - df.at_time("00:00") - - @pytest.mark.parametrize("axis", ["index", "columns", 0, 1]) - def test_at_time_axis(self, axis): - # issue 8839 - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = 
DataFrame(np.random.randn(len(rng), len(rng))) - ts.index, ts.columns = rng, rng - - indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)] - - if axis in ["index", 0]: - expected = ts.loc[indices, :] - elif axis in ["columns", 1]: - expected = ts.loc[:, indices] - - result = ts.at_time("9:30", axis=axis) - tm.assert_frame_equal(result, expected) - - def test_between_time(self, close_open_fixture): - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - stime = time(0, 0) - etime = time(1, 0) - inc_start, inc_end = close_open_fixture - - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = 13 * 4 + 1 - if not inc_start: - exp_len -= 5 - if not inc_end: - exp_len -= 4 - - assert len(filtered) == exp_len - for rs in filtered.index: - t = rs.time() - if inc_start: - assert t >= stime - else: - assert t > stime - - if inc_end: - assert t <= etime - else: - assert t < etime - - result = ts.between_time("00:00", "01:00") - expected = ts.between_time(stime, etime) - tm.assert_frame_equal(result, expected) - - # across midnight - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - stime = time(22, 0) - etime = time(9, 0) - - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = (12 * 11 + 1) * 4 + 1 - if not inc_start: - exp_len -= 4 - if not inc_end: - exp_len -= 4 - - assert len(filtered) == exp_len - for rs in filtered.index: - t = rs.time() - if inc_start: - assert (t >= stime) or (t <= etime) - else: - assert (t > stime) or (t <= etime) - - if inc_end: - assert (t <= etime) or (t >= stime) - else: - assert (t < etime) or (t >= stime) - - def test_between_time_raises(self): - # GH20725 - df = pd.DataFrame([[1, 2, 3], [4, 5, 6]]) - with pytest.raises(TypeError): # index is not a DatetimeIndex - df.between_time(start_time="00:00", end_time="12:00") - - def test_between_time_axis(self, axis): - # issue 8839 - rng = date_range("1/1/2000", periods=100, freq="10min") - ts = DataFrame(np.random.randn(len(rng), len(rng))) - stime, etime = ("08:00:00", "09:00:00") - exp_len = 7 - - if axis in ["index", 0]: - ts.index = rng - assert len(ts.between_time(stime, etime)) == exp_len - assert len(ts.between_time(stime, etime, axis=0)) == exp_len - - if axis in ["columns", 1]: - ts.columns = rng - selected = ts.between_time(stime, etime, axis=1).columns - assert len(selected) == exp_len - - def test_between_time_axis_raises(self, axis): - # issue 8839 - rng = date_range("1/1/2000", periods=100, freq="10min") - mask = np.arange(0, len(rng)) - rand_data = np.random.randn(len(rng), len(rng)) - ts = DataFrame(rand_data, index=rng, columns=rng) - stime, etime = ("08:00:00", "09:00:00") - - msg = "Index must be DatetimeIndex" - if axis in ["columns", 1]: - ts.index = mask - with pytest.raises(TypeError, match=msg): - ts.between_time(stime, etime) - with pytest.raises(TypeError, match=msg): - ts.between_time(stime, etime, axis=0) - - if axis in ["index", 0]: - ts.columns = mask - with pytest.raises(TypeError, match=msg): - ts.between_time(stime, etime, axis=1) - def test_operation_on_NaT(self): # Both NaT and Timestamp are in DataFrame. 
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]}) @@ -455,95 +210,3 @@ def test_datetime_assignment_with_NaT_and_diff_time_units(self): {0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]" ) tm.assert_frame_equal(result, expected) - - def test_frame_to_period(self): - K = 5 - - dr = date_range("1/1/2000", "1/1/2001") - pr = period_range("1/1/2000", "1/1/2001") - df = DataFrame(np.random.randn(len(dr), K), index=dr) - df["mix"] = "a" - - pts = df.to_period() - exp = df.copy() - exp.index = pr - tm.assert_frame_equal(pts, exp) - - pts = df.to_period("M") - tm.assert_index_equal(pts.index, exp.index.asfreq("M")) - - df = df.T - pts = df.to_period(axis=1) - exp = df.copy() - exp.columns = pr - tm.assert_frame_equal(pts, exp) - - pts = df.to_period("M", axis=1) - tm.assert_index_equal(pts.columns, exp.columns.asfreq("M")) - - msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>" - with pytest.raises(ValueError, match=msg): - df.to_period(axis=2) - - @pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"]) - def test_tz_convert_and_localize(self, fn): - l0 = date_range("20140701", periods=5, freq="D") - l1 = date_range("20140701", periods=5, freq="D") - - int_idx = Index(range(5)) - - if fn == "tz_convert": - l0 = l0.tz_localize("UTC") - l1 = l1.tz_localize("UTC") - - for idx in [l0, l1]: - - l0_expected = getattr(idx, fn)("US/Pacific") - l1_expected = getattr(idx, fn)("US/Pacific") - - df1 = DataFrame(np.ones(5), index=l0) - df1 = getattr(df1, fn)("US/Pacific") - tm.assert_index_equal(df1.index, l0_expected) - - # MultiIndex - # GH7846 - df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1])) - - df3 = getattr(df2, fn)("US/Pacific", level=0) - assert not df3.index.levels[0].equals(l0) - tm.assert_index_equal(df3.index.levels[0], l0_expected) - tm.assert_index_equal(df3.index.levels[1], l1) - assert not df3.index.levels[1].equals(l1_expected) - - df3 = getattr(df2, fn)("US/Pacific", level=1) - tm.assert_index_equal(df3.index.levels[0], l0) - assert not df3.index.levels[0].equals(l0_expected) - tm.assert_index_equal(df3.index.levels[1], l1_expected) - assert not df3.index.levels[1].equals(l1) - - df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) - - # TODO: untested - df5 = getattr(df4, fn)("US/Pacific", level=1) # noqa - - tm.assert_index_equal(df3.index.levels[0], l0) - assert not df3.index.levels[0].equals(l0_expected) - tm.assert_index_equal(df3.index.levels[1], l1_expected) - assert not df3.index.levels[1].equals(l1) - - # Bad Inputs - - # Not DatetimeIndex / PeriodIndex - with pytest.raises(TypeError, match="DatetimeIndex"): - df = DataFrame(index=int_idx) - df = getattr(df, fn)("US/Pacific") - - # Not DatetimeIndex / PeriodIndex - with pytest.raises(TypeError, match="DatetimeIndex"): - df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0])) - df = getattr(df, fn)("US/Pacific", level=0) - - # Invalid level - with pytest.raises(ValueError, match="not valid"): - df = DataFrame(index=l0) - df = getattr(df, fn)("US/Pacific", level=1) diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index b60f2052a988f..62e8a4b470218 100644 --- a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -59,34 +59,6 @@ def test_frame_from_records_utc(self): # it works DataFrame.from_records([rec], index="begin_time") - def test_frame_tz_localize(self): - rng = date_range("1/1/2011", periods=100, freq="H") - - df = DataFrame({"a": 1}, index=rng) - result = 
df.tz_localize("utc") - expected = DataFrame({"a": 1}, rng.tz_localize("UTC")) - assert result.index.tz.zone == "UTC" - tm.assert_frame_equal(result, expected) - - df = df.T - result = df.tz_localize("utc", axis=1) - assert result.columns.tz.zone == "UTC" - tm.assert_frame_equal(result, expected.T) - - def test_frame_tz_convert(self): - rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") - - df = DataFrame({"a": 1}, index=rng) - result = df.tz_convert("Europe/Berlin") - expected = DataFrame({"a": 1}, rng.tz_convert("Europe/Berlin")) - assert result.index.tz.zone == "Europe/Berlin" - tm.assert_frame_equal(result, expected) - - df = df.T - result = df.tz_convert("Europe/Berlin", axis=1) - assert result.columns.tz.zone == "Europe/Berlin" - tm.assert_frame_equal(result, expected.T) - def test_frame_join_tzaware(self): test1 = DataFrame( np.zeros((6, 3)), diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index 8e54de771a3e4..1b6cb8447c76d 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -187,8 +187,10 @@ def test_constructor_compound_dtypes(self): def f(dtype): return self._construct(shape=3, value=1, dtype=dtype) - msg = "compound dtypes are not implemented" - f"in the {self._typ.__name__} constructor" + msg = ( + "compound dtypes are not implemented " + f"in the {self._typ.__name__} constructor" + ) with pytest.raises(NotImplementedError, match=msg): f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")]) diff --git a/pandas/tests/generic/test_series.py b/pandas/tests/generic/test_series.py index 5aafd83da78fd..f119eb422a276 100644 --- a/pandas/tests/generic/test_series.py +++ b/pandas/tests/generic/test_series.py @@ -24,13 +24,6 @@ class TestSeries(Generic): _typ = Series _comparator = lambda self, x, y: tm.assert_series_equal(x, y) - def setup_method(self): - self.ts = tm.makeTimeSeries() # Was at top level in test_series - self.ts.name = "ts" - - self.series = tm.makeStringSeries() - self.series.name = "series" - def test_rename_mi(self): s = Series( [11, 21, 31], diff --git a/pandas/tests/indexes/categorical/test_constructors.py b/pandas/tests/indexes/categorical/test_constructors.py index 1df0874e2f947..ee3f85da22781 100644 --- a/pandas/tests/indexes/categorical/test_constructors.py +++ b/pandas/tests/indexes/categorical/test_constructors.py @@ -136,12 +136,3 @@ def test_construction_with_categorical_dtype(self): with pytest.raises(ValueError, match=msg): Index(data, ordered=ordered, dtype=dtype) - - def test_create_categorical(self): - # GH#17513 The public CI constructor doesn't hit this code path with - # instances of CategoricalIndex, but we still want to test the code - ci = CategoricalIndex(["a", "b", "c"]) - # First ci is self, second ci is data. 
- result = CategoricalIndex._create_categorical(ci, ci) - expected = Categorical(["a", "b", "c"]) - tm.assert_categorical_equal(result, expected) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 6479b14e9521e..40c7ffba46450 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -128,15 +128,9 @@ def test_shallow_copy_empty(self): def test_shallow_copy_i8(self): # GH-24391 pi = period_range("2018-01-01", periods=3, freq="2D") - result = pi._shallow_copy(pi.asi8, freq=pi.freq) + result = pi._shallow_copy(pi.asi8) tm.assert_index_equal(result, pi) - def test_shallow_copy_changing_freq_raises(self): - pi = period_range("2018-01-01", periods=3, freq="2D") - msg = "specified freq and dtype are different" - with pytest.raises(IncompatibleFrequency, match=msg): - pi._shallow_copy(pi, freq="H") - def test_view_asi8(self): idx = PeriodIndex([], freq="M") diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 4d3f1b0539aee..87520f5ab2577 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -162,10 +162,9 @@ def test_scalar_non_numeric(self, index_func, klass): s2.loc[3.0] = 10 assert s2.index.is_object() - for idxr in [lambda x: x]: - s2 = s.copy() - idxr(s2)[3.0] = 0 - assert s2.index.is_object() + s2 = s.copy() + s2[3.0] = 0 + assert s2.index.is_object() @pytest.mark.parametrize( "index_func", @@ -250,12 +249,7 @@ def test_scalar_integer(self, index_func, klass): # integer index i = index_func(5) - - if klass is Series: - # TODO: Should we be passing index=i here? - obj = Series(np.arange(len(i))) - else: - obj = DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i) + obj = gen_obj(klass, i) # coerce to equal int for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]: @@ -313,7 +307,7 @@ def test_scalar_float(self, klass): result = idxr(s2)[indexer] self.check(result, s, 3, getitem) - # random integer is a KeyError + # random float is a KeyError with pytest.raises(KeyError, match=r"^3\.5$"): idxr(s)[3.5] @@ -429,15 +423,6 @@ def test_slice_integer(self): indexer = slice(3, 5) self.check(result, s, indexer, False) - # positional indexing - msg = ( - "cannot do slice indexing " - fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " - "type float" - ) - with pytest.raises(TypeError, match=msg): - s[l] - # getitem out-of-bounds for l in [slice(-6, 6), slice(-6.0, 6.0)]: @@ -485,23 +470,6 @@ def test_slice_integer(self): with pytest.raises(TypeError, match=msg): s[l] - # setitem - for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: - - sc = s.copy() - sc.loc[l] = 0 - result = sc.loc[l].values.ravel() - assert (result == 0).all() - - # positional indexing - msg = ( - "cannot do slice indexing " - fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " - "type float" - ) - with pytest.raises(TypeError, match=msg): - s[l] = 0 - @pytest.mark.parametrize("l", [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]) def test_integer_positional_indexing(self, l): """ make sure that we are raising on positional indexing @@ -584,22 +552,34 @@ def test_slice_integer_frame_getitem(self, index_func): with pytest.raises(TypeError, match=msg): s[l] + @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) + @pytest.mark.parametrize( + "index_func", [tm.makeIntIndex, tm.makeRangeIndex], + ) + def test_float_slice_getitem_with_integer_index_raises(self, l, 
index_func): + + # similar to above, but on the getitem dim (of a DataFrame) + index = index_func(5) + + s = DataFrame(np.random.randn(5, 2), index=index) + # setitem - for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: + sc = s.copy() + sc.loc[l] = 0 + result = sc.loc[l].values.ravel() + assert (result == 0).all() - sc = s.copy() - sc.loc[l] = 0 - result = sc.loc[l].values.ravel() - assert (result == 0).all() + # positional indexing + msg = ( + "cannot do slice indexing " + fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[l] = 0 - # positional indexing - msg = ( - "cannot do slice indexing " - fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " - "type float" - ) - with pytest.raises(TypeError, match=msg): - s[l] = 0 + with pytest.raises(TypeError, match=msg): + s[l] @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) @pytest.mark.parametrize("klass", [Series, DataFrame]) @@ -614,10 +594,9 @@ def test_slice_float(self, l, klass): # getitem result = idxr(s)[l] - if isinstance(s, Series): - tm.assert_series_equal(result, expected) - else: - tm.assert_frame_equal(result, expected) + assert isinstance(result, type(s)) + tm.assert_equal(result, expected) + # setitem s2 = s.copy() idxr(s2)[l] = 0 diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 0c9ddbf5473b3..27b0500983afd 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -203,12 +203,6 @@ def create_mgr(descr, item_shape=None): class TestBlock: def setup_method(self, method): - # self.fblock = get_float_ex() # a,c,e - # self.cblock = get_complex_ex() # - # self.oblock = get_obj_ex() - # self.bool_block = get_bool_ex() - # self.int_block = get_int_ex() - self.fblock = create_block("float", [0, 2, 4]) self.cblock = create_block("complex", [7]) self.oblock = create_block("object", [1, 3]) @@ -254,22 +248,11 @@ def test_merge(self): tm.assert_numpy_array_equal(merged.values[[0, 2]], np.array(avals)) tm.assert_numpy_array_equal(merged.values[[1, 3]], np.array(bvals)) - # TODO: merge with mixed type? - def test_copy(self): cop = self.fblock.copy() assert cop is not self.fblock assert_block_equal(self.fblock, cop) - def test_reindex_index(self): - pass - - def test_reindex_cast(self): - pass - - def test_insert(self): - pass - def test_delete(self): newb = self.fblock.copy() newb.delete(0) @@ -300,39 +283,7 @@ def test_delete(self): newb.delete(3) -class TestDatetimeBlock: - def test_can_hold_element(self): - block = create_block("datetime", [0]) - - # We will check that block._can_hold_element iff arr.__setitem__ works - arr = pd.array(block.values.ravel()) - - # coerce None - assert block._can_hold_element(None) - arr[0] = None - assert arr[0] is pd.NaT - - # coerce different types of datetime objects - vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)] - for val in vals: - assert block._can_hold_element(val) - arr[0] = val - - val = date(2010, 10, 10) - assert not block._can_hold_element(val) - - msg = ( - "'value' should be a 'Timestamp', 'NaT', " - "or array of those. Got 'date' instead." 
- ) - with pytest.raises(TypeError, match=msg): - arr[0] = val - - class TestBlockManager: - def test_constructor_corner(self): - pass - def test_attrs(self): mgr = create_mgr("a,b,c: f8-1; d,e,f: f8-2") assert mgr.nblocks == 2 @@ -441,18 +392,6 @@ def test_set_change_dtype(self, mgr): mgr2.set("quux", tm.randn(N)) assert mgr2.get("quux").dtype == np.float_ - def test_set_change_dtype_slice(self): # GH8850 - cols = MultiIndex.from_tuples([("1st", "a"), ("2nd", "b"), ("3rd", "c")]) - df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols) - df["2nd"] = df["2nd"] * 2.0 - - blocks = df._to_dict_of_blocks() - assert sorted(blocks.keys()) == ["float64", "int64"] - tm.assert_frame_equal( - blocks["float64"], DataFrame([[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]) - ) - tm.assert_frame_equal(blocks["int64"], DataFrame([[3], [6]], columns=cols[2:])) - def test_copy(self, mgr): cp = mgr.copy(deep=False) for blk, cp_blk in zip(mgr.blocks, cp.blocks): @@ -486,7 +425,7 @@ def test_sparse_mixed(self): assert len(mgr.blocks) == 3 assert isinstance(mgr, BlockManager) - # what to test here? + # TODO: what to test here? def test_as_array_float(self): mgr = create_mgr("c: f4; d: f2; e: f8") @@ -650,22 +589,6 @@ def test_interleave(self): mgr = create_mgr("a: M8[ns]; b: m8[ns]") assert mgr.as_array().dtype == "object" - def test_interleave_non_unique_cols(self): - df = DataFrame( - [[pd.Timestamp("20130101"), 3.5], [pd.Timestamp("20130102"), 4.5]], - columns=["x", "x"], - index=[1, 2], - ) - - df_unique = df.copy() - df_unique.columns = ["x", "y"] - assert df_unique.values.shape == df.values.shape - tm.assert_numpy_array_equal(df_unique.values[0], df.values[0]) - tm.assert_numpy_array_equal(df_unique.values[1], df.values[1]) - - def test_consolidate(self): - pass - def test_consolidate_ordering_issues(self, mgr): mgr.set("f", tm.randn(N)) mgr.set("d", tm.randn(N)) @@ -683,10 +606,6 @@ def test_consolidate_ordering_issues(self, mgr): cons.blocks[0].mgr_locs.as_array, np.arange(len(cons.items), dtype=np.int64) ) - def test_reindex_index(self): - # TODO: should this be pytest.skip? 
- pass - def test_reindex_items(self): # mgr is not consolidated, f8 & f8-2 blocks mgr = create_mgr("a: f8; b: i8; c: f8; d: i8; e: f8; f: bool; g: f8-2") @@ -767,13 +686,6 @@ def test_get_bool_data(self): def test_unicode_repr_doesnt_raise(self): repr(create_mgr("b,\u05d0: object")) - def test_missing_unicode_key(self): - df = DataFrame({"a": [1]}) - try: - df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError - except KeyError: - pass # this is the expected exception - def test_equals(self): # unique items bm1 = create_mgr("a,b,c: i8-1; d,e,f: i8-2") @@ -843,8 +755,6 @@ class TestIndexing: create_mgr("a,b: f8; c,d: i8; e,f: f8", item_shape=(N, N)), ] - # MANAGERS = [MANAGERS[6]] - @pytest.mark.parametrize("mgr", MANAGERS) def test_get_slice(self, mgr): def assert_slice_ok(mgr, axis, slobj): @@ -994,11 +904,6 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value): mgr, ax, pd.Index(["foo", "bar", "baz"]), [0, 1, 2], fill_value, ) - # test_get_slice(slice_like, axis) - # take(indexer, axis) - # reindex_axis(new_labels, axis) - # reindex_indexer(new_labels, indexer, axis) - class TestBlockPlacement: def test_slice_len(self): @@ -1151,6 +1056,33 @@ def any(self, axis=None): class TestCanHoldElement: + def test_datetime_block_can_hold_element(self): + block = create_block("datetime", [0]) + + # We will check that block._can_hold_element iff arr.__setitem__ works + arr = pd.array(block.values.ravel()) + + # coerce None + assert block._can_hold_element(None) + arr[0] = None + assert arr[0] is pd.NaT + + # coerce different types of datetime objects + vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)] + for val in vals: + assert block._can_hold_element(val) + arr[0] = val + + val = date(2010, 10, 10) + assert not block._can_hold_element(val) + + msg = ( + "'value' should be a 'Timestamp', 'NaT', " + "or array of those. Got 'date' instead." 
+ ) + with pytest.raises(TypeError, match=msg): + arr[0] = val + @pytest.mark.parametrize( "value, dtype", [ @@ -1280,3 +1212,37 @@ def test_dataframe_not_equal(): df1 = pd.DataFrame({"a": [1, 2], "b": ["s", "d"]}) df2 = pd.DataFrame({"a": ["s", "d"], "b": [1, 2]}) assert df1.equals(df2) is False + + +def test_missing_unicode_key(): + df = DataFrame({"a": [1]}) + with pytest.raises(KeyError, match="\u05d0"): + df.loc[:, "\u05d0"] # should not raise UnicodeEncodeError + + +def test_set_change_dtype_slice(): + # GH#8850 + cols = MultiIndex.from_tuples([("1st", "a"), ("2nd", "b"), ("3rd", "c")]) + df = DataFrame([[1.0, 2, 3], [4.0, 5, 6]], columns=cols) + df["2nd"] = df["2nd"] * 2.0 + + blocks = df._to_dict_of_blocks() + assert sorted(blocks.keys()) == ["float64", "int64"] + tm.assert_frame_equal( + blocks["float64"], DataFrame([[1.0, 4.0], [4.0, 10.0]], columns=cols[:2]) + ) + tm.assert_frame_equal(blocks["int64"], DataFrame([[3], [6]], columns=cols[2:])) + + +def test_interleave_non_unique_cols(): + df = DataFrame( + [[pd.Timestamp("20130101"), 3.5], [pd.Timestamp("20130102"), 4.5]], + columns=["x", "x"], + index=[1, 2], + ) + + df_unique = df.copy() + df_unique.columns = ["x", "y"] + assert df_unique.values.shape == df.values.shape + tm.assert_numpy_array_equal(df_unique.values[0], df.values[0]) + tm.assert_numpy_array_equal(df_unique.values[1], df.values[1]) diff --git a/pandas/tests/scalar/timestamp/test_constructors.py b/pandas/tests/scalar/timestamp/test_constructors.py index b4a7173da84d0..4c75d1ebcd377 100644 --- a/pandas/tests/scalar/timestamp/test_constructors.py +++ b/pandas/tests/scalar/timestamp/test_constructors.py @@ -548,3 +548,16 @@ def test_timestamp_constructor_identity(): expected = Timestamp("2017-01-01T12") result = Timestamp(expected) assert result is expected + + +@pytest.mark.parametrize("kwargs", [{}, {"year": 2020}, {"year": 2020, "month": 1}]) +def test_constructor_missing_keyword(kwargs): + # GH 31200 + + # The exact error message of datetime() depends on its version + msg1 = r"function missing required argument '(year|month|day)' \(pos [123]\)" + msg2 = r"Required argument '(year|month|day)' \(pos [123]\) not found" + msg = "|".join([msg1, msg2]) + + with pytest.raises(TypeError, match=msg): + Timestamp(**kwargs) diff --git a/pandas/tests/series/methods/test_append.py b/pandas/tests/series/methods/test_append.py index 4d64b5b397981..4742d6ae3544f 100644 --- a/pandas/tests/series/methods/test_append.py +++ b/pandas/tests/series/methods/test_append.py @@ -2,7 +2,7 @@ import pytest import pandas as pd -from pandas import DataFrame, DatetimeIndex, Series, date_range +from pandas import DataFrame, DatetimeIndex, Index, Series, Timestamp, date_range import pandas._testing as tm @@ -166,3 +166,87 @@ def test_append_tz_dateutil(self): appended = rng.append(rng2) tm.assert_index_equal(appended, rng3) + + def test_series_append_aware(self): + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex( + ["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern" + ) + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz + + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="UTC") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="UTC") + ser1 = Series([1], index=rng1) + 
ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex(["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC") + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + utc = rng1.tz + assert utc == ts_result.index.tz + + # GH#7795 + # different tz coerces to object dtype, not UTC + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Central") + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + exp_index = Index( + [ + Timestamp("1/1/2011 01:00", tz="US/Eastern"), + Timestamp("1/1/2011 02:00", tz="US/Central"), + ] + ) + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + + def test_series_append_aware_naive(self): + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") + ser1 = Series(np.random.randn(len(rng1)), index=rng1) + ser2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ser1.append(ser2) + + expected = ser1.index.astype(object).append(ser2.index.astype(object)) + assert ts_result.index.equals(expected) + + # mixed + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") + rng2 = range(100) + ser1 = Series(np.random.randn(len(rng1)), index=rng1) + ser2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ser1.append(ser2) + + expected = ser1.index.astype(object).append(ser2.index) + assert ts_result.index.equals(expected) + + def test_series_append_dst(self): + rng1 = date_range("1/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") + rng2 = date_range("8/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") + ser1 = Series([1, 2, 3], index=rng1) + ser2 = Series([10, 11, 12], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex( + [ + "2016-01-01 01:00", + "2016-01-01 02:00", + "2016-01-01 03:00", + "2016-08-01 01:00", + "2016-08-01 02:00", + "2016-08-01 03:00", + ], + tz="US/Eastern", + ) + exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) + tm.assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz diff --git a/pandas/tests/series/methods/test_asfreq.py b/pandas/tests/series/methods/test_asfreq.py index 05ec56cf02182..d94b60384a07c 100644 --- a/pandas/tests/series/methods/test_asfreq.py +++ b/pandas/tests/series/methods/test_asfreq.py @@ -1,8 +1,13 @@ +from datetime import datetime + import numpy as np +import pytest -from pandas import DataFrame, Series, period_range +from pandas import DataFrame, DatetimeIndex, Series, date_range, period_range import pandas._testing as tm +from pandas.tseries.offsets import BDay, BMonthEnd + class TestAsFreq: # TODO: de-duplicate/parametrize or move DataFrame test @@ -21,3 +26,79 @@ def test_asfreq_ts(self): result = ts.asfreq("D", how="start") assert len(result) == len(ts) tm.assert_index_equal(result.index, index.asfreq("D", how="start")) + + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_tz_aware_asfreq(self, tz): + dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz) + + ser = Series(np.random.randn(len(dr)), index=dr) + + # it works! 
+ ser.asfreq("T") + + def test_asfreq(self): + ts = Series( + [0.0, 1.0, 2.0], + index=[ + datetime(2009, 10, 30), + datetime(2009, 11, 30), + datetime(2009, 12, 31), + ], + ) + + daily_ts = ts.asfreq("B") + monthly_ts = daily_ts.asfreq("BM") + tm.assert_series_equal(monthly_ts, ts) + + daily_ts = ts.asfreq("B", method="pad") + monthly_ts = daily_ts.asfreq("BM") + tm.assert_series_equal(monthly_ts, ts) + + daily_ts = ts.asfreq(BDay()) + monthly_ts = daily_ts.asfreq(BMonthEnd()) + tm.assert_series_equal(monthly_ts, ts) + + result = ts[:0].asfreq("M") + assert len(result) == 0 + assert result is not ts + + daily_ts = ts.asfreq("D", fill_value=-1) + result = daily_ts.value_counts().sort_index() + expected = Series([60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0]).sort_index() + tm.assert_series_equal(result, expected) + + def test_asfreq_datetimeindex_empty_series(self): + # GH#14320 + index = DatetimeIndex(["2016-09-29 11:00"]) + expected = Series(index=index, dtype=object).asfreq("H") + result = Series([3], index=index.copy()).asfreq("H") + tm.assert_index_equal(expected.index, result.index) + + def test_asfreq_keep_index_name(self): + # GH#9854 + index_name = "bar" + index = date_range("20130101", periods=20, name=index_name) + df = DataFrame(list(range(20)), columns=["foo"], index=index) + + assert index_name == df.index.name + assert index_name == df.asfreq("10D").index.name + + def test_asfreq_normalize(self): + rng = date_range("1/1/2000 09:30", periods=20) + norm = date_range("1/1/2000", periods=20) + vals = np.random.randn(20) + ts = Series(vals, index=rng) + + result = ts.asfreq("D", normalize=True) + norm = date_range("1/1/2000", periods=20) + expected = Series(vals, index=norm) + + tm.assert_series_equal(result, expected) + + vals = np.random.randn(20, 3) + ts = DataFrame(vals, index=rng) + + result = ts.asfreq("D", normalize=True) + expected = DataFrame(vals, index=norm) + + tm.assert_frame_equal(result, expected) diff --git a/pandas/tests/series/methods/test_at_time.py b/pandas/tests/series/methods/test_at_time.py new file mode 100644 index 0000000000000..d9985cf33776a --- /dev/null +++ b/pandas/tests/series/methods/test_at_time.py @@ -0,0 +1,72 @@ +from datetime import time + +import numpy as np +import pytest + +from pandas._libs.tslibs import timezones + +from pandas import DataFrame, Series, date_range +import pandas._testing as tm + + +class TestAtTime: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_localized_at_time(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range("4/16/2012", "5/1/2012", freq="H") + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_local = ts.tz_localize(tzstr) + + result = ts_local.at_time(time(10, 0)) + expected = ts.at_time(time(10, 0)).tz_localize(tzstr) + tm.assert_series_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + def test_at_time(self): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = Series(np.random.randn(len(rng)), index=rng) + rs = ts.at_time(rng[1]) + assert (rs.index.hour == rng[1].hour).all() + assert (rs.index.minute == rng[1].minute).all() + assert (rs.index.second == rng[1].second).all() + + result = ts.at_time("9:30") + expected = ts.at_time(time(9, 30)) + tm.assert_series_equal(result, expected) + + df = DataFrame(np.random.randn(len(rng), 3), index=rng) + + result = ts[time(9, 30)] + result_df = df.loc[time(9, 30)] + expected = ts[(rng.hour == 9) & (rng.minute == 30)] + exp_df = df[(rng.hour == 9) & (rng.minute == 30)] + + 
tm.assert_series_equal(result, expected) + tm.assert_frame_equal(result_df, exp_df) + + chunk = df.loc["1/4/2000":] + result = chunk.loc[time(9, 30)] + expected = result_df[-1:] + tm.assert_frame_equal(result, expected) + + # midnight, everything + rng = date_range("1/1/2000", "1/31/2000") + ts = Series(np.random.randn(len(rng)), index=rng) + + result = ts.at_time(time(0, 0)) + tm.assert_series_equal(result, ts) + + # time doesn't exist + rng = date_range("1/1/2012", freq="23Min", periods=384) + ts = Series(np.random.randn(len(rng)), rng) + rs = ts.at_time("16:00") + assert len(rs) == 0 + + def test_at_time_raises(self): + # GH20725 + ser = Series("a b c".split()) + msg = "Index must be DatetimeIndex" + with pytest.raises(TypeError, match=msg): + ser.at_time("00:00") diff --git a/pandas/tests/series/methods/test_between.py b/pandas/tests/series/methods/test_between.py new file mode 100644 index 0000000000000..350a3fe6ff009 --- /dev/null +++ b/pandas/tests/series/methods/test_between.py @@ -0,0 +1,35 @@ +import numpy as np + +from pandas import Series, bdate_range, date_range, period_range +import pandas._testing as tm + + +class TestBetween: + + # TODO: redundant with test_between_datetime_values? + def test_between(self): + series = Series(date_range("1/1/2000", periods=10)) + left, right = series[[2, 7]] + + result = series.between(left, right) + expected = (series >= left) & (series <= right) + tm.assert_series_equal(result, expected) + + def test_between_datetime_values(self): + ser = Series(bdate_range("1/1/2000", periods=20).astype(object)) + ser[::2] = np.nan + + result = ser[ser.between(ser[3], ser[17])] + expected = ser[3:18].dropna() + tm.assert_series_equal(result, expected) + + result = ser[ser.between(ser[3], ser[17], inclusive=False)] + expected = ser[5:16].dropna() + tm.assert_series_equal(result, expected) + + def test_between_period_values(self): + ser = Series(period_range("2000-01-01", periods=10, freq="D")) + left, right = ser[[2, 7]] + result = ser.between(left, right) + expected = (ser >= left) & (ser <= right) + tm.assert_series_equal(result, expected) diff --git a/pandas/tests/series/methods/test_between_time.py b/pandas/tests/series/methods/test_between_time.py new file mode 100644 index 0000000000000..3fa26afe77a1d --- /dev/null +++ b/pandas/tests/series/methods/test_between_time.py @@ -0,0 +1,144 @@ +from datetime import datetime, time +from itertools import product + +import numpy as np +import pytest + +from pandas._libs.tslibs import timezones +import pandas.util._test_decorators as td + +from pandas import DataFrame, Series, date_range +import pandas._testing as tm + + +class TestBetweenTime: + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_localized_between_time(self, tzstr): + tz = timezones.maybe_get_tz(tzstr) + + rng = date_range("4/16/2012", "5/1/2012", freq="H") + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_local = ts.tz_localize(tzstr) + + t1, t2 = time(10, 0), time(11, 0) + result = ts_local.between_time(t1, t2) + expected = ts.between_time(t1, t2).tz_localize(tzstr) + tm.assert_series_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + def test_between_time(self): + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = Series(np.random.randn(len(rng)), index=rng) + stime = time(0, 0) + etime = time(1, 0) + + close_open = product([True, False], [True, False]) + for inc_start, inc_end in close_open: + filtered = ts.between_time(stime, etime, inc_start, inc_end) + 
exp_len = 13 * 4 + 1 + if not inc_start: + exp_len -= 5 + if not inc_end: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inc_start: + assert t >= stime + else: + assert t > stime + + if inc_end: + assert t <= etime + else: + assert t < etime + + result = ts.between_time("00:00", "01:00") + expected = ts.between_time(stime, etime) + tm.assert_series_equal(result, expected) + + # across midnight + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = Series(np.random.randn(len(rng)), index=rng) + stime = time(22, 0) + etime = time(9, 0) + + close_open = product([True, False], [True, False]) + for inc_start, inc_end in close_open: + filtered = ts.between_time(stime, etime, inc_start, inc_end) + exp_len = (12 * 11 + 1) * 4 + 1 + if not inc_start: + exp_len -= 4 + if not inc_end: + exp_len -= 4 + + assert len(filtered) == exp_len + for rs in filtered.index: + t = rs.time() + if inc_start: + assert (t >= stime) or (t <= etime) + else: + assert (t > stime) or (t <= etime) + + if inc_end: + assert (t <= etime) or (t >= stime) + else: + assert (t < etime) or (t >= stime) + + def test_between_time_raises(self): + # GH20725 + ser = Series("a b c".split()) + msg = "Index must be DatetimeIndex" + with pytest.raises(TypeError, match=msg): + ser.between_time(start_time="00:00", end_time="12:00") + + def test_between_time_types(self): + # GH11818 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time" + with pytest.raises(ValueError, match=msg): + rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + + frame = DataFrame({"A": 0}, index=rng) + with pytest.raises(ValueError, match=msg): + frame.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + + series = Series(0, index=rng) + with pytest.raises(ValueError, match=msg): + series.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) + + @td.skip_if_has_locale + def test_between_time_formats(self): + # GH11818 + rng = date_range("1/1/2000", "1/5/2000", freq="5min") + ts = DataFrame(np.random.randn(len(rng), 2), index=rng) + + strings = [ + ("2:00", "2:30"), + ("0200", "0230"), + ("2:00am", "2:30am"), + ("0200am", "0230am"), + ("2:00:00", "2:30:00"), + ("020000", "023000"), + ("2:00:00am", "2:30:00am"), + ("020000am", "023000am"), + ] + expected_length = 28 + + for time_string in strings: + assert len(ts.between_time(*time_string)) == expected_length + + def test_between_time_axis(self): + # issue 8839 + rng = date_range("1/1/2000", periods=100, freq="10min") + ts = Series(np.random.randn(len(rng)), index=rng) + stime, etime = ("08:00:00", "09:00:00") + expected_length = 7 + + assert len(ts.between_time(stime, etime)) == expected_length + assert len(ts.between_time(stime, etime, axis=0)) == expected_length + msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>" + with pytest.raises(ValueError, match=msg): + ts.between_time(stime, etime, axis=1) diff --git a/pandas/tests/series/methods/test_combine.py b/pandas/tests/series/methods/test_combine.py new file mode 100644 index 0000000000000..75d47e3daa103 --- /dev/null +++ b/pandas/tests/series/methods/test_combine.py @@ -0,0 +1,17 @@ +from pandas import Series +import pandas._testing as tm + + +class TestCombine: + def test_combine_scalar(self): + # GH#21248 + # Note - combine() with another Series is tested elsewhere because + # it is used when testing operators + ser = Series([i * 10 for i in range(5)]) + 
result = ser.combine(3, lambda x, y: x + y)
+        expected = Series([i * 10 + 3 for i in range(5)])
+        tm.assert_series_equal(result, expected)
+
+        result = ser.combine(22, lambda x, y: min(x, y))
+        expected = Series([min(i * 10, 22) for i in range(5)])
+        tm.assert_series_equal(result, expected)
diff --git a/pandas/tests/series/methods/test_rename.py b/pandas/tests/series/methods/test_rename.py
new file mode 100644
index 0000000000000..60182f509e657
--- /dev/null
+++ b/pandas/tests/series/methods/test_rename.py
@@ -0,0 +1,91 @@
+from datetime import datetime
+
+import numpy as np
+
+from pandas import Index, Series
+import pandas._testing as tm
+
+
+class TestRename:
+    def test_rename(self, datetime_series):
+        ts = datetime_series
+        renamer = lambda x: x.strftime("%Y%m%d")
+        renamed = ts.rename(renamer)
+        assert renamed.index[0] == renamer(ts.index[0])
+
+        # dict
+        rename_dict = dict(zip(ts.index, renamed.index))
+        renamed2 = ts.rename(rename_dict)
+        tm.assert_series_equal(renamed, renamed2)
+
+        # partial dict
+        s = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64")
+        renamed = s.rename({"b": "foo", "d": "bar"})
+        tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"]))
+
+        # index with name
+        renamer = Series(
+            np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64"
+        )
+        renamed = renamer.rename({})
+        assert renamed.index.name == renamer.index.name
+
+    def test_rename_by_series(self):
+        s = Series(range(5), name="foo")
+        renamer = Series({1: 10, 2: 20})
+        result = s.rename(renamer)
+        expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo")
+        tm.assert_series_equal(result, expected)
+
+    def test_rename_set_name(self):
+        s = Series(range(4), index=list("abcd"))
+        for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
+            result = s.rename(name)
+            assert result.name == name
+            tm.assert_numpy_array_equal(result.index.values, s.index.values)
+            assert s.name is None
+
+    def test_rename_set_name_inplace(self):
+        s = Series(range(3), index=list("abc"))
+        for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]:
+            s.rename(name, inplace=True)
+            assert s.name == name
+
+            exp = np.array(["a", "b", "c"], dtype=np.object_)
+            tm.assert_numpy_array_equal(s.index.values, exp)
+
+    def test_rename_axis_supported(self):
+        # Supporting axis for compatibility, detailed in GH-18589
+        s = Series(range(5))
+        s.rename({}, axis=0)
+        s.rename({}, axis="index")
+        # FIXME: don't leave commented-out code
+        # TODO: clean up shared index validation
+        # with pytest.raises(ValueError, match="No axis named 5"):
+        #     s.rename({}, axis=5)
+
+    def test_rename_inplace(self, datetime_series):
+        renamer = lambda x: x.strftime("%Y%m%d")
+        expected = renamer(datetime_series.index[0])
+
+        datetime_series.rename(renamer, inplace=True)
+        assert datetime_series.index[0] == expected
+
+    def test_rename_with_custom_indexer(self):
+        # GH 27814
+        class MyIndexer:
+            pass
+
+        ix = MyIndexer()
+        s = Series([1, 2, 3]).rename(ix)
+        assert s.name is ix
+
+    def test_rename_with_custom_indexer_inplace(self):
+        # GH 27814
+        class MyIndexer:
+            pass
+
+        ix = MyIndexer()
+        s = Series([1, 2, 3])
+        s.rename(ix, inplace=True)
+        assert s.name is ix
diff --git a/pandas/tests/series/methods/test_reset_index.py b/pandas/tests/series/methods/test_reset_index.py
new file mode 100644
index 0000000000000..f0c4895ad7c10
--- /dev/null
+++ b/pandas/tests/series/methods/test_reset_index.py
@@ -0,0 +1,110 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame, Index, MultiIndex, 
RangeIndex, Series +import pandas._testing as tm + + +class TestResetIndex: + def test_reset_index(self): + df = tm.makeDataFrame()[:5] + ser = df.stack() + ser.index.names = ["hash", "category"] + + ser.name = "value" + df = ser.reset_index() + assert "value" in df + + df = ser.reset_index(name="value2") + assert "value2" in df + + # check inplace + s = ser.reset_index(drop=True) + s2 = ser + s2.reset_index(drop=True, inplace=True) + tm.assert_series_equal(s, s2) + + # level + index = MultiIndex( + levels=[["bar"], ["one", "two", "three"], [0, 1]], + codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], + ) + s = Series(np.random.randn(6), index=index) + rs = s.reset_index(level=1) + assert len(rs.columns) == 2 + + rs = s.reset_index(level=[0, 2], drop=True) + tm.assert_index_equal(rs.index, Index(index.get_level_values(1))) + assert isinstance(rs, Series) + + def test_reset_index_name(self): + s = Series([1, 2, 3], index=Index(range(3), name="x")) + assert s.reset_index().index.name is None + assert s.reset_index(drop=True).index.name is None + + def test_reset_index_level(self): + df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) + + for levels in ["A", "B"], [0, 1]: + # With MultiIndex + s = df.set_index(["A", "B"])["C"] + + result = s.reset_index(level=levels[0]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = s.reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = s.reset_index(level=levels) + tm.assert_frame_equal(result, df) + + result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True) + tm.assert_frame_equal(result, df[["C"]]) + + with pytest.raises(KeyError, match="Level E "): + s.reset_index(level=["A", "E"]) + + # With single-level Index + s = df.set_index("A")["B"] + + result = s.reset_index(level=levels[0]) + tm.assert_frame_equal(result, df[["A", "B"]]) + + result = s.reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df[["A", "B"]]) + + result = s.reset_index(level=levels[0], drop=True) + tm.assert_series_equal(result, df["B"]) + + with pytest.raises(IndexError, match="Too many levels"): + s.reset_index(level=[0, 1, 2]) + + # Check that .reset_index([],drop=True) doesn't fail + result = Series(range(4)).reset_index([], drop=True) + expected = Series(range(4)) + tm.assert_series_equal(result, expected) + + def test_reset_index_range(self): + # GH 12071 + s = Series(range(2), name="A", dtype="int64") + series_result = s.reset_index() + assert isinstance(series_result.index, RangeIndex) + series_expected = DataFrame( + [[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2) + ) + tm.assert_frame_equal(series_result, series_expected) + + def test_reset_index_drop_errors(self): + # GH 20925 + + # KeyError raised for series index when passed level name is missing + s = Series(range(4)) + with pytest.raises(KeyError, match="does not match index name"): + s.reset_index("wrong", drop=True) + with pytest.raises(KeyError, match="does not match index name"): + s.reset_index("wrong") + + # KeyError raised for series when level to be dropped is missing + s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2)) + with pytest.raises(KeyError, match="not found"): + s.reset_index("wrong", drop=True) diff --git a/pandas/tests/series/methods/test_truncate.py b/pandas/tests/series/methods/test_truncate.py index d4e2890ed8bf0..c97369b349f56 100644 --- a/pandas/tests/series/methods/test_truncate.py +++ b/pandas/tests/series/methods/test_truncate.py @@ -1,7 +1,10 @@ +from 
datetime import datetime + import numpy as np import pytest import pandas as pd +from pandas import Series, date_range import pandas._testing as tm from pandas.tseries.offsets import BDay @@ -76,3 +79,33 @@ def test_truncate_nonsortedindex(self): with pytest.raises(ValueError, match=msg): ts.sort_values(ascending=False).truncate(before="2011-11", after="2011-12") + + def test_truncate_datetimeindex_tz(self): + # GH 9243 + idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific") + s = Series(range(len(idx)), index=idx) + result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4)) + expected = Series([1, 2, 3], index=idx[1:4]) + tm.assert_series_equal(result, expected) + + def test_truncate_periodindex(self): + # GH 17717 + idx1 = pd.PeriodIndex( + [pd.Period("2017-09-02"), pd.Period("2017-09-02"), pd.Period("2017-09-03")] + ) + series1 = pd.Series([1, 2, 3], index=idx1) + result1 = series1.truncate(after="2017-09-02") + + expected_idx1 = pd.PeriodIndex( + [pd.Period("2017-09-02"), pd.Period("2017-09-02")] + ) + tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1)) + + idx2 = pd.PeriodIndex( + [pd.Period("2017-09-03"), pd.Period("2017-09-02"), pd.Period("2017-09-03")] + ) + series2 = pd.Series([1, 2, 3], index=idx2) + result2 = series2.sort_index().truncate(after="2017-09-02") + + expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")]) + tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2)) diff --git a/pandas/tests/series/methods/test_tz_convert.py b/pandas/tests/series/methods/test_tz_convert.py new file mode 100644 index 0000000000000..ce348d5323e62 --- /dev/null +++ b/pandas/tests/series/methods/test_tz_convert.py @@ -0,0 +1,29 @@ +import numpy as np +import pytest + +from pandas import DatetimeIndex, Series, date_range +import pandas._testing as tm + + +class TestTZConvert: + def test_series_tz_convert(self): + rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") + ts = Series(1, index=rng) + + result = ts.tz_convert("Europe/Berlin") + assert result.index.tz.zone == "Europe/Berlin" + + # can't convert tz-naive + rng = date_range("1/1/2011", periods=200, freq="D") + ts = Series(1, index=rng) + + with pytest.raises(TypeError, match="Cannot convert tz-naive"): + ts.tz_convert("US/Eastern") + + def test_series_tz_convert_to_utc(self): + base = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC") + idx1 = base.tz_convert("Asia/Tokyo")[:2] + idx2 = base.tz_convert("US/Eastern")[1:] + + res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2) + tm.assert_series_equal(res, Series([np.nan, 3, np.nan], index=base)) diff --git a/pandas/tests/series/methods/test_tz_localize.py b/pandas/tests/series/methods/test_tz_localize.py new file mode 100644 index 0000000000000..44c55edf77c0a --- /dev/null +++ b/pandas/tests/series/methods/test_tz_localize.py @@ -0,0 +1,88 @@ +import pytest +import pytz + +from pandas._libs.tslibs import timezones + +from pandas import DatetimeIndex, NaT, Series, Timestamp, date_range +import pandas._testing as tm + + +class TestTZLocalize: + def test_series_tz_localize(self): + + rng = date_range("1/1/2011", periods=100, freq="H") + ts = Series(1, index=rng) + + result = ts.tz_localize("utc") + assert result.index.tz.zone == "UTC" + + # Can't localize if already tz-aware + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + ts = Series(1, index=rng) + + with pytest.raises(TypeError, match="Already tz-aware"): + ts.tz_localize("US/Eastern") + + def 
test_series_tz_localize_ambiguous_bool(self): + # make sure that we are correctly accepting bool values as ambiguous + + # GH#14402 + ts = Timestamp("2015-11-01 01:00:03") + expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central") + expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central") + + ser = Series([ts]) + expected0 = Series([expected0]) + expected1 = Series([expected1]) + + with pytest.raises(pytz.AmbiguousTimeError): + ser.dt.tz_localize("US/Central") + + result = ser.dt.tz_localize("US/Central", ambiguous=True) + tm.assert_series_equal(result, expected0) + + result = ser.dt.tz_localize("US/Central", ambiguous=[True]) + tm.assert_series_equal(result, expected0) + + result = ser.dt.tz_localize("US/Central", ambiguous=False) + tm.assert_series_equal(result, expected1) + + result = ser.dt.tz_localize("US/Central", ambiguous=[False]) + tm.assert_series_equal(result, expected1) + + @pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"]) + @pytest.mark.parametrize( + "method, exp", + [ + ["shift_forward", "2015-03-29 03:00:00"], + ["NaT", NaT], + ["raise", None], + ["foo", "invalid"], + ], + ) + def test_series_tz_localize_nonexistent(self, tz, method, exp): + # GH 8917 + n = 60 + dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min") + s = Series(1, dti) + if method == "raise": + with pytest.raises(pytz.NonExistentTimeError): + s.tz_localize(tz, nonexistent=method) + elif exp == "invalid": + with pytest.raises(ValueError): + dti.tz_localize(tz, nonexistent=method) + else: + result = s.tz_localize(tz, nonexistent=method) + expected = Series(1, index=DatetimeIndex([exp] * n, tz=tz)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) + def test_series_tz_localize_empty(self, tzstr): + # GH#2248 + ser = Series(dtype=object) + + ser2 = ser.tz_localize("utc") + assert ser2.index.tz == pytz.utc + + ser2 = ser.tz_localize(tzstr) + timezones.tz_compare(ser2.index.tz, timezones.maybe_get_tz(tzstr)) diff --git a/pandas/tests/series/methods/test_update.py b/pandas/tests/series/methods/test_update.py new file mode 100644 index 0000000000000..b7f5f33294792 --- /dev/null +++ b/pandas/tests/series/methods/test_update.py @@ -0,0 +1,58 @@ +import numpy as np +import pytest + +from pandas import DataFrame, Series +import pandas._testing as tm + + +class TestUpdate: + def test_update(self): + s = Series([1.5, np.nan, 3.0, 4.0, np.nan]) + s2 = Series([np.nan, 3.5, np.nan, 5.0]) + s.update(s2) + + expected = Series([1.5, 3.5, 3.0, 5.0, np.nan]) + tm.assert_series_equal(s, expected) + + # GH 3217 + df = DataFrame([{"a": 1}, {"a": 3, "b": 2}]) + df["c"] = np.nan + + df["c"].update(Series(["foo"], index=[0])) + expected = DataFrame( + [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"] + ) + tm.assert_frame_equal(df, expected) + + @pytest.mark.parametrize( + "other, dtype, expected", + [ + # other is int + ([61, 63], "int32", Series([10, 61, 12], dtype="int32")), + ([61, 63], "int64", Series([10, 61, 12])), + ([61, 63], float, Series([10.0, 61.0, 12.0])), + ([61, 63], object, Series([10, 61, 12], dtype=object)), + # other is float, but can be cast to int + ([61.0, 63.0], "int32", Series([10, 61, 12], dtype="int32")), + ([61.0, 63.0], "int64", Series([10, 61, 12])), + ([61.0, 63.0], float, Series([10.0, 61.0, 12.0])), + ([61.0, 63.0], object, Series([10, 61.0, 12], dtype=object)), + # other is float, cannot be cast to int + ([61.1, 63.1], "int32", Series([10.0, 61.1, 12.0])), +
([61.1, 63.1], "int64", Series([10.0, 61.1, 12.0])), + ([61.1, 63.1], float, Series([10.0, 61.1, 12.0])), + ([61.1, 63.1], object, Series([10, 61.1, 12], dtype=object)), + # other is object, cannot be cast + ([(61,), (63,)], "int32", Series([10, (61,), 12])), + ([(61,), (63,)], "int64", Series([10, (61,), 12])), + ([(61,), (63,)], float, Series([10.0, (61,), 12.0])), + ([(61,), (63,)], object, Series([10, (61,), 12])), + ], + ) + def test_update_dtypes(self, other, dtype, expected): + + ser = Series([10, 11, 12], dtype=dtype) + other = Series(other, index=[1, 3]) + ser.update(other) + + tm.assert_series_equal(ser, expected) diff --git a/pandas/tests/series/test_alter_axes.py b/pandas/tests/series/test_alter_axes.py index 71f6681e8c955..9be8744d7223f 100644 --- a/pandas/tests/series/test_alter_axes.py +++ b/pandas/tests/series/test_alter_axes.py @@ -3,7 +3,7 @@ import numpy as np import pytest -from pandas import DataFrame, Index, MultiIndex, RangeIndex, Series +from pandas import Index, MultiIndex, Series import pandas._testing as tm @@ -31,62 +31,6 @@ def test_setindex(self, string_series): # Renaming - def test_rename(self, datetime_series): - ts = datetime_series - renamer = lambda x: x.strftime("%Y%m%d") - renamed = ts.rename(renamer) - assert renamed.index[0] == renamer(ts.index[0]) - - # dict - rename_dict = dict(zip(ts.index, renamed.index)) - renamed2 = ts.rename(rename_dict) - tm.assert_series_equal(renamed, renamed2) - - # partial dict - s = Series(np.arange(4), index=["a", "b", "c", "d"], dtype="int64") - renamed = s.rename({"b": "foo", "d": "bar"}) - tm.assert_index_equal(renamed.index, Index(["a", "foo", "c", "bar"])) - - # index with name - renamer = Series( - np.arange(4), index=Index(["a", "b", "c", "d"], name="name"), dtype="int64" - ) - renamed = renamer.rename({}) - assert renamed.index.name == renamer.index.name - - def test_rename_by_series(self): - s = Series(range(5), name="foo") - renamer = Series({1: 10, 2: 20}) - result = s.rename(renamer) - expected = Series(range(5), index=[0, 10, 20, 3, 4], name="foo") - tm.assert_series_equal(result, expected) - - def test_rename_set_name(self): - s = Series(range(4), index=list("abcd")) - for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: - result = s.rename(name) - assert result.name == name - tm.assert_numpy_array_equal(result.index.values, s.index.values) - assert s.name is None - - def test_rename_set_name_inplace(self): - s = Series(range(3), index=list("abc")) - for name in ["foo", 123, 123.0, datetime(2001, 11, 11), ("foo",)]: - s.rename(name, inplace=True) - assert s.name == name - - exp = np.array(["a", "b", "c"], dtype=np.object_) - tm.assert_numpy_array_equal(s.index.values, exp) - - def test_rename_axis_supported(self): - # Supporting axis for compatibility, detailed in GH-18589 - s = Series(range(5)) - s.rename({}, axis=0) - s.rename({}, axis="index") - # TODO: clean up shared index validation - # with pytest.raises(ValueError, match="No axis named 5"): - # s.rename({}, axis=5) - def test_set_name_attribute(self): s = Series([1, 2, 3]) s2 = Series([1, 2, 3], name="bar") @@ -103,13 +47,6 @@ def test_set_name(self): assert s.name is None assert s is not s2 - def test_rename_inplace(self, datetime_series): - renamer = lambda x: x.strftime("%Y%m%d") - expected = renamer(datetime_series.index[0]) - - datetime_series.rename(renamer, inplace=True) - assert datetime_series.index[0] == expected - def test_set_index_makes_timeseries(self): idx = tm.makeDateIndex(10) @@ -117,94 +54,6 @@ def 
test_set_index_makes_timeseries(self): s.index = idx assert s.index.is_all_dates - def test_reset_index(self): - df = tm.makeDataFrame()[:5] - ser = df.stack() - ser.index.names = ["hash", "category"] - - ser.name = "value" - df = ser.reset_index() - assert "value" in df - - df = ser.reset_index(name="value2") - assert "value2" in df - - # check inplace - s = ser.reset_index(drop=True) - s2 = ser - s2.reset_index(drop=True, inplace=True) - tm.assert_series_equal(s, s2) - - # level - index = MultiIndex( - levels=[["bar"], ["one", "two", "three"], [0, 1]], - codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], - ) - s = Series(np.random.randn(6), index=index) - rs = s.reset_index(level=1) - assert len(rs.columns) == 2 - - rs = s.reset_index(level=[0, 2], drop=True) - tm.assert_index_equal(rs.index, Index(index.get_level_values(1))) - assert isinstance(rs, Series) - - def test_reset_index_name(self): - s = Series([1, 2, 3], index=Index(range(3), name="x")) - assert s.reset_index().index.name is None - assert s.reset_index(drop=True).index.name is None - - def test_reset_index_level(self): - df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"]) - - for levels in ["A", "B"], [0, 1]: - # With MultiIndex - s = df.set_index(["A", "B"])["C"] - - result = s.reset_index(level=levels[0]) - tm.assert_frame_equal(result, df.set_index("B")) - - result = s.reset_index(level=levels[:1]) - tm.assert_frame_equal(result, df.set_index("B")) - - result = s.reset_index(level=levels) - tm.assert_frame_equal(result, df) - - result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True) - tm.assert_frame_equal(result, df[["C"]]) - - with pytest.raises(KeyError, match="Level E "): - s.reset_index(level=["A", "E"]) - - # With single-level Index - s = df.set_index("A")["B"] - - result = s.reset_index(level=levels[0]) - tm.assert_frame_equal(result, df[["A", "B"]]) - - result = s.reset_index(level=levels[:1]) - tm.assert_frame_equal(result, df[["A", "B"]]) - - result = s.reset_index(level=levels[0], drop=True) - tm.assert_series_equal(result, df["B"]) - - with pytest.raises(IndexError, match="Too many levels"): - s.reset_index(level=[0, 1, 2]) - - # Check that .reset_index([],drop=True) doesn't fail - result = Series(range(4)).reset_index([], drop=True) - expected = Series(range(4)) - tm.assert_series_equal(result, expected) - - def test_reset_index_range(self): - # GH 12071 - s = Series(range(2), name="A", dtype="int64") - series_result = s.reset_index() - assert isinstance(series_result.index, RangeIndex) - series_expected = DataFrame( - [[0, 0], [1, 1]], columns=["index", "A"], index=RangeIndex(stop=2) - ) - tm.assert_frame_equal(series_result, series_expected) - def test_reorder_levels(self): index = MultiIndex( levels=[["bar"], ["one", "two", "three"], [0, 1]], @@ -268,25 +117,6 @@ def test_rename_axis_none(self, kwargs): expected = Series([1, 2, 3], index=expected_index) tm.assert_series_equal(result, expected) - def test_rename_with_custom_indexer(self): - # GH 27814 - class MyIndexer: - pass - - ix = MyIndexer() - s = Series([1, 2, 3]).rename(ix) - assert s.name is ix - - def test_rename_with_custom_indexer_inplace(self): - # GH 27814 - class MyIndexer: - pass - - ix = MyIndexer() - s = Series([1, 2, 3]) - s.rename(ix, inplace=True) - assert s.name is ix - def test_set_axis_inplace_axes(self, axis_series): # GH14636 ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64") @@ -323,21 +153,6 @@ def test_set_axis_inplace(self): with pytest.raises(ValueError, match="No axis 
named"): s.set_axis(list("abcd"), axis=axis, inplace=False) - def test_reset_index_drop_errors(self): - # GH 20925 - - # KeyError raised for series index when passed level name is missing - s = Series(range(4)) - with pytest.raises(KeyError, match="does not match index name"): - s.reset_index("wrong", drop=True) - with pytest.raises(KeyError, match="does not match index name"): - s.reset_index("wrong") - - # KeyError raised for series when level to be dropped is missing - s = Series(range(4), index=MultiIndex.from_product([[1, 2]] * 2)) - with pytest.raises(KeyError, match="not found"): - s.reset_index("wrong", drop=True) - def test_droplevel(self): # GH20342 ser = Series([1, 2, 3, 4]) diff --git a/pandas/tests/series/test_combine_concat.py b/pandas/tests/series/test_combine_concat.py index 4afa083e97c7c..adb79f69c2d81 100644 --- a/pandas/tests/series/test_combine_concat.py +++ b/pandas/tests/series/test_combine_concat.py @@ -2,84 +2,27 @@ import pytest import pandas as pd -from pandas import DataFrame, Series -import pandas._testing as tm +from pandas import Series class TestSeriesCombine: - def test_combine_scalar(self): - # GH 21248 - # Note - combine() with another Series is tested elsewhere because - # it is used when testing operators - s = pd.Series([i * 10 for i in range(5)]) - result = s.combine(3, lambda x, y: x + y) - expected = pd.Series([i * 10 + 3 for i in range(5)]) - tm.assert_series_equal(result, expected) - - result = s.combine(22, lambda x, y: min(x, y)) - expected = pd.Series([min(i * 10, 22) for i in range(5)]) - tm.assert_series_equal(result, expected) - - def test_update(self): - s = Series([1.5, np.nan, 3.0, 4.0, np.nan]) - s2 = Series([np.nan, 3.5, np.nan, 5.0]) - s.update(s2) - - expected = Series([1.5, 3.5, 3.0, 5.0, np.nan]) - tm.assert_series_equal(s, expected) - - # GH 3217 - df = DataFrame([{"a": 1}, {"a": 3, "b": 2}]) - df["c"] = np.nan - - df["c"].update(Series(["foo"], index=[0])) - expected = DataFrame( - [[1, np.nan, "foo"], [3, 2.0, np.nan]], columns=["a", "b", "c"] - ) - tm.assert_frame_equal(df, expected) - @pytest.mark.parametrize( - "other, dtype, expected", - [ - # other is int - ([61, 63], "int32", pd.Series([10, 61, 12], dtype="int32")), - ([61, 63], "int64", pd.Series([10, 61, 12])), - ([61, 63], float, pd.Series([10.0, 61.0, 12.0])), - ([61, 63], object, pd.Series([10, 61, 12], dtype=object)), - # other is float, but can be cast to int - ([61.0, 63.0], "int32", pd.Series([10, 61, 12], dtype="int32")), - ([61.0, 63.0], "int64", pd.Series([10, 61, 12])), - ([61.0, 63.0], float, pd.Series([10.0, 61.0, 12.0])), - ([61.0, 63.0], object, pd.Series([10, 61.0, 12], dtype=object)), - # others is float, cannot be cast to int - ([61.1, 63.1], "int32", pd.Series([10.0, 61.1, 12.0])), - ([61.1, 63.1], "int64", pd.Series([10.0, 61.1, 12.0])), - ([61.1, 63.1], float, pd.Series([10.0, 61.1, 12.0])), - ([61.1, 63.1], object, pd.Series([10, 61.1, 12], dtype=object)), - # other is object, cannot be cast - ([(61,), (63,)], "int32", pd.Series([10, (61,), 12])), - ([(61,), (63,)], "int64", pd.Series([10, (61,), 12])), - ([(61,), (63,)], float, pd.Series([10.0, (61,), 12.0])), - ([(61,), (63,)], object, pd.Series([10, (61,), 12])), - ], + "dtype", ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"] ) - def test_update_dtypes(self, other, dtype, expected): + def test_concat_empty_series_dtypes_match_roundtrips(self, dtype): + dtype = np.dtype(dtype) - s = Series([10, 11, 12], dtype=dtype) - other = Series(other, index=[1, 3]) - s.update(other) + result = 
pd.concat([Series(dtype=dtype)]) + assert result.dtype == dtype - tm.assert_series_equal(s, expected) + result = pd.concat([Series(dtype=dtype), Series(dtype=dtype)]) + assert result.dtype == dtype def test_concat_empty_series_dtypes_roundtrips(self): # round-tripping with self & like self dtypes = map(np.dtype, ["float64", "int8", "uint8", "bool", "m8[ns]", "M8[ns]"]) - for dtype in dtypes: - assert pd.concat([Series(dtype=dtype)]).dtype == dtype - assert pd.concat([Series(dtype=dtype), Series(dtype=dtype)]).dtype == dtype - def int_result_type(dtype, dtype2): typs = {dtype.kind, dtype2.kind} if not len(typs - {"i", "u", "b"}) and ( @@ -118,35 +61,28 @@ def get_result_type(dtype, dtype2): result = pd.concat([Series(dtype=dtype), Series(dtype=dtype2)]).dtype assert result.kind == expected - def test_concat_empty_series_dtypes(self): + @pytest.mark.parametrize( + "left,right,expected", + [ + # booleans + (np.bool_, np.int32, np.int32), + (np.bool_, np.float32, np.object_), + # datetime-like + ("m8[ns]", np.bool, np.object_), + ("m8[ns]", np.int64, np.object_), + ("M8[ns]", np.bool, np.object_), + ("M8[ns]", np.int64, np.object_), + # categorical + ("category", "category", "category"), + ("category", "object", "object"), + ], + ) + def test_concat_empty_series_dtypes(self, left, right, expected): + result = pd.concat([Series(dtype=left), Series(dtype=right)]) + assert result.dtype == expected - # booleans - assert ( - pd.concat([Series(dtype=np.bool_), Series(dtype=np.int32)]).dtype - == np.int32 - ) - assert ( - pd.concat([Series(dtype=np.bool_), Series(dtype=np.float32)]).dtype - == np.object_ - ) + def test_concat_empty_series_dtypes_triple(self): - # datetime-like - assert ( - pd.concat([Series(dtype="m8[ns]"), Series(dtype=np.bool)]).dtype - == np.object_ - ) - assert ( - pd.concat([Series(dtype="m8[ns]"), Series(dtype=np.int64)]).dtype - == np.object_ - ) - assert ( - pd.concat([Series(dtype="M8[ns]"), Series(dtype=np.bool)]).dtype - == np.object_ - ) - assert ( - pd.concat([Series(dtype="M8[ns]"), Series(dtype=np.int64)]).dtype - == np.object_ - ) assert ( pd.concat( [Series(dtype="M8[ns]"), Series(dtype=np.bool_), Series(dtype=np.int64)] @@ -154,11 +90,7 @@ def test_concat_empty_series_dtypes(self): == np.object_ ) - # categorical - assert ( - pd.concat([Series(dtype="category"), Series(dtype="category")]).dtype - == "category" - ) + def test_concat_empty_series_dtype_category_with_array(self): # GH 18515 assert ( pd.concat( @@ -166,13 +98,8 @@ def test_concat_empty_series_dtypes(self): ).dtype == "float64" ) - assert ( - pd.concat([Series(dtype="category"), Series(dtype="object")]).dtype - == "object" - ) - # sparse - # TODO: move? 
+ def test_concat_empty_series_dtypes_sparse(self): result = pd.concat( [ Series(dtype="float64").astype("Sparse"), diff --git a/pandas/tests/series/test_datetime_values.py b/pandas/tests/series/test_datetime_values.py index b8be4ea137e3d..59ae0cd63690c 100644 --- a/pandas/tests/series/test_datetime_values.py +++ b/pandas/tests/series/test_datetime_values.py @@ -19,7 +19,6 @@ PeriodIndex, Series, TimedeltaIndex, - bdate_range, date_range, period_range, timedelta_range, @@ -622,18 +621,6 @@ def test_dt_accessor_updates_on_inplace(self): result = s.dt.date assert result[0] == result[2] - def test_between(self): - s = Series(bdate_range("1/1/2000", periods=20).astype(object)) - s[::2] = np.nan - - result = s[s.between(s[3], s[17])] - expected = s[3:18].dropna() - tm.assert_series_equal(result, expected) - - result = s[s.between(s[3], s[17], inclusive=False)] - expected = s[5:16].dropna() - tm.assert_series_equal(result, expected) - def test_date_tz(self): # GH11757 rng = pd.DatetimeIndex( diff --git a/pandas/tests/series/test_period.py b/pandas/tests/series/test_period.py index 03fee389542e3..f41245c2872a7 100644 --- a/pandas/tests/series/test_period.py +++ b/pandas/tests/series/test_period.py @@ -52,12 +52,6 @@ def test_dropna(self): s = Series([pd.Period("2011-01", freq="M"), pd.Period("NaT", freq="M")]) tm.assert_series_equal(s.dropna(), Series([pd.Period("2011-01", freq="M")])) - def test_between(self): - left, right = self.series[[2, 7]] - result = self.series.between(left, right) - expected = (self.series >= left) & (self.series <= right) - tm.assert_series_equal(result, expected) - # --------------------------------------------------------------------- # NaT support @@ -110,28 +104,6 @@ def test_align_series(self, join_type): ts.align(ts[::2], join=join_type) - def test_truncate(self): - # GH 17717 - idx1 = pd.PeriodIndex( - [pd.Period("2017-09-02"), pd.Period("2017-09-02"), pd.Period("2017-09-03")] - ) - series1 = pd.Series([1, 2, 3], index=idx1) - result1 = series1.truncate(after="2017-09-02") - - expected_idx1 = pd.PeriodIndex( - [pd.Period("2017-09-02"), pd.Period("2017-09-02")] - ) - tm.assert_series_equal(result1, pd.Series([1, 2], index=expected_idx1)) - - idx2 = pd.PeriodIndex( - [pd.Period("2017-09-03"), pd.Period("2017-09-02"), pd.Period("2017-09-03")] - ) - series2 = pd.Series([1, 2, 3], index=idx2) - result2 = series2.sort_index().truncate(after="2017-09-02") - - expected_idx2 = pd.PeriodIndex([pd.Period("2017-09-02")]) - tm.assert_series_equal(result2, pd.Series([2], index=expected_idx2)) - @pytest.mark.parametrize( "input_vals", [ diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 459377fb18f29..8f06ea69f5d66 100644 --- a/pandas/tests/series/test_timeseries.py +++ b/pandas/tests/series/test_timeseries.py @@ -1,13 +1,11 @@ -from datetime import datetime, time, timedelta +from datetime import datetime, timedelta from io import StringIO -from itertools import product import numpy as np import pytest from pandas._libs.tslib import iNaT from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime -import pandas.util._test_decorators as td import pandas as pd from pandas import ( @@ -23,8 +21,6 @@ ) import pandas._testing as tm -from pandas.tseries.offsets import BDay, BMonthEnd - def _simple_ts(start, end, freq="D"): rng = date_range(start, end, freq=freq) @@ -38,44 +34,6 @@ def assert_range_equal(left, right): class TestTimeSeries: - def test_asfreq(self): - ts = Series( - [0.0, 1.0, 2.0], - index=[ - datetime(2009, 10, 30), 
- datetime(2009, 11, 30), - datetime(2009, 12, 31), - ], - ) - - daily_ts = ts.asfreq("B") - monthly_ts = daily_ts.asfreq("BM") - tm.assert_series_equal(monthly_ts, ts) - - daily_ts = ts.asfreq("B", method="pad") - monthly_ts = daily_ts.asfreq("BM") - tm.assert_series_equal(monthly_ts, ts) - - daily_ts = ts.asfreq(BDay()) - monthly_ts = daily_ts.asfreq(BMonthEnd()) - tm.assert_series_equal(monthly_ts, ts) - - result = ts[:0].asfreq("M") - assert len(result) == 0 - assert result is not ts - - daily_ts = ts.asfreq("D", fill_value=-1) - result = daily_ts.value_counts().sort_index() - expected = Series([60, 1, 1, 1], index=[-1.0, 2.0, 1.0, 0.0]).sort_index() - tm.assert_series_equal(result, expected) - - def test_asfreq_datetimeindex_empty_series(self): - # GH 14320 - index = pd.DatetimeIndex(["2016-09-29 11:00"]) - expected = Series(index=index, dtype=object).asfreq("H") - result = Series([3], index=index.copy()).asfreq("H") - tm.assert_index_equal(expected.index, result.index) - def test_autocorr(self, datetime_series): # Just run the function corr1 = datetime_series.autocorr() @@ -268,15 +226,6 @@ def test_series_repr_nat(self): ) assert result == expected - def test_asfreq_keep_index_name(self): - # GH #9854 - index_name = "bar" - index = pd.date_range("20130101", periods=20, name=index_name) - df = pd.DataFrame(list(range(20)), columns=["foo"], index=index) - - assert index_name == df.index.name - assert index_name == df.asfreq("10D").index.name - def test_promote_datetime_date(self): rng = date_range("1/1/2000", periods=20) ts = Series(np.random.randn(20), index=rng) @@ -300,26 +249,6 @@ def test_promote_datetime_date(self): expected = rng.get_indexer(ts_slice.index) tm.assert_numpy_array_equal(result, expected) - def test_asfreq_normalize(self): - rng = date_range("1/1/2000 09:30", periods=20) - norm = date_range("1/1/2000", periods=20) - vals = np.random.randn(20) - ts = Series(vals, index=rng) - - result = ts.asfreq("D", normalize=True) - norm = date_range("1/1/2000", periods=20) - expected = Series(vals, index=norm) - - tm.assert_series_equal(result, expected) - - vals = np.random.randn(20, 3) - ts = DataFrame(vals, index=rng) - - result = ts.asfreq("D", normalize=True) - expected = DataFrame(vals, index=norm) - - tm.assert_frame_equal(result, expected) - def test_first_subset(self): ts = _simple_ts("1/1/2000", "1/1/2010", freq="12h") result = ts.first("10d") @@ -380,180 +309,6 @@ def test_format_pre_1900_dates(self): ts = Series(1, index=rng) repr(ts) - def test_at_time(self): - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = Series(np.random.randn(len(rng)), index=rng) - rs = ts.at_time(rng[1]) - assert (rs.index.hour == rng[1].hour).all() - assert (rs.index.minute == rng[1].minute).all() - assert (rs.index.second == rng[1].second).all() - - result = ts.at_time("9:30") - expected = ts.at_time(time(9, 30)) - tm.assert_series_equal(result, expected) - - df = DataFrame(np.random.randn(len(rng), 3), index=rng) - - result = ts[time(9, 30)] - result_df = df.loc[time(9, 30)] - expected = ts[(rng.hour == 9) & (rng.minute == 30)] - exp_df = df[(rng.hour == 9) & (rng.minute == 30)] - - # FIXME: dont leave commented-out - # expected.index = date_range('1/1/2000', '1/4/2000') - - tm.assert_series_equal(result, expected) - tm.assert_frame_equal(result_df, exp_df) - - chunk = df.loc["1/4/2000":] - result = chunk.loc[time(9, 30)] - expected = result_df[-1:] - tm.assert_frame_equal(result, expected) - - # midnight, everything - rng = date_range("1/1/2000", "1/31/2000") - ts = 
Series(np.random.randn(len(rng)), index=rng) - - result = ts.at_time(time(0, 0)) - tm.assert_series_equal(result, ts) - - # time doesn't exist - rng = date_range("1/1/2012", freq="23Min", periods=384) - ts = Series(np.random.randn(len(rng)), rng) - rs = ts.at_time("16:00") - assert len(rs) == 0 - - def test_at_time_raises(self): - # GH20725 - ser = pd.Series("a b c".split()) - msg = "Index must be DatetimeIndex" - with pytest.raises(TypeError, match=msg): - ser.at_time("00:00") - - def test_between(self): - series = Series(date_range("1/1/2000", periods=10)) - left, right = series[[2, 7]] - - result = series.between(left, right) - expected = (series >= left) & (series <= right) - tm.assert_series_equal(result, expected) - - def test_between_time(self): - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = Series(np.random.randn(len(rng)), index=rng) - stime = time(0, 0) - etime = time(1, 0) - - close_open = product([True, False], [True, False]) - for inc_start, inc_end in close_open: - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = 13 * 4 + 1 - if not inc_start: - exp_len -= 5 - if not inc_end: - exp_len -= 4 - - assert len(filtered) == exp_len - for rs in filtered.index: - t = rs.time() - if inc_start: - assert t >= stime - else: - assert t > stime - - if inc_end: - assert t <= etime - else: - assert t < etime - - result = ts.between_time("00:00", "01:00") - expected = ts.between_time(stime, etime) - tm.assert_series_equal(result, expected) - - # across midnight - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = Series(np.random.randn(len(rng)), index=rng) - stime = time(22, 0) - etime = time(9, 0) - - close_open = product([True, False], [True, False]) - for inc_start, inc_end in close_open: - filtered = ts.between_time(stime, etime, inc_start, inc_end) - exp_len = (12 * 11 + 1) * 4 + 1 - if not inc_start: - exp_len -= 4 - if not inc_end: - exp_len -= 4 - - assert len(filtered) == exp_len - for rs in filtered.index: - t = rs.time() - if inc_start: - assert (t >= stime) or (t <= etime) - else: - assert (t > stime) or (t <= etime) - - if inc_end: - assert (t <= etime) or (t >= stime) - else: - assert (t < etime) or (t >= stime) - - def test_between_time_raises(self): - # GH20725 - ser = pd.Series("a b c".split()) - msg = "Index must be DatetimeIndex" - with pytest.raises(TypeError, match=msg): - ser.between_time(start_time="00:00", end_time="12:00") - - def test_between_time_types(self): - # GH11818 - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time" - with pytest.raises(ValueError, match=msg): - rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) - - frame = DataFrame({"A": 0}, index=rng) - with pytest.raises(ValueError, match=msg): - frame.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) - - series = Series(0, index=rng) - with pytest.raises(ValueError, match=msg): - series.between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5)) - - @td.skip_if_has_locale - def test_between_time_formats(self): - # GH11818 - rng = date_range("1/1/2000", "1/5/2000", freq="5min") - ts = DataFrame(np.random.randn(len(rng), 2), index=rng) - - strings = [ - ("2:00", "2:30"), - ("0200", "0230"), - ("2:00am", "2:30am"), - ("0200am", "0230am"), - ("2:00:00", "2:30:00"), - ("020000", "023000"), - ("2:00:00am", "2:30:00am"), - ("020000am", "023000am"), - ] - expected_length = 28 - - for time_string in strings: - assert 
len(ts.between_time(*time_string)) == expected_length - - def test_between_time_axis(self): - # issue 8839 - rng = date_range("1/1/2000", periods=100, freq="10min") - ts = Series(np.random.randn(len(rng)), index=rng) - stime, etime = ("08:00:00", "09:00:00") - expected_length = 7 - - assert len(ts.between_time(stime, etime)) == expected_length - assert len(ts.between_time(stime, etime, axis=0)) == expected_length - msg = "No axis named 1 for object type <class 'pandas.core.series.Series'>" - with pytest.raises(ValueError, match=msg): - ts.between_time(stime, etime, axis=1) - def test_to_period(self): from pandas.core.indexes.period import period_range diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index a363f927d10a9..e729ff91293a8 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -10,207 +10,12 @@ from pandas._libs.tslibs import conversion, timezones -from pandas import DatetimeIndex, Index, NaT, Series, Timestamp +from pandas import Series, Timestamp import pandas._testing as tm from pandas.core.indexes.datetimes import date_range class TestSeriesTimezones: - # ----------------------------------------------------------------- - # Series.tz_localize - def test_series_tz_localize(self): - - rng = date_range("1/1/2011", periods=100, freq="H") - ts = Series(1, index=rng) - - result = ts.tz_localize("utc") - assert result.index.tz.zone == "UTC" - - # Can't localize if already tz-aware - rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") - ts = Series(1, index=rng) - - with pytest.raises(TypeError, match="Already tz-aware"): - ts.tz_localize("US/Eastern") - - def test_series_tz_localize_ambiguous_bool(self): - # make sure that we are correctly accepting bool values as ambiguous - - # GH#14402 - ts = Timestamp("2015-11-01 01:00:03") - expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central") - expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central") - - ser = Series([ts]) - expected0 = Series([expected0]) - expected1 = Series([expected1]) - - with pytest.raises(pytz.AmbiguousTimeError): - ser.dt.tz_localize("US/Central") - - result = ser.dt.tz_localize("US/Central", ambiguous=True) - tm.assert_series_equal(result, expected0) - - result = ser.dt.tz_localize("US/Central", ambiguous=[True]) - tm.assert_series_equal(result, expected0) - - result = ser.dt.tz_localize("US/Central", ambiguous=False) - tm.assert_series_equal(result, expected1) - - result = ser.dt.tz_localize("US/Central", ambiguous=[False]) - tm.assert_series_equal(result, expected1) - - @pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"]) - @pytest.mark.parametrize( - "method, exp", - [ - ["shift_forward", "2015-03-29 03:00:00"], - ["NaT", NaT], - ["raise", None], - ["foo", "invalid"], - ], - ) - def test_series_tz_localize_nonexistent(self, tz, method, exp): - # GH 8917 - n = 60 - dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min") - s = Series(1, dti) - if method == "raise": - with pytest.raises(pytz.NonExistentTimeError): - s.tz_localize(tz, nonexistent=method) - elif exp == "invalid": - with pytest.raises(ValueError): - dti.tz_localize(tz, nonexistent=method) - else: - result = s.tz_localize(tz, nonexistent=method) - expected = Series(1, index=DatetimeIndex([exp] * n, tz=tz)) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_series_tz_localize_empty(self, tzstr): - # GH#2248 - ser = 
Series(dtype=object) - - ser2 = ser.tz_localize("utc") - assert ser2.index.tz == pytz.utc - - ser2 = ser.tz_localize(tzstr) - timezones.tz_compare(ser2.index.tz, timezones.maybe_get_tz(tzstr)) - - # ----------------------------------------------------------------- - # Series.tz_convert - - def test_series_tz_convert(self): - rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") - ts = Series(1, index=rng) - - result = ts.tz_convert("Europe/Berlin") - assert result.index.tz.zone == "Europe/Berlin" - - # can't convert tz-naive - rng = date_range("1/1/2011", periods=200, freq="D") - ts = Series(1, index=rng) - - with pytest.raises(TypeError, match="Cannot convert tz-naive"): - ts.tz_convert("US/Eastern") - - def test_series_tz_convert_to_utc(self): - base = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC") - idx1 = base.tz_convert("Asia/Tokyo")[:2] - idx2 = base.tz_convert("US/Eastern")[1:] - - res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2) - tm.assert_series_equal(res, Series([np.nan, 3, np.nan], index=base)) - - # ----------------------------------------------------------------- - # Series.append - - def test_series_append_aware(self): - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex( - ["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern" - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="UTC") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="UTC") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex(["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC") - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - utc = rng1.tz - assert utc == ts_result.index.tz - - # GH#7795 - # different tz coerces to object dtype, not UTC - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Central") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - exp_index = Index( - [ - Timestamp("1/1/2011 01:00", tz="US/Eastern"), - Timestamp("1/1/2011 02:00", tz="US/Central"), - ] - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - - def test_series_append_aware_naive(self): - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") - ser1 = Series(np.random.randn(len(rng1)), index=rng1) - ser2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ser1.append(ser2) - - expected = ser1.index.astype(object).append(ser2.index.astype(object)) - assert ts_result.index.equals(expected) - - # mixed - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") - rng2 = range(100) - ser1 = Series(np.random.randn(len(rng1)), index=rng1) - ser2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ser1.append(ser2) - - expected = ser1.index.astype(object).append(ser2.index) - assert ts_result.index.equals(expected) - - def test_series_append_dst(self): - rng1 = date_range("1/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") - rng2 = 
date_range("8/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") - ser1 = Series([1, 2, 3], index=rng1) - ser2 = Series([10, 11, 12], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex( - [ - "2016-01-01 01:00", - "2016-01-01 02:00", - "2016-01-01 03:00", - "2016-08-01 01:00", - "2016-08-01 02:00", - "2016-08-01 03:00", - ], - tz="US/Eastern", - ) - exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) - tm.assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - # ----------------------------------------------------------------- - def test_dateutil_tzoffset_support(self): values = [188.5, 328.25] tzinfo = tzoffset(None, 7200) @@ -225,15 +30,6 @@ def test_dateutil_tzoffset_support(self): # it works! #2443 repr(series.index[0]) - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) - def test_tz_aware_asfreq(self, tz): - dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz) - - ser = Series(np.random.randn(len(dr)), index=dr) - - # it works! - ser.asfreq("T") - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) def test_string_index_alias_tz_aware(self, tz): rng = date_range("1/1/2000", periods=10, tz=tz) @@ -299,28 +95,6 @@ def test_series_align_aware(self): assert new1.index.tz == pytz.UTC assert new2.index.tz == pytz.UTC - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_localized_at_time_between_time(self, tzstr): - from datetime import time - - tz = timezones.maybe_get_tz(tzstr) - - rng = date_range("4/16/2012", "5/1/2012", freq="H") - ts = Series(np.random.randn(len(rng)), index=rng) - - ts_local = ts.tz_localize(tzstr) - - result = ts_local.at_time(time(10, 0)) - expected = ts.at_time(time(10, 0)).tz_localize(tzstr) - tm.assert_series_equal(result, expected) - assert timezones.tz_compare(result.index.tz, tz) - - t1, t2 = time(10, 0), time(11, 0) - result = ts_local.between_time(t1, t2) - expected = ts.between_time(t1, t2).tz_localize(tzstr) - tm.assert_series_equal(result, expected) - assert timezones.tz_compare(result.index.tz, tz) - @pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"]) def test_getitem_pydatetime_tz(self, tzstr): tz = timezones.maybe_get_tz(tzstr) @@ -335,14 +109,6 @@ def test_getitem_pydatetime_tz(self, tzstr): time_datetime = conversion.localize_pydatetime(dt, tz) assert ts[time_pandas] == ts[time_datetime] - def test_series_truncate_datetimeindex_tz(self): - # GH 9243 - idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific") - s = Series(range(len(idx)), index=idx) - result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4)) - expected = Series([1, 2, 3], index=idx[1:4]) - tm.assert_series_equal(result, expected) - @pytest.mark.parametrize("copy", [True, False]) @pytest.mark.parametrize( "method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]] diff --git a/pandas/tests/test_errors.py b/pandas/tests/test_errors.py index d72c00ceb0045..515d798fe4322 100644 --- a/pandas/tests/test_errors.py +++ b/pandas/tests/test_errors.py @@ -17,6 +17,7 @@ "EmptyDataError", "ParserWarning", "MergeError", + "OptionError", ], ) def test_exception_importable(exc): diff --git a/pandas/tests/util/test_show_versions.py b/pandas/tests/util/test_show_versions.py index 0d2c81c4ea6c7..e36ea662fac8b 100644 --- a/pandas/tests/util/test_show_versions.py +++ b/pandas/tests/util/test_show_versions.py @@ -1,8 +1,26 @@ import re +import pytest + import pandas as pd +@pytest.mark.filterwarnings( + # openpyxl + 
"ignore:defusedxml.lxml is no longer supported:DeprecationWarning" +) +@pytest.mark.filterwarnings( + # html5lib + "ignore:Using or importing the ABCs from:DeprecationWarning" +) +@pytest.mark.filterwarnings( + # fastparquet + "ignore:pandas.core.index is deprecated:FutureWarning" +) +@pytest.mark.filterwarnings( + # pandas_datareader + "ignore:pandas.util.testing is deprecated:FutureWarning" +) def test_show_versions(capsys): # gh-32041 pd.show_versions() diff --git a/pandas/util/_print_versions.py b/pandas/util/_print_versions.py index 99b2b9e9f5f6e..f9502cc22b0c6 100644 --- a/pandas/util/_print_versions.py +++ b/pandas/util/_print_versions.py @@ -4,13 +4,23 @@ import os import platform import struct -import subprocess import sys from typing import List, Optional, Tuple, Union from pandas.compat._optional import VERSIONS, _get_version, import_optional_dependency +def _get_commit_hash() -> Optional[str]: + """ + Use vendored versioneer code to get git hash, which handles + git worktree correctly. + """ + from pandas._version import get_versions + + versions = get_versions() + return versions["full-revisionid"] + + def get_sys_info() -> List[Tuple[str, Optional[Union[str, int]]]]: """ Returns system information as a list @@ -18,20 +28,7 @@ def get_sys_info() -> List[Tuple[str, Optional[Union[str, int]]]]: blob: List[Tuple[str, Optional[Union[str, int]]]] = [] # get full commit hash - commit = None - if os.path.isdir(".git") and os.path.isdir("pandas"): - try: - pipe = subprocess.Popen( - 'git log --format="%H" -n 1'.split(" "), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - so, serr = pipe.communicate() - except (OSError, ValueError): - pass - else: - if pipe.returncode == 0: - commit = so.decode("utf-8").strip().strip('"') + commit = _get_commit_hash() blob.append(("commit", commit))
Edit `_from_nested_dict()` to use `defaultdict` for a marginal optimization. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
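A minimal sketch of the `defaultdict` rewrite the description above refers to. The full `pandas.core.frame._from_nested_dict` context is not shown in this excerpt, so the signature and the dict-of-dicts layout here are assumptions based on the PR description:

```python
from collections import defaultdict

def _from_nested_dict(data):
    # Invert {index -> {column -> value}} into {column -> {index -> value}}.
    # defaultdict(dict) removes the per-item check for whether the inner
    # dict already exists, which is the marginal optimization described above.
    new_data = defaultdict(dict)
    for index, row in data.items():
        for col, value in row.items():
            new_data[col][index] = value
    return new_data
```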
https://api.github.com/repos/pandas-dev/pandas/pulls/32209
2020-02-23T21:18:20Z
2020-02-26T22:02:22Z
null
2020-02-26T22:02:22Z
CLN: simplify CategoricalIndex._simple_new
diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index caa6a9a93141f..8ca3427168577 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -17,7 +17,6 @@ is_scalar, ) from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas.core.dtypes.generic import ABCCategorical, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import accessor @@ -193,7 +192,9 @@ def __new__( raise cls._scalar_data_error(data) data = [] - data = cls._create_categorical(data, dtype=dtype) + assert isinstance(dtype, CategoricalDtype), dtype + if not isinstance(data, Categorical) or data.dtype != dtype: + data = Categorical(data, dtype=dtype) data = data.copy() if copy else data @@ -223,37 +224,11 @@ def _create_from_codes(self, codes, dtype=None, name=None): return CategoricalIndex(cat, name=name) @classmethod - def _create_categorical(cls, data, dtype=None): - """ - *this is an internal non-public method* - - create the correct categorical from data and the properties - - Parameters - ---------- - data : data for new Categorical - dtype : CategoricalDtype, defaults to existing - - Returns - ------- - Categorical - """ - if isinstance(data, (cls, ABCSeries)) and is_categorical_dtype(data): - data = data.values - - if not isinstance(data, ABCCategorical): - return Categorical(data, dtype=dtype) - - if isinstance(dtype, CategoricalDtype) and dtype != data.dtype: - # we want to silently ignore dtype='category' - data = data._set_dtype(dtype) - return data - - @classmethod - def _simple_new(cls, values, name=None, dtype=None): + def _simple_new(cls, values: Categorical, name=None, dtype=None): + # GH#32204 dtype is included for compat with Index._simple_new + assert isinstance(values, Categorical), type(values) result = object.__new__(cls) - values = cls._create_categorical(values, dtype=dtype) result._data = values result.name = name @@ -295,7 +270,8 @@ def _is_dtype_compat(self, other) -> bool: values = other if not is_list_like(values): values = [values] - other = CategoricalIndex(self._create_categorical(other, dtype=self.dtype)) + cat = Categorical(other, dtype=self.dtype) + other = CategoricalIndex(cat) if not other.isin(values).all(): raise TypeError( "cannot append a non-category item to a CategoricalIndex" diff --git a/pandas/tests/indexes/categorical/test_constructors.py b/pandas/tests/indexes/categorical/test_constructors.py index 1df0874e2f947..ee3f85da22781 100644 --- a/pandas/tests/indexes/categorical/test_constructors.py +++ b/pandas/tests/indexes/categorical/test_constructors.py @@ -136,12 +136,3 @@ def test_construction_with_categorical_dtype(self): with pytest.raises(ValueError, match=msg): Index(data, ordered=ordered, dtype=dtype) - - def test_create_categorical(self): - # GH#17513 The public CI constructor doesn't hit this code path with - # instances of CategoricalIndex, but we still want to test the code - ci = CategoricalIndex(["a", "b", "c"]) - # First ci is self, second ci is data. - result = CategoricalIndex._create_categorical(ci, ci) - expected = Categorical(["a", "b", "c"]) - tm.assert_categorical_equal(result, expected)
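For illustration, a hedged sketch of the contract this cleanup enforces: `_simple_new` is a private constructor, and after this change it asserts that it receives an already-built `Categorical` rather than coercing arbitrary data itself (the example values are made up):

```python
from pandas import Categorical, CategoricalIndex

# Callers now build the Categorical first; _simple_new no longer coerces.
cat = Categorical(["a", "b", "a"], categories=["a", "b", "c"])
ci = CategoricalIndex._simple_new(cat, name="letters")

# Passing raw data trips the new assertion:
# CategoricalIndex._simple_new(["a", "b", "a"])  # AssertionError: <class 'list'>
```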
https://api.github.com/repos/pandas-dev/pandas/pulls/32204
2020-02-23T17:30:58Z
2020-02-26T02:14:24Z
2020-02-26T02:14:24Z
2020-02-26T02:16:00Z
BUG: DataFrame fails to construct when data is a list and columns is a nested list for MI
diff --git a/doc/source/whatsnew/v1.1.0.rst b/doc/source/whatsnew/v1.1.0.rst index 7cb7db27ae603..2df732d67b5da 100644 --- a/doc/source/whatsnew/v1.1.0.rst +++ b/doc/source/whatsnew/v1.1.0.rst @@ -471,6 +471,7 @@ Other instead of ``TypeError: Can only append a Series if ignore_index=True or if the Series has a name`` (:issue:`30871`) - Set operations on an object-dtype :class:`Index` now always return object-dtype results (:issue:`31401`) - Bug in :meth:`AbstractHolidayCalendar.holidays` when no rules were defined (:issue:`31415`) +- Bug in :class:`DataFrame` when constructing a frame from lists and assigning a nested list to ``columns`` for a ``MultiIndex`` (:issue:`32173`) - Bug in :meth:`DataFrame.to_records` incorrectly losing timezone information in timezone-aware ``datetime64`` columns (:issue:`32535`) - Fixed :func:`pandas.testing.assert_series_equal` to correctly raise if left object is a different subclass with ``check_series_type=True`` (:issue:`32670`). - :meth:`IntegerArray.astype` now supports ``datetime64`` dtype (:issue:32538`) diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index fc7da4155db36..5c9e4b96047ee 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -3,12 +3,13 @@ constructors before passing them to a BlockManager. """ from collections import abc -from typing import Tuple +from typing import Dict, List, Optional, Tuple, Union import numpy as np import numpy.ma as ma from pandas._libs import lib +from pandas._typing import Axis, Dtype, Scalar from pandas.core.dtypes.cast import ( construct_1d_arraylike_from_scalar, @@ -522,7 +523,12 @@ def to_arrays(data, columns, coerce_float=False, dtype=None): return _list_to_arrays(data, columns, coerce_float=coerce_float, dtype=dtype) -def _list_to_arrays(data, columns, coerce_float=False, dtype=None): +def _list_to_arrays( + data: List[Scalar], + columns: Union[Index, List], + coerce_float: bool = False, + dtype: Optional[Dtype] = None, +) -> Tuple[List[Scalar], Union[Index, List[Axis]]]: if len(data) > 0 and isinstance(data[0], tuple): content = list(lib.to_object_array_tuples(data).T) else: @@ -530,21 +536,25 @@ def _list_to_arrays(data, columns, coerce_float=False, dtype=None): content = list(lib.to_object_array(data).T) # gh-26429 do not raise user-facing AssertionError try: - result = _convert_object_array( - content, columns, dtype=dtype, coerce_float=coerce_float - ) + columns = _validate_or_indexify_columns(content, columns) + result = _convert_object_array(content, dtype=dtype, coerce_float=coerce_float) except AssertionError as e: raise ValueError(e) from e - return result + return result, columns -def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): +def _list_of_series_to_arrays( + data: List, + columns: Union[Index, List], + coerce_float: bool = False, + dtype: Optional[Dtype] = None, +) -> Tuple[List[Scalar], Union[Index, List[Axis]]]: if columns is None: # We know pass_data is non-empty because data[0] is a Series pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))] columns = get_objs_combined_axis(pass_data, sort=False) - indexer_cache = {} + indexer_cache: Dict[int, Scalar] = {} aligned_values = [] for s in data: @@ -564,14 +574,19 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): if values.dtype == np.object_: content = list(values.T) - return _convert_object_array( - content, columns, dtype=dtype, coerce_float=coerce_float - ) + columns =
_validate_or_indexify_columns(content, columns) + content = _convert_object_array(content, dtype=dtype, coerce_float=coerce_float) + return content, columns else: return values.T, columns -def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): +def _list_of_dict_to_arrays( + data: List, + columns: Union[Index, List], + coerce_float: bool = False, + dtype: Optional[Dtype] = None, +) -> Tuple[List[Scalar], Union[Index, List[Axis]]]: """ Convert list of dicts to numpy arrays @@ -603,22 +618,85 @@ def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): data = [(type(d) is dict) and d or dict(d) for d in data] content = list(lib.dicts_to_array(data, list(columns)).T) - return _convert_object_array( - content, columns, dtype=dtype, coerce_float=coerce_float - ) + columns = _validate_or_indexify_columns(content, columns) + content = _convert_object_array(content, dtype=dtype, coerce_float=coerce_float) + return content, columns -def _convert_object_array(content, columns, coerce_float=False, dtype=None): +def _validate_or_indexify_columns( + content: List, columns: Union[Index, List, None] +) -> Union[Index, List[Axis]]: + """ + If columns is None, use positional integers as column names; otherwise, + validate that columns has a valid length. + + Parameters + ---------- + content: list of data + columns: Iterable or None + + Returns + ------- + columns: If columns is an Iterable, return it as is; if columns is None, + assign positional column indices as columns. + + Raises + ------ + 1. AssertionError when columns is not a nested list and its length does + not equal the length of content. + 2. ValueError when columns is a nested list, but the lengths of its + sub-lists are not all equal. + 3. ValueError when columns is a nested list, but the length of each + sub-list is not equal to the length of content. + """ if columns is None: columns = ibase.default_index(len(content)) else: + + # Flag columns given as a list of lists (nested list for MultiIndex) + is_mi_list = isinstance(columns, list) and all( + isinstance(col, list) for col in columns + ) + + if not is_mi_list and len(columns) != len(content): # pragma: no cover # caller's responsibility to check for this... raise AssertionError( f"{len(columns)} columns passed, passed data had " f"{len(content)} columns" ) + elif is_mi_list: + + # check if nested list column, length of each sub-list should be equal + if len({len(col) for col in columns}) > 1: + raise ValueError( + "Length of columns passed for MultiIndex columns is different" + ) + + # if columns is not empty and length of sublist is not equal to content + elif columns and len(columns[0]) != len(content): + raise ValueError( + f"{len(columns[0])} columns passed, passed data had " + f"{len(content)} columns" + ) + return columns + + +def _convert_object_array( + content: List[Scalar], coerce_float: bool = False, dtype: Optional[Dtype] = None +) -> List[Scalar]: + """ + Internal function to convert an object array. + + Parameters + ---------- + content: list of processed data records + coerce_float: bool, to coerce floats or not, default is False + dtype: np.dtype, default is None + + Returns + ------- + arrays: cast content if not object dtype, otherwise the content as is, in a list.
+ """ # provide soft conversion of object dtypes def convert(arr): if dtype != object and dtype != np.object: @@ -628,7 +706,7 @@ def convert(arr): arrays = [convert(arr) for arr in content] - return arrays, columns + return arrays # --------------------------------------------------------------------- diff --git a/pandas/tests/frame/test_constructors.py b/pandas/tests/frame/test_constructors.py index fc27f19490a9b..baac87755c6d2 100644 --- a/pandas/tests/frame/test_constructors.py +++ b/pandas/tests/frame/test_constructors.py @@ -1063,6 +1063,32 @@ def test_constructor_list_of_lists(self): result = DataFrame(data) tm.assert_frame_equal(result, expected) + def test_constructor_list_like_data_nested_list_column(self): + # GH 32173 + arrays = [list("abcd"), list("cdef")] + result = pd.DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays) + + mi = MultiIndex.from_arrays(arrays) + expected = pd.DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=mi) + + tm.assert_frame_equal(result, expected) + + def test_constructor_wrong_length_nested_list_column(self): + # GH 32173 + arrays = [list("abc"), list("cde")] + + msg = "3 columns passed, passed data had 4" + with pytest.raises(ValueError, match=msg): + DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays) + + def test_constructor_unequal_length_nested_list_column(self): + # GH 32173 + arrays = [list("abcd"), list("cde")] + + msg = "Length of columns passed for MultiIndex columns is different" + with pytest.raises(ValueError, match=msg): + DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays) + def test_constructor_sequence_like(self): # GH 3783 # collections.Squence like
- [ ] closes #32173 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
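For readers skimming the diff, here is a minimal sketch of the behavior this PR adds, adapted from the new constructor tests above (so it should hold on pandas >= 1.1):

```python
import pandas as pd
from pandas import MultiIndex

# Nested-list `columns` are now validated and converted to a MultiIndex
# when constructing a frame from list-like data (GH 32173).
arrays = [list("abcd"), list("cdef")]
result = pd.DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=arrays)

expected = pd.DataFrame(
    [[1, 2, 3, 4], [4, 5, 6, 7]], columns=MultiIndex.from_arrays(arrays)
)
pd.testing.assert_frame_equal(result, expected)

# Mismatched sub-list lengths now raise a clear ValueError:
#   pd.DataFrame([[1, 2, 3, 4], [4, 5, 6, 7]], columns=[list("abcd"), list("cde")])
#   ValueError: Length of columns passed for MultiIndex columns is different
```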
https://api.github.com/repos/pandas-dev/pandas/pulls/32202
2020-02-23T15:35:13Z
2020-04-06T21:15:19Z
2020-04-06T21:15:19Z
2020-04-06T21:15:25Z
Backport PR #32175 on branch 1.0.x (BUG: groupby nunique changing values)
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index affe019d0ac86..b6ac1a275a08b 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -19,6 +19,7 @@ Fixed regressions - Fixed regression in :meth:`Series.align` when ``other`` is a DataFrame and ``method`` is not None (:issue:`31785`) - Fixed regression in :meth:`pandas.core.groupby.RollingGroupby.apply` where the ``raw`` parameter was ignored (:issue:`31754`) - Fixed regression in :meth:`rolling(..).corr() <pandas.core.window.Rolling.corr>` when using a time offset (:issue:`31789`) +- Fixed regression in :meth:`DataFrameGroupBy.nunique` which was modifying the original values if ``NaN`` values were present (:issue:`31950`) - Fixed regression where :func:`read_pickle` raised a ``UnicodeDecodeError`` when reading a py27 pickle with :class:`MultiIndex` column (:issue:`31988`). - Fixed regression in :class:`DataFrame` arithmetic operations with mis-matched columns (:issue:`31623`) - Fixed regression in :meth:`GroupBy.agg` calling a user-provided function an extra time on an empty input (:issue:`31760`) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 1014723c53a7a..d346a0ccbcd3f 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -588,30 +588,18 @@ def nunique(self, dropna: bool = True) -> Series: val = self.obj._internal_get_values() - # GH 27951 - # temporary fix while we wait for NumPy bug 12629 to be fixed - val[isna(val)] = np.datetime64("NaT") - - try: - sorter = np.lexsort((val, ids)) - except TypeError: # catches object dtypes - msg = f"val.dtype must be object, got {val.dtype}" - assert val.dtype == object, msg - val, _ = algorithms.factorize(val, sort=False) - sorter = np.lexsort((val, ids)) - _isna = lambda a: a == -1 - else: - _isna = isna - - ids, val = ids[sorter], val[sorter] + codes, _ = algorithms.factorize(val, sort=False) + sorter = np.lexsort((codes, ids)) + codes = codes[sorter] + ids = ids[sorter] # group boundaries are where group ids change # unique observations are where sorted values change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] - inc = np.r_[1, val[1:] != val[:-1]] + inc = np.r_[1, codes[1:] != codes[:-1]] # 1st item of each group is a new unique observation - mask = _isna(val) + mask = codes == -1 if dropna: inc[idx] = 1 inc[mask] = 0 diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 087b1286151d7..1ecff003b545b 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -972,6 +972,7 @@ def test_frame_describe_unstacked_format(): @pytest.mark.parametrize("dropna", [False, True]) def test_series_groupby_nunique(n, m, sort, dropna): def check_nunique(df, keys, as_index=True): + original_df = df.copy() gr = df.groupby(keys, as_index=as_index, sort=sort) left = gr["julie"].nunique(dropna=dropna) @@ -981,6 +982,7 @@ def check_nunique(df, keys, as_index=True): right = right.reset_index(drop=True) tm.assert_series_equal(left, right, check_names=False) + tm.assert_frame_equal(df, original_df) days = date_range("2015-08-23", periods=10)
Backport PR #32175: BUG: groupby nunique changing values
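A rough illustration of the regression being backported (the frame and column names below are made up; the pattern follows the added test, which asserts the input is unchanged after calling ``nunique``):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "val": ["x", np.nan, "y"]})
original = df.copy()

# Before this fix, the NaN placeholders could be overwritten in-place
# inside the groupby nunique implementation (GH 31950).
df.groupby("key")["val"].nunique(dropna=True)

# After the fix, factorized codes are sorted instead of the raw values,
# so the original frame is left untouched.
pd.testing.assert_frame_equal(df, original)
```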
https://api.github.com/repos/pandas-dev/pandas/pulls/32201
2020-02-23T15:02:26Z
2020-02-23T16:59:31Z
2020-02-23T16:59:31Z
2020-02-23T16:59:31Z
Backport PR #31939 on branch 1.0.x (BUG: Fix construction of Categorical from pd.NA)
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index affe019d0ac86..dc47e010dacdc 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -65,6 +65,7 @@ Bug fixes **Categorical** - Fixed bug where :meth:`Categorical.from_codes` improperly raised a ``ValueError`` when passed nullable integer codes. (:issue:`31779`) +- Fixed bug where :meth:`Categorical` constructor would raise a ``TypeError`` when given a numpy array containing ``pd.NA``. (:issue:`31927`) - Bug in :class:`Categorical` that would ignore or crash when calling :meth:`Series.replace` with a list-like ``to_replace`` (:issue:`31720`) **I/O** @@ -85,4 +86,4 @@ Bug fixes Contributors ~~~~~~~~~~~~ -.. contributors:: v1.0.1..v1.0.2|HEAD \ No newline at end of file +.. contributors:: v1.0.1..v1.0.2|HEAD diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index 6671375f628e7..811025a4b5764 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -10,6 +10,7 @@ WARNING: DO NOT edit .pxi FILE directly, .pxi is generated from .pxi.in # ---------------------------------------------------------------------- from pandas._libs.tslibs.util cimport get_c_string +from pandas._libs.missing cimport C_NA {{py: @@ -1032,8 +1033,12 @@ cdef class PyObjectHashTable(HashTable): val = values[i] hash(val) - if ignore_na and ((val != val or val is None) - or (use_na_value and val == na_value)): + if ignore_na and ( + (val is C_NA) + or (val != val) + or (val is None) + or (use_na_value and val == na_value) + ): # if missing values do not count as unique values (i.e. if # ignore_na is True), skip the hashtable entry for them, and # replace the corresponding label with na_sentinel diff --git a/pandas/tests/arrays/categorical/test_constructors.py b/pandas/tests/arrays/categorical/test_constructors.py index dbd8fd8df67c1..d5537359d6948 100644 --- a/pandas/tests/arrays/categorical/test_constructors.py +++ b/pandas/tests/arrays/categorical/test_constructors.py @@ -458,6 +458,18 @@ def test_constructor_with_categorical_categories(self): result = Categorical(["a", "b"], categories=CategoricalIndex(["a", "b", "c"])) tm.assert_categorical_equal(result, expected) + @pytest.mark.parametrize("klass", [lambda x: np.array(x, dtype=object), list]) + def test_construction_with_null(self, klass, nulls_fixture): + # https://github.com/pandas-dev/pandas/issues/31927 + values = klass(["a", nulls_fixture, "b"]) + result = Categorical(values) + + dtype = CategoricalDtype(["a", "b"]) + codes = [0, -1, 1] + expected = Categorical.from_codes(codes=codes, dtype=dtype) + + tm.assert_categorical_equal(result, expected) + def test_from_codes(self): # too few categories
Backport PR #31939: BUG: Fix construction of Categorical from pd.NA
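A minimal sketch of the fixed behavior, following the new `test_construction_with_null` test above:

```python
import numpy as np
import pandas as pd
from pandas import Categorical

# GH 31927: an object-dtype array containing pd.NA previously raised a
# TypeError in the Categorical constructor; NA now maps to the missing
# code -1, like None and NaN.
values = np.array(["a", pd.NA, "b"], dtype=object)
result = Categorical(values)

assert list(result.categories) == ["a", "b"]
assert result.codes.tolist() == [0, -1, 1]
```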
https://api.github.com/repos/pandas-dev/pandas/pulls/32200
2020-02-23T15:00:31Z
2020-02-23T16:02:55Z
2020-02-23T16:02:55Z
2020-02-23T16:02:55Z
Backport PR #32124 on branch 1.0.x (BUG: Avoid ambiguous condition in GroupBy.first / last)
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index affe019d0ac86..82d603cfb8c15 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -76,6 +76,7 @@ Bug fixes - Fix bug in :meth:`DataFrame.convert_dtypes` for columns that were already using the ``"string"`` dtype (:issue:`31731`). - Fixed bug in setting values using a slice indexer with string dtype (:issue:`31772`) +- Fixed bug where :meth:`GroupBy.first` and :meth:`GroupBy.last` would raise a ``TypeError`` when groups contained ``pd.NA`` in a column of object dtype (:issue:`32123`) - Fix bug in :meth:`Series.convert_dtypes` for series with mix of integers and strings (:issue:`32117`) .. --------------------------------------------------------------------------- diff --git a/pandas/_libs/groupby.pyx b/pandas/_libs/groupby.pyx index abb8a6d388d26..2fe8137788f61 100644 --- a/pandas/_libs/groupby.pyx +++ b/pandas/_libs/groupby.pyx @@ -22,6 +22,8 @@ from pandas._libs.algos cimport (swap, TiebreakEnumType, TIEBREAK_AVERAGE, from pandas._libs.algos import (take_2d_axis1_float64_float64, groupsort_indexer, tiebreakers) +from pandas._libs.missing cimport checknull + cdef int64_t NPY_NAT = get_nat() _int64_max = np.iinfo(np.int64).max @@ -888,7 +890,7 @@ def group_last(rank_t[:, :] out, for j in range(K): val = values[i, j] - if val == val: + if not checknull(val): # NB: use _treat_as_na here once # conditional-nogil is available. nobs[lab, j] += 1 @@ -977,7 +979,7 @@ def group_nth(rank_t[:, :] out, for j in range(K): val = values[i, j] - if val == val: + if not checknull(val): # NB: use _treat_as_na here once # conditional-nogil is available. nobs[lab, j] += 1 diff --git a/pandas/tests/groupby/test_nth.py b/pandas/tests/groupby/test_nth.py index 0f850f2e94581..b1476f1059d84 100644 --- a/pandas/tests/groupby/test_nth.py +++ b/pandas/tests/groupby/test_nth.py @@ -54,6 +54,46 @@ def test_first_last_nth(df): tm.assert_frame_equal(result, expected) +@pytest.mark.parametrize("method", ["first", "last"]) +def test_first_last_with_na_object(method, nulls_fixture): + # https://github.com/pandas-dev/pandas/issues/32123 + groups = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby( + "a" + ) + result = getattr(groups, method)() + + if method == "first": + values = [1, 3] + else: + values = [2, 3] + + values = np.array(values, dtype=result["b"].dtype) + idx = pd.Index([1, 2], name="a") + expected = pd.DataFrame({"b": values}, index=idx) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("index", [0, -1]) +def test_nth_with_na_object(index, nulls_fixture): + # https://github.com/pandas-dev/pandas/issues/32123 + groups = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, nulls_fixture]}).groupby( + "a" + ) + result = groups.nth(index) + + if index == 0: + values = [1, 3] + else: + values = [2, nulls_fixture] + + values = np.array(values, dtype=result["b"].dtype) + idx = pd.Index([1, 2], name="a") + expected = pd.DataFrame({"b": values}, index=idx) + + tm.assert_frame_equal(result, expected) + + def test_first_last_nth_dtypes(df_mixed_floats): df = df_mixed_floats.copy()
Backport PR #32124: BUG: Avoid ambiguous condition in GroupBy.first / last
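A minimal sketch of the fix, mirroring the added `test_first_last_with_na_object` test:

```python
import pandas as pd

# GH 32123: with pd.NA in an object-dtype column, first()/last() used to
# raise because the old `val == val` missing-value check is ambiguous
# for pd.NA.
df = pd.DataFrame({"a": [1, 1, 2, 2], "b": [1, 2, 3, pd.NA]})
grouped = df.groupby("a")

print(grouped.first())  # group 1 -> b=1, group 2 -> b=3
print(grouped.last())   # group 1 -> b=2, group 2 -> b=3 (the NA is skipped)
```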
https://api.github.com/repos/pandas-dev/pandas/pulls/32199
2020-02-23T15:00:19Z
2020-02-23T15:45:54Z
2020-02-23T15:45:54Z
2020-02-23T15:45:54Z
DOC: add missing links to introduction to pandas
diff --git a/doc/source/getting_started/10min.rst b/doc/source/getting_started/10min.rst index a635b5656bd2d..9994287c827e3 100644 --- a/doc/source/getting_started/10min.rst +++ b/doc/source/getting_started/10min.rst @@ -39,7 +39,7 @@ and labeled columns: df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD')) df -Creating a ``DataFrame`` by passing a dict of objects that can be converted to series-like. +Creating a :class:`DataFrame` by passing a dict of objects that can be converted to series-like. .. ipython:: python @@ -51,7 +51,7 @@ Creating a ``DataFrame`` by passing a dict of objects that can be converted to s 'F': 'foo'}) df2 -The columns of the resulting ``DataFrame`` have different +The columns of the resulting :class:`DataFrame` have different :ref:`dtypes <basics.dtypes>`. .. ipython:: python @@ -169,7 +169,7 @@ See the indexing documentation :ref:`Indexing and Selecting Data <indexing>` and Getting ~~~~~~~ -Selecting a single column, which yields a ``Series``, +Selecting a single column, which yields a :class:`Series`, equivalent to ``df.A``: .. ipython:: python @@ -469,10 +469,10 @@ Concatenating pandas objects together with :func:`concat`: pd.concat(pieces) .. note:: - Adding a column to a ``DataFrame`` is relatively fast. However, adding + Adding a column to a :class:`DataFrame` is relatively fast. However, adding a row requires a copy, and may be expensive. We recommend passing a - pre-built list of records to the ``DataFrame`` constructor instead - of building a ``DataFrame`` by iteratively appending records to it. + pre-built list of records to the :class:`DataFrame` constructor instead + of building a :class:`DataFrame` by iteratively appending records to it. See :ref:`Appending to dataframe <merging.concatenation>` for more. Join @@ -520,7 +520,7 @@ See the :ref:`Grouping section <groupby>`. 'D': np.random.randn(8)}) df -Grouping and then applying the :meth:`~DataFrame.sum` function to the resulting +Grouping and then applying the :meth:`~pandas.core.groupby.GroupBy.sum` function to the resulting groups. .. ipython:: python @@ -528,7 +528,7 @@ groups. df.groupby('A').sum() Grouping by multiple columns forms a hierarchical index, and again we can -apply the ``sum`` function. +apply the :meth:`~pandas.core.groupby.GroupBy.sum` function. .. ipython:: python @@ -648,7 +648,7 @@ the quarter end: Categoricals ------------ -pandas can include categorical data in a ``DataFrame``. For full docs, see the +pandas can include categorical data in a :class:`DataFrame`. For full docs, see the :ref:`categorical introduction <categorical>` and the :ref:`API documentation <api.arrays.categorical>`. .. ipython:: python @@ -664,14 +664,13 @@ Convert the raw grades to a categorical data type. df["grade"] Rename the categories to more meaningful names (assigning to -``Series.cat.categories`` is inplace!). +:meth:`Series.cat.categories` is inplace!). .. ipython:: python df["grade"].cat.categories = ["very good", "good", "very bad"] -Reorder the categories and simultaneously add the missing categories (methods under ``Series -.cat`` return a new ``Series`` by default). +Reorder the categories and simultaneously add the missing categories (methods under :meth:`Series.cat` return a new :class:`Series` by default). .. ipython:: python
I noticed that some links were missing in `10 minutes to pandas`.
https://api.github.com/repos/pandas-dev/pandas/pulls/32198
2020-02-23T14:35:26Z
2020-02-27T01:15:50Z
2020-02-27T01:15:50Z
2020-02-27T01:15:56Z
TYP: Add annotation for df.pivot
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index f1e1ebcaca1c4..dfb329b3ddbdf 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -387,7 +387,7 @@ def _verify_integrity( return new_codes @classmethod - def from_arrays(cls, arrays, sortorder=None, names=lib.no_default): + def from_arrays(cls, arrays, sortorder=None, names=lib.no_default) -> "MultiIndex": """ Convert arrays to MultiIndex. diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index c8d5eecf0e496..ea5916eff3afa 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -1,7 +1,18 @@ -from typing import TYPE_CHECKING, Callable, Dict, List, Tuple, Union +from typing import ( + TYPE_CHECKING, + Callable, + Dict, + List, + Optional, + Sequence, + Tuple, + Union, + cast, +) import numpy as np +from pandas._typing import Label from pandas.util._decorators import Appender, Substitution from pandas.core.dtypes.cast import maybe_downcast_to_dtype @@ -424,19 +435,22 @@ def _convert_by(by): @Substitution("\ndata : DataFrame") @Appender(_shared_docs["pivot"], indents=1) -def pivot(data: "DataFrame", index=None, columns=None, values=None) -> "DataFrame": +def pivot( + data: "DataFrame", + index: Optional[Union[Label, Sequence[Label]]] = None, + columns: Optional[Union[Label, Sequence[Label]]] = None, + values: Optional[Union[Label, Sequence[Label]]] = None, +) -> "DataFrame": if columns is None: raise TypeError("pivot() missing 1 required argument: 'columns'") - columns = columns if is_list_like(columns) else [columns] + + columns = com.convert_to_list_like(columns) if values is None: - cols: List[str] = [] - if index is None: - pass - elif is_list_like(index): - cols = list(index) + if index is not None: + cols = com.convert_to_list_like(index) else: - cols = [index] + cols = [] cols.extend(columns) append = index is None @@ -444,10 +458,9 @@ def pivot(data: "DataFrame", index=None, columns=None, values=None) -> "DataFram else: if index is None: index = [Series(data.index, name=data.index.name)] - elif is_list_like(index): - index = [data[idx] for idx in index] else: - index = [data[index]] + index = com.convert_to_list_like(index) + index = [data[idx] for idx in index] data_columns = [data[col] for col in columns] index.extend(data_columns) @@ -455,6 +468,7 @@ def pivot(data: "DataFrame", index=None, columns=None, values=None) -> "DataFram if is_list_like(values) and not isinstance(values, tuple): # Exclude tuple because it is seen as a single column name + values = cast(Sequence[Label], values) indexed = data._constructor( data[values]._values, index=index, columns=values )
- [ ] xref #30928 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
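The new `Optional[Union[Label, Sequence[Label]]]` annotations encode that each pivot argument accepts either a single column label or a sequence of labels. A small usage sketch (the frame here is illustrative, not from the PR; list-like arguments follow the `convert_to_list_like` handling in the diff):

```python
import pandas as pd

df = pd.DataFrame(
    {
        "row": ["r0", "r0", "r1", "r1"],
        "col": ["c0", "c1", "c0", "c1"],
        "val": [1, 2, 3, 4],
    }
)

# Scalar labels and list-like labels are both valid per the annotations.
wide_scalar = df.pivot(index="row", columns="col", values="val")
wide_listlike = df.pivot(index=["row"], columns=["col"], values=["val"])
```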
https://api.github.com/repos/pandas-dev/pandas/pulls/32197
2020-02-23T10:31:51Z
2020-05-18T22:53:35Z
2020-05-18T22:53:35Z
2020-05-18T22:53:40Z
TYP: annotations
diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index 69ceb95985140..d4f9c15a9f73f 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -8,6 +8,7 @@ import numpy as np from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib +from pandas._typing import DtypeObj from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import ( @@ -847,7 +848,7 @@ def to_dict(self, copy: bool = True): return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()} - def fast_xs(self, loc): + def fast_xs(self, loc: int): """ get a cross sectional for a given location in the items ; handle dups @@ -883,12 +884,12 @@ def fast_xs(self, loc): for i, rl in enumerate(blk.mgr_locs): result[rl] = blk.iget((i, loc)) - if is_extension_array_dtype(dtype): + if isinstance(dtype, ExtensionDtype): result = dtype.construct_array_type()._from_sequence(result, dtype=dtype) return result - def consolidate(self): + def consolidate(self) -> "BlockManager": """ Join together blocks having same dtype @@ -940,7 +941,7 @@ def get(self, item): new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True ) - def iget(self, i): + def iget(self, i: int) -> "SingleBlockManager": """ Return the data as a SingleBlockManager. """ @@ -1377,7 +1378,7 @@ def canonicalize(block): block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks) ) - def unstack(self, unstacker_func, fill_value): + def unstack(self, unstacker_func, fill_value) -> "BlockManager": """ Return a BlockManager with all blocks unstacked.. @@ -1396,8 +1397,8 @@ def unstack(self, unstacker_func, fill_value): dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items) new_columns = dummy.get_new_columns() new_index = dummy.get_new_index() - new_blocks = [] - columns_mask = [] + new_blocks: List[Block] = [] + columns_mask: List[np.ndarray] = [] for blk in self.blocks: blocks, mask = blk._unstack( @@ -1478,7 +1479,7 @@ def _post_setstate(self): pass @property - def _block(self): + def _block(self) -> Block: return self.blocks[0] @property @@ -1495,14 +1496,14 @@ def _blklocs(self): """ compat with BlockManager """ return None - def get_slice(self, slobj, axis=0): + def get_slice(self, slobj: slice, axis: int = 0) -> "SingleBlockManager": if axis >= self.ndim: raise IndexError("Requested axis not found in manager") - return type(self)(self._block._slice(slobj), self.index[slobj], fastpath=True,) + return type(self)(self._block._slice(slobj), self.index[slobj], fastpath=True) @property - def index(self): + def index(self) -> Index: return self.axes[0] @property @@ -1516,7 +1517,7 @@ def array_dtype(self): def get_dtype_counts(self): return {self.dtype.name: 1} - def get_dtypes(self): + def get_dtypes(self) -> np.ndarray: return np.array([self._block.dtype]) def external_values(self): @@ -1527,7 +1528,7 @@ def internal_values(self): """The array that Series._values returns""" return self._block.internal_values() - def get_values(self): + def get_values(self) -> np.ndarray: """ return a dense type view """ return np.array(self._block.to_dense(), copy=False) @@ -1535,7 +1536,7 @@ def get_values(self): def _can_hold_na(self) -> bool: return self._block._can_hold_na - def is_consolidated(self): + def is_consolidated(self) -> bool: return True def _consolidate_check(self): @@ -1813,9 +1814,7 @@ def _shape_compat(x): return stacked, placement -def _interleaved_dtype( - blocks: List[Block], -) -> Optional[Union[np.dtype, 
ExtensionDtype]]: +def _interleaved_dtype(blocks: Sequence[Block]) -> Optional[DtypeObj]: """ Find the common dtype for `blocks`. @@ -1825,7 +1824,7 @@ def _interleaved_dtype( Returns ------- - dtype : Optional[Union[np.dtype, ExtensionDtype]] + dtype : np.dtype, ExtensionDtype, or None None is returned when `blocks` is empty. """ if not len(blocks): diff --git a/pandas/core/ops/__init__.py b/pandas/core/ops/__init__.py index b74dea686a89f..1ef3889703341 100644 --- a/pandas/core/ops/__init__.py +++ b/pandas/core/ops/__init__.py @@ -5,22 +5,17 @@ """ import datetime import operator -from typing import TYPE_CHECKING, Optional, Set, Tuple, Union +from typing import TYPE_CHECKING, Optional, Set, Tuple import numpy as np from pandas._libs import Timedelta, Timestamp, lib from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op # noqa:F401 -from pandas._typing import Level +from pandas._typing import ArrayLike, Level from pandas.util._decorators import Appender from pandas.core.dtypes.common import is_list_like, is_timedelta64_dtype -from pandas.core.dtypes.generic import ( - ABCDataFrame, - ABCExtensionArray, - ABCIndexClass, - ABCSeries, -) +from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core.construction import extract_array @@ -451,10 +446,7 @@ def _align_method_SERIES(left, right, align_asobject=False): def _construct_result( - left: ABCSeries, - result: Union[np.ndarray, ABCExtensionArray], - index: ABCIndexClass, - name, + left: ABCSeries, result: ArrayLike, index: ABCIndexClass, name, ): """ Construct an appropriately-labelled Series from the result of an op. diff --git a/pandas/core/ops/array_ops.py b/pandas/core/ops/array_ops.py index 37a4a6eddaebe..10e3f32de3958 100644 --- a/pandas/core/ops/array_ops.py +++ b/pandas/core/ops/array_ops.py @@ -4,11 +4,12 @@ """ from functools import partial import operator -from typing import Any, Optional, Union +from typing import Any, Optional import numpy as np from pandas._libs import Timedelta, Timestamp, lib, ops as libops +from pandas._typing import ArrayLike from pandas.core.dtypes.cast import ( construct_1d_object_array_from_listlike, @@ -155,9 +156,7 @@ def na_arithmetic_op(left, right, op, str_rep: str): return missing.dispatch_fill_zeros(op, left, right, result) -def arithmetic_op( - left: Union[np.ndarray, ABCExtensionArray], right: Any, op, str_rep: str -): +def arithmetic_op(left: ArrayLike, right: Any, op, str_rep: str): """ Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ... @@ -200,9 +199,7 @@ def arithmetic_op( return res_values -def comparison_op( - left: Union[np.ndarray, ABCExtensionArray], right: Any, op -) -> Union[np.ndarray, ABCExtensionArray]: +def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: """ Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`. @@ -215,7 +212,7 @@ def comparison_op( Returns ------- - ndarrray or ExtensionArray + ndarray or ExtensionArray """ # NB: We assume extract_array has already been called on left and right lvalues = left @@ -302,9 +299,7 @@ def na_logical_op(x: np.ndarray, y, op): return result.reshape(x.shape) -def logical_op( - left: Union[np.ndarray, ABCExtensionArray], right: Any, op -) -> Union[np.ndarray, ABCExtensionArray]: +def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike: """ Evaluate a logical operation `|`, `&`, or `^`.
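The recurring unions here are collapsed into shared aliases from `pandas._typing`. Roughly, the aliases behave like the unions they replace (a sketch of how they are used, not their exact upstream definitions):

```python
from typing import TYPE_CHECKING, Union

import numpy as np

if TYPE_CHECKING:
    from pandas.core.arrays.base import ExtensionArray
    from pandas.core.dtypes.base import ExtensionDtype

# Equivalent in spirit to the annotations replaced in this diff.
ArrayLike = Union[np.ndarray, "ExtensionArray"]
DtypeObj = Union[np.dtype, "ExtensionDtype"]


def comparison_op(left: ArrayLike, right: object) -> ArrayLike:
    """Stub mirroring the tightened signature in pandas/core/ops/array_ops.py."""
    ...
```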
https://api.github.com/repos/pandas-dev/pandas/pulls/32193
2020-02-23T04:41:25Z
2020-02-23T15:23:40Z
2020-02-23T15:23:40Z
2020-02-23T17:04:04Z
Backport PR #31591 on branch 1.0.x
diff --git a/doc/source/user_guide/boolean.rst b/doc/source/user_guide/boolean.rst index 5276bc6142206..95f1931419d04 100644 --- a/doc/source/user_guide/boolean.rst +++ b/doc/source/user_guide/boolean.rst @@ -20,8 +20,9 @@ Nullable Boolean Data Type Indexing with NA values ----------------------- -pandas does not allow indexing with NA values. Attempting to do so -will raise a ``ValueError``. +pandas allows indexing with ``NA`` values in a boolean array, which are treated as ``False``. + +.. versionchanged:: 1.0.2 .. ipython:: python :okexcept: @@ -30,12 +31,11 @@ will raise a ``ValueError``. mask = pd.array([True, False, pd.NA], dtype="boolean") s[mask] -The missing values will need to be explicitly filled with True or False prior -to using the array as a mask. +If you would prefer to keep the ``NA`` values, you can manually fill them with ``fillna(True)``. .. ipython:: python - s[mask.fillna(False)] + s[mask.fillna(True)] .. _boolean.kleene: diff --git a/doc/source/user_guide/indexing.rst b/doc/source/user_guide/indexing.rst index a8cdf4a61073d..2bd3ff626f2e1 100644 --- a/doc/source/user_guide/indexing.rst +++ b/doc/source/user_guide/indexing.rst @@ -59,7 +59,7 @@ of multi-axis indexing. slices, **both** the start and the stop are included, when present in the index! See :ref:`Slicing with labels <indexing.slicing_with_labels>` and :ref:`Endpoints are inclusive <advanced.endpoints_are_inclusive>`.) - * A boolean array + * A boolean array (any ``NA`` values will be treated as ``False``). * A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above). @@ -75,7 +75,7 @@ of multi-axis indexing. * An integer e.g. ``5``. * A list or array of integers ``[4, 3, 0]``. * A slice object with ints ``1:7``. - * A boolean array. + * A boolean array (any ``NA`` values will be treated as ``False``). * A ``callable`` function with one argument (the calling Series or DataFrame) and that returns valid output for indexing (one of the above). @@ -374,6 +374,14 @@ For getting values with a boolean array: df1.loc['a'] > 0 df1.loc[:, df1.loc['a'] > 0] +NA values in a boolean array propagate as ``False``: + +.. versionchanged:: 1.0.2 + + mask = pd.array([True, False, True, False, pd.NA, False], dtype="boolean") + mask + df1[mask] + For getting a value explicitly: .. ipython:: python diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index 07afe60c9c22a..affe019d0ac86 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -26,6 +26,33 @@ Fixed regressions .. --------------------------------------------------------------------------- +Indexing with Nullable Boolean Arrays +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Previously, indexing with a nullable Boolean array containing ``NA`` would raise a ``ValueError``; this is now permitted, with ``NA`` treated as ``False``. (:issue:`31503`) + +.. ipython:: python + + s = pd.Series([1, 2, 3, 4]) + mask = pd.array([True, True, False, None], dtype="boolean") + s + mask + +*pandas 1.0.0-1.0.1* + +.. code-block:: python + + >>> s[mask] + Traceback (most recent call last): + ... + ValueError: cannot mask with array containing NA / NaN values + +*pandas 1.0.2* + +.. ipython:: python + + s[mask] + .. 
_whatsnew_102.bug_fixes: Bug fixes @@ -45,8 +72,6 @@ Bug fixes - Using ``pd.NA`` with :meth:`DataFrame.to_json` now correctly outputs a null value instead of an empty object (:issue:`31615`) - Fixed bug in parquet roundtrip with nullable unsigned integer dtypes (:issue:`31896`). - - **Experimental dtypes** - Fix bug in :meth:`DataFrame.convert_dtypes` for columns that were already using the ``"string"`` dtype (:issue:`31731`). diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index e8d5890d2564f..ec954e5721f1d 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -520,7 +520,9 @@ def __getitem__(self, key): if com.is_bool_indexer(key): # first convert to boolean, because check_array_indexer doesn't # allow object dtype - key = np.asarray(key, dtype=bool) + if is_object_dtype(key): + key = np.asarray(key, dtype=bool) + key = check_array_indexer(self, key) if key.all(): key = slice(0, None, None) diff --git a/pandas/core/common.py b/pandas/core/common.py index d8b082e7c0f79..673e223b835d2 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -124,7 +124,6 @@ def is_bool_indexer(key: Any) -> bool: check_array_indexer : Check that `key` is a valid array to index, and convert to an ndarray. """ - na_msg = "cannot mask with array containing NA / NaN values" if isinstance(key, (ABCSeries, np.ndarray, ABCIndex)) or ( is_array_like(key) and is_extension_array_dtype(key.dtype) ): @@ -132,16 +131,12 @@ def is_bool_indexer(key: Any) -> bool: key = np.asarray(values_from_object(key)) if not lib.is_bool_array(key): + na_msg = "Cannot mask with non-boolean array containing NA / NaN values" if isna(key).any(): raise ValueError(na_msg) return False return True elif is_bool_dtype(key.dtype): - # an ndarray with bool-dtype by definition has no missing values. - # So we only need to check for NAs in ExtensionArrays - if is_extension_array_dtype(key.dtype): - if np.any(key.isna()): - raise ValueError(na_msg) return True elif isinstance(key, list): try: diff --git a/pandas/core/indexers.py b/pandas/core/indexers.py index fe475527f4596..e9bdc99cef3ed 100644 --- a/pandas/core/indexers.py +++ b/pandas/core/indexers.py @@ -10,6 +10,7 @@ from pandas.core.dtypes.common import ( is_array_like, is_bool_dtype, + is_extension_array_dtype, is_integer_dtype, is_list_like, ) @@ -333,14 +334,11 @@ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: ... IndexError: Boolean index has wrong length: 3 instead of 2. - A ValueError is raised when the mask cannot be converted to - a bool-dtype ndarray. + NA values in a boolean array are treated as False. >>> mask = pd.array([True, pd.NA]) >>> pd.api.indexers.check_array_indexer(arr, mask) - Traceback (most recent call last): - ... 
- ValueError: Cannot mask with a boolean indexer containing NA values + array([ True, False]) A numpy boolean mask will get passed through (if the length is correct): @@ -392,10 +390,10 @@ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: dtype = indexer.dtype if is_bool_dtype(dtype): - try: + if is_extension_array_dtype(dtype): + indexer = indexer.to_numpy(dtype=bool, na_value=False) + else: indexer = np.asarray(indexer, dtype=bool) - except ValueError: - raise ValueError("Cannot mask with a boolean indexer containing NA values") # GH26658 if len(indexer) != len(array): diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 35909e1693b3f..7a67280f9c82d 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -13,6 +13,7 @@ is_iterator, is_list_like, is_numeric_dtype, + is_object_dtype, is_scalar, is_sequence, ) @@ -2319,10 +2320,12 @@ def check_bool_indexer(index: Index, key) -> np.ndarray: "the indexed object do not match)." ) result = result.astype(bool)._values - else: - # key might be sparse / object-dtype bool, check_array_indexer needs bool array + elif is_object_dtype(key): + # key might be object-dtype bool, check_array_indexer needs bool array result = np.asarray(result, dtype=bool) result = check_array_indexer(index, result) + else: + result = check_array_indexer(index, result) return result diff --git a/pandas/tests/arrays/categorical/test_indexing.py b/pandas/tests/arrays/categorical/test_indexing.py index 85d5a6a3dc3ac..3d9469c252914 100644 --- a/pandas/tests/arrays/categorical/test_indexing.py +++ b/pandas/tests/arrays/categorical/test_indexing.py @@ -240,14 +240,17 @@ def test_mask_with_boolean(index): @pytest.mark.parametrize("index", [True, False]) -def test_mask_with_boolean_raises(index): +def test_mask_with_boolean_na_treated_as_false(index): + # https://github.com/pandas-dev/pandas/issues/31503 s = Series(range(3)) idx = Categorical([True, False, None]) if index: idx = CategoricalIndex(idx) - with pytest.raises(ValueError, match="NA / NaN"): - s[idx] + result = s[idx] + expected = s[idx.fillna(False)] + + tm.assert_series_equal(result, expected) @pytest.fixture diff --git a/pandas/tests/extension/base/getitem.py b/pandas/tests/extension/base/getitem.py index 8615a8df22dcc..b08a64cc076b6 100644 --- a/pandas/tests/extension/base/getitem.py +++ b/pandas/tests/extension/base/getitem.py @@ -158,21 +158,23 @@ def test_getitem_boolean_array_mask(self, data): result = pd.Series(data)[mask] self.assert_series_equal(result, expected) - def test_getitem_boolean_array_mask_raises(self, data): + def test_getitem_boolean_na_treated_as_false(self, data): + # https://github.com/pandas-dev/pandas/issues/31503 mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean") mask[:2] = pd.NA + mask[2:4] = True - msg = ( - "Cannot mask with a boolean indexer containing NA values|" - "cannot mask with array containing NA / NaN values" - ) - with pytest.raises(ValueError, match=msg): - data[mask] + result = data[mask] + expected = data[mask.fillna(False)] + + self.assert_extension_array_equal(result, expected) s = pd.Series(data) - with pytest.raises(ValueError): - s[mask] + result = s[mask] + expected = s[mask.fillna(False)] + + self.assert_series_equal(result, expected) @pytest.mark.parametrize( "idx", diff --git a/pandas/tests/extension/base/setitem.py b/pandas/tests/extension/base/setitem.py index 590bcd586900a..2b46f8ec3d1c3 100644 --- a/pandas/tests/extension/base/setitem.py +++ b/pandas/tests/extension/base/setitem.py @@ -93,6 +93,90 
@@ def test_setitem_iloc_scalar_multiple_homogoneous(self, data): df.iloc[10, 1] = data[1] assert df.loc[10, "B"] == data[1] + @pytest.mark.parametrize( + "mask", + [ + np.array([True, True, True, False, False]), + pd.array([True, True, True, False, False], dtype="boolean"), + pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"), + ], + ids=["numpy-array", "boolean-array", "boolean-array-na"], + ) + def test_setitem_mask(self, data, mask, box_in_series): + arr = data[:5].copy() + expected = arr.take([0, 0, 0, 3, 4]) + if box_in_series: + arr = pd.Series(arr) + expected = pd.Series(expected) + arr[mask] = data[0] + self.assert_equal(expected, arr) + + def test_setitem_mask_raises(self, data, box_in_series): + # wrong length + mask = np.array([True, False]) + + if box_in_series: + data = pd.Series(data) + + with pytest.raises(IndexError, match="wrong length"): + data[mask] = data[0] + + mask = pd.array(mask, dtype="boolean") + with pytest.raises(IndexError, match="wrong length"): + data[mask] = data[0] + + def test_setitem_mask_boolean_array_with_na(self, data, box_in_series): + mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean") + mask[:3] = True + mask[3:5] = pd.NA + + if box_in_series: + data = pd.Series(data) + + data[mask] = data[0] + + assert (data[:3] == data[0]).all() + + @pytest.mark.parametrize( + "idx", + [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])], + ids=["list", "integer-array", "numpy-array"], + ) + def test_setitem_integer_array(self, data, idx, box_in_series): + arr = data[:5].copy() + expected = data.take([0, 0, 0, 3, 4]) + + if box_in_series: + arr = pd.Series(arr) + expected = pd.Series(expected) + + arr[idx] = arr[0] + self.assert_equal(arr, expected) + + @pytest.mark.parametrize( + "idx, box_in_series", + [ + ([0, 1, 2, pd.NA], False), + pytest.param( + [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948") + ), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + ], + ids=["list-False", "list-True", "integer-array-False", "integer-array-True"], + ) + def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series): + arr = data.copy() + + # TODO(xfail) this raises KeyError about labels not found (it tries label-based) + # for list of labels with Series + if box_in_series: + arr = pd.Series(data, index=[tm.rands(4) for _ in range(len(data))]) + + msg = "Cannot index with an integer indexer containing NA values" + with pytest.raises(ValueError, match=msg): + arr[idx] = arr[0] + @pytest.mark.parametrize("as_callable", [True, False]) @pytest.mark.parametrize("setter", ["loc", None]) def test_setitem_mask_aligned(self, data, as_callable, setter): diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/test_numpy.py index 76573242a2506..61c5925383f88 100644 --- a/pandas/tests/extension/test_numpy.py +++ b/pandas/tests/extension/test_numpy.py @@ -396,6 +396,48 @@ def test_setitem_scalar_key_sequence_raise(self, data): # Failed: DID NOT RAISE <class 'ValueError'> super().test_setitem_scalar_key_sequence_raise(data) + # TODO: there is some issue with PandasArray, therefore, + # skip the setitem test for now, and fix it later (GH 31446) + + @skip_nested + @pytest.mark.parametrize( + "mask", + [ + np.array([True, True, True, False, False]), + pd.array([True, True, True, False, False], dtype="boolean"), + ], + ids=["numpy-array", "boolean-array"], + ) + def test_setitem_mask(self, data, mask, box_in_series): + 
super().test_setitem_mask(data, mask, box_in_series) + + @skip_nested + def test_setitem_mask_raises(self, data, box_in_series): + super().test_setitem_mask_raises(data, box_in_series) + + @skip_nested + @pytest.mark.parametrize( + "idx", + [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])], + ids=["list", "integer-array", "numpy-array"], + ) + def test_setitem_integer_array(self, data, idx, box_in_series): + super().test_setitem_integer_array(data, idx, box_in_series) + + @skip_nested + @pytest.mark.parametrize( + "idx, box_in_series", + [ + ([0, 1, 2, pd.NA], False), + pytest.param([0, 1, 2, pd.NA], True, marks=pytest.mark.xfail), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + ], + ids=["list-False", "list-True", "integer-array-False", "integer-array-True"], + ) + def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series): + super().test_setitem_integer_with_missing_raises(data, idx, box_in_series) + @skip_nested def test_setitem_slice(self, data, box_in_series): super().test_setitem_slice(data, box_in_series) diff --git a/pandas/tests/indexing/test_check_indexer.py b/pandas/tests/indexing/test_check_indexer.py index 82f8c12229824..69d4065234d93 100644 --- a/pandas/tests/indexing/test_check_indexer.py +++ b/pandas/tests/indexing/test_check_indexer.py @@ -34,12 +34,14 @@ def test_valid_input(indexer, expected): @pytest.mark.parametrize( "indexer", [[True, False, None], pd.array([True, False, None], dtype="boolean")], ) -def test_bool_raise_missing_values(indexer): - array = np.array([1, 2, 3]) +def test_boolean_na_returns_indexer(indexer): + # https://github.com/pandas-dev/pandas/issues/31503 + arr = np.array([1, 2, 3]) - msg = "Cannot mask with a boolean indexer containing NA values" - with pytest.raises(ValueError, match=msg): - check_array_indexer(array, indexer) + result = check_array_indexer(arr, indexer) + expected = np.array([True, False, False], dtype=bool) + + tm.assert_numpy_array_equal(result, expected) @pytest.mark.parametrize( diff --git a/pandas/tests/indexing/test_na_indexing.py b/pandas/tests/indexing/test_na_indexing.py index befe4fee8ecf8..345ca30ec77eb 100644 --- a/pandas/tests/indexing/test_na_indexing.py +++ b/pandas/tests/indexing/test_na_indexing.py @@ -62,18 +62,29 @@ def test_series_mask_boolean(values, dtype, mask, box_mask, frame): @pytest.mark.parametrize("frame", [True, False]) -def test_indexing_with_na_raises(frame): +def test_na_treated_as_false(frame): + # https://github.com/pandas-dev/pandas/issues/31503 s = pd.Series([1, 2, 3], name="name") if frame: s = s.to_frame() + mask = pd.array([True, False, None], dtype="boolean") - match = "cannot mask with array containing NA / NaN values" - with pytest.raises(ValueError, match=match): - s[mask] - with pytest.raises(ValueError, match=match): - s.loc[mask] + result = s[mask] + expected = s[mask.fillna(False)] + + result_loc = s.loc[mask] + expected_loc = s.loc[mask.fillna(False)] - with pytest.raises(ValueError, match=match): - s.iloc[mask] + result_iloc = s.iloc[mask] + expected_iloc = s.iloc[mask.fillna(False)] + + if frame: + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result_loc, expected_loc) + tm.assert_frame_equal(result_iloc, expected_iloc) + else: + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result_loc, expected_loc) + tm.assert_series_equal(result_iloc, expected_iloc) diff --git a/pandas/tests/series/indexing/test_boolean.py 
b/pandas/tests/series/indexing/test_boolean.py index d75efcf52c271..232c9cbc1541c 100644 --- a/pandas/tests/series/indexing/test_boolean.py +++ b/pandas/tests/series/indexing/test_boolean.py @@ -75,7 +75,7 @@ def test_getitem_boolean_object(string_series): # nans raise exception omask[5:10] = np.nan - msg = "cannot mask with array containing NA / NaN values" + msg = "Cannot mask with non-boolean array containing NA / NaN values" with pytest.raises(ValueError, match=msg): s[omask] with pytest.raises(ValueError, match=msg):
xref #31591
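A minimal sketch of the behavior change being backported (it mirrors the whatsnew example in the diff and needs pandas >= 1.0.2):

```python
import pandas as pd

s = pd.Series([1, 2, 3, 4])
mask = pd.array([True, True, False, None], dtype="boolean")

# pandas 1.0.0-1.0.1 raised "cannot mask with array containing NA / NaN
# values" here; from 1.0.2 the NA entry is simply treated as False.
result = s[mask]
expected = s[mask.fillna(False)]
pd.testing.assert_series_equal(result, expected)
```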
https://api.github.com/repos/pandas-dev/pandas/pulls/32192
2020-02-23T03:09:40Z
2020-02-23T14:54:45Z
2020-02-23T14:54:45Z
2020-02-23T15:06:31Z
BLD: Init PyDateTimeAPI Warnings
diff --git a/pandas/_libs/src/ujson/python/ujson.c b/pandas/_libs/src/ujson/python/ujson.c index a40f2709c0c61..afdb44428169c 100644 --- a/pandas/_libs/src/ujson/python/ujson.c +++ b/pandas/_libs/src/ujson/python/ujson.c @@ -38,6 +38,7 @@ Numeric decoder derived from from TCL library #include "version.h" #define PY_SSIZE_T_CLEAN #include <Python.h> +#include "datetime.h" /* objToJSON */ PyObject *objToJSON(PyObject *self, PyObject *args, PyObject *kwargs); @@ -71,8 +72,14 @@ static PyModuleDef moduledef = { .m_methods = ujsonMethods }; +void pydatetime_import(void) +{ + PyDateTime_IMPORT; + return; +} PyMODINIT_FUNC PyInit_json(void) { + pydatetime_import(); initObjToJSON(); // TODO: clean up, maybe via tp_free? return PyModuleDef_Init(&moduledef);
- Fixes the warning below: ``` In file included from pandas/_libs/src/ujson/python/date_conversions.h:7:0, from pandas/_libs/src/ujson/python/date_conversions.c:4: /home/vsts/miniconda3/envs/pandas-dev/include/python3.7m/datetime.h:204:25: warning: ‘PyDateTimeAPI’ defined but not used [-Wunused-variable] static PyDateTime_CAPI *PyDateTimeAPI = NULL; ``` https://github.com/pandas-dev/pandas/pull/32163#issuecomment-590010308 (will check CI) The same warning still comes from np_datetime and np_datetime_strings.
https://api.github.com/repos/pandas-dev/pandas/pulls/32191
2020-02-23T01:55:35Z
2020-02-23T02:09:06Z
null
2020-05-01T22:02:02Z
CLN: F-string in pandas/tests/indexes/datetimes/test_to_period.py (#29547)
diff --git a/pandas/tests/indexes/datetimes/test_to_period.py b/pandas/tests/indexes/datetimes/test_to_period.py index 5567f98c52211..ddbb43787abb4 100644 --- a/pandas/tests/indexes/datetimes/test_to_period.py +++ b/pandas/tests/indexes/datetimes/test_to_period.py @@ -43,7 +43,7 @@ def test_dti_to_period(self): @pytest.mark.parametrize("month", MONTHS) def test_to_period_quarterly(self, month): # make sure we can make the round trip - freq = "Q-{month}".format(month=month) + freq = f"Q-{month}" rng = period_range("1989Q3", "1991Q3", freq=freq) stamps = rng.to_timestamp() result = stamps.to_period(freq)
Issue #29547 - [x] tests passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/32189
2020-02-22T22:50:16Z
2020-02-23T01:00:57Z
2020-02-23T01:00:57Z
2020-02-23T01:01:03Z
CLN/TST: parametrize some tests in tests.indexing.test_float
diff --git a/pandas/tests/indexing/test_floats.py b/pandas/tests/indexing/test_floats.py index 4d3f1b0539aee..87520f5ab2577 100644 --- a/pandas/tests/indexing/test_floats.py +++ b/pandas/tests/indexing/test_floats.py @@ -162,10 +162,9 @@ def test_scalar_non_numeric(self, index_func, klass): s2.loc[3.0] = 10 assert s2.index.is_object() - for idxr in [lambda x: x]: - s2 = s.copy() - idxr(s2)[3.0] = 0 - assert s2.index.is_object() + s2 = s.copy() + s2[3.0] = 0 + assert s2.index.is_object() @pytest.mark.parametrize( "index_func", @@ -250,12 +249,7 @@ def test_scalar_integer(self, index_func, klass): # integer index i = index_func(5) - - if klass is Series: - # TODO: Should we be passing index=i here? - obj = Series(np.arange(len(i))) - else: - obj = DataFrame(np.random.randn(len(i), len(i)), index=i, columns=i) + obj = gen_obj(klass, i) # coerce to equal int for idxr, getitem in [(lambda x: x.loc, False), (lambda x: x, True)]: @@ -313,7 +307,7 @@ def test_scalar_float(self, klass): result = idxr(s2)[indexer] self.check(result, s, 3, getitem) - # random integer is a KeyError + # random float is a KeyError with pytest.raises(KeyError, match=r"^3\.5$"): idxr(s)[3.5] @@ -429,15 +423,6 @@ def test_slice_integer(self): indexer = slice(3, 5) self.check(result, s, indexer, False) - # positional indexing - msg = ( - "cannot do slice indexing " - fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " - "type float" - ) - with pytest.raises(TypeError, match=msg): - s[l] - # getitem out-of-bounds for l in [slice(-6, 6), slice(-6.0, 6.0)]: @@ -485,23 +470,6 @@ def test_slice_integer(self): with pytest.raises(TypeError, match=msg): s[l] - # setitem - for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: - - sc = s.copy() - sc.loc[l] = 0 - result = sc.loc[l].values.ravel() - assert (result == 0).all() - - # positional indexing - msg = ( - "cannot do slice indexing " - fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " - "type float" - ) - with pytest.raises(TypeError, match=msg): - s[l] = 0 - @pytest.mark.parametrize("l", [slice(2, 4.0), slice(2.0, 4), slice(2.0, 4.0)]) def test_integer_positional_indexing(self, l): """ make sure that we are raising on positional indexing @@ -584,22 +552,34 @@ def test_slice_integer_frame_getitem(self, index_func): with pytest.raises(TypeError, match=msg): s[l] + @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) + @pytest.mark.parametrize( + "index_func", [tm.makeIntIndex, tm.makeRangeIndex], + ) + def test_float_slice_getitem_with_integer_index_raises(self, l, index_func): + + # similar to above, but on the getitem dim (of a DataFrame) + index = index_func(5) + + s = DataFrame(np.random.randn(5, 2), index=index) + # setitem - for l in [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]: + sc = s.copy() + sc.loc[l] = 0 + result = sc.loc[l].values.ravel() + assert (result == 0).all() - sc = s.copy() - sc.loc[l] = 0 - result = sc.loc[l].values.ravel() - assert (result == 0).all() + # positional indexing + msg = ( + "cannot do slice indexing " + fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " + "type float" + ) + with pytest.raises(TypeError, match=msg): + s[l] = 0 - # positional indexing - msg = ( - "cannot do slice indexing " - fr"on {type(index).__name__} with these indexers \[(3|4)\.0\] of " - "type float" - ) - with pytest.raises(TypeError, match=msg): - s[l] = 0 + with pytest.raises(TypeError, match=msg): + s[l] @pytest.mark.parametrize("l", [slice(3.0, 4), slice(3, 4.0), slice(3.0, 4.0)]) 
@pytest.mark.parametrize("klass", [Series, DataFrame]) @@ -614,10 +594,9 @@ def test_slice_float(self, l, klass): # getitem result = idxr(s)[l] - if isinstance(s, Series): - tm.assert_series_equal(result, expected) - else: - tm.assert_frame_equal(result, expected) + assert isinstance(result, type(s)) + tm.assert_equal(result, expected) + # setitem s2 = s.copy() idxr(s2)[l] = 0
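The cleanup pattern used throughout this diff, parametrizing over the container class and comparing with the type-agnostic `tm.assert_equal` instead of branching on `isinstance`, looks roughly like this (a sketch; the test body itself is made up):

```python
import numpy as np
import pandas as pd
import pandas._testing as tm
import pytest


@pytest.mark.parametrize("klass", [pd.Series, pd.DataFrame])
def test_float_label_slice(klass):
    index = pd.Index(np.arange(5.0))
    obj = klass(np.random.randn(5), index=index)

    # Label-based slicing on a float index includes both endpoints.
    result = obj.loc[2.0:4.0]
    expected = obj.iloc[2:5]

    assert isinstance(result, type(obj))
    tm.assert_equal(result, expected)  # works for Series and DataFrame alike
```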
https://api.github.com/repos/pandas-dev/pandas/pulls/32187
2020-02-22T21:27:38Z
2020-02-26T04:43:20Z
2020-02-26T04:43:20Z
2020-02-26T04:43:27Z
CLN: Removed class in pandas/tests/series/test_timezones.py
diff --git a/pandas/tests/series/test_timezones.py b/pandas/tests/series/test_timezones.py index a363f927d10a9..46c873e6a6095 100644 --- a/pandas/tests/series/test_timezones.py +++ b/pandas/tests/series/test_timezones.py @@ -15,352 +15,368 @@ from pandas.core.indexes.datetimes import date_range -class TestSeriesTimezones: - # ----------------------------------------------------------------- - # Series.tz_localize - def test_series_tz_localize(self): +# ----------------------------------------------------------------- +# Series.tz_localize +def test_series_tz_localize(): + + rng = date_range("1/1/2011", periods=100, freq="H") + ts = Series(1, index=rng) + + result = ts.tz_localize("utc") + assert result.index.tz.zone == "UTC" + + # Can't localize if already tz-aware + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + ts = Series(1, index=rng) + + with pytest.raises(TypeError, match="Already tz-aware"): + ts.tz_localize("US/Eastern") + + +def test_series_tz_localize_ambiguous_bool(): + # make sure that we are correctly accepting bool values as ambiguous + + # GH#14402 + ts = Timestamp("2015-11-01 01:00:03") + expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central") + expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central") + + ser = Series([ts]) + expected0 = Series([expected0]) + expected1 = Series([expected1]) + + with pytest.raises(pytz.AmbiguousTimeError): + ser.dt.tz_localize("US/Central") + + result = ser.dt.tz_localize("US/Central", ambiguous=True) + tm.assert_series_equal(result, expected0) + + result = ser.dt.tz_localize("US/Central", ambiguous=[True]) + tm.assert_series_equal(result, expected0) + + result = ser.dt.tz_localize("US/Central", ambiguous=False) + tm.assert_series_equal(result, expected1) + + result = ser.dt.tz_localize("US/Central", ambiguous=[False]) + tm.assert_series_equal(result, expected1) + + +@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"]) +@pytest.mark.parametrize( + "method, exp", + [ + ["shift_forward", "2015-03-29 03:00:00"], + ["NaT", NaT], + ["raise", None], + ["foo", "invalid"], + ], +) +def test_series_tz_localize_nonexistent(tz, method, exp): + # GH 8917 + n = 60 + dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min") + s = Series(1, dti) + if method == "raise": + with pytest.raises(pytz.NonExistentTimeError): + s.tz_localize(tz, nonexistent=method) + elif exp == "invalid": + with pytest.raises(ValueError): + dti.tz_localize(tz, nonexistent=method) + else: + result = s.tz_localize(tz, nonexistent=method) + expected = Series(1, index=DatetimeIndex([exp] * n, tz=tz)) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) +def test_series_tz_localize_empty(tzstr): + # GH#2248 + ser = Series(dtype=object) + + ser2 = ser.tz_localize("utc") + assert ser2.index.tz == pytz.utc + + ser2 = ser.tz_localize(tzstr) + timezones.tz_compare(ser2.index.tz, timezones.maybe_get_tz(tzstr)) + + +# ----------------------------------------------------------------- +# Series.tz_convert + + +def test_series_tz_convert(): + rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") + ts = Series(1, index=rng) + + result = ts.tz_convert("Europe/Berlin") + assert result.index.tz.zone == "Europe/Berlin" + + # can't convert tz-naive + rng = date_range("1/1/2011", periods=200, freq="D") + ts = Series(1, index=rng) - rng = date_range("1/1/2011", periods=100, freq="H") - ts = Series(1, index=rng) + with pytest.raises(TypeError, 
match="Cannot convert tz-naive"): + ts.tz_convert("US/Eastern") - result = ts.tz_localize("utc") - assert result.index.tz.zone == "UTC" - # Can't localize if already tz-aware - rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") - ts = Series(1, index=rng) +def test_series_tz_convert_to_utc(): + base = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC") + idx1 = base.tz_convert("Asia/Tokyo")[:2] + idx2 = base.tz_convert("US/Eastern")[1:] - with pytest.raises(TypeError, match="Already tz-aware"): - ts.tz_localize("US/Eastern") + res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2) + tm.assert_series_equal(res, Series([np.nan, 3, np.nan], index=base)) - def test_series_tz_localize_ambiguous_bool(self): - # make sure that we are correctly accepting bool values as ambiguous - # GH#14402 - ts = Timestamp("2015-11-01 01:00:03") - expected0 = Timestamp("2015-11-01 01:00:03-0500", tz="US/Central") - expected1 = Timestamp("2015-11-01 01:00:03-0600", tz="US/Central") +# ----------------------------------------------------------------- +# Series.append - ser = Series([ts]) - expected0 = Series([expected0]) - expected1 = Series([expected1]) - with pytest.raises(pytz.AmbiguousTimeError): - ser.dt.tz_localize("US/Central") +def test_series_append_aware(): + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) - result = ser.dt.tz_localize("US/Central", ambiguous=True) - tm.assert_series_equal(result, expected0) + exp_index = DatetimeIndex(["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern") + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz - result = ser.dt.tz_localize("US/Central", ambiguous=[True]) - tm.assert_series_equal(result, expected0) + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="UTC") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="UTC") + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) - result = ser.dt.tz_localize("US/Central", ambiguous=False) - tm.assert_series_equal(result, expected1) + exp_index = DatetimeIndex(["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC") + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + utc = rng1.tz + assert utc == ts_result.index.tz - result = ser.dt.tz_localize("US/Central", ambiguous=[False]) - tm.assert_series_equal(result, expected1) + # GH#7795 + # different tz coerces to object dtype, not UTC + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Central") + ser1 = Series([1], index=rng1) + ser2 = Series([2], index=rng2) + ts_result = ser1.append(ser2) + exp_index = Index( + [ + Timestamp("1/1/2011 01:00", tz="US/Eastern"), + Timestamp("1/1/2011 02:00", tz="US/Central"), + ] + ) + exp = Series([1, 2], index=exp_index) + tm.assert_series_equal(ts_result, exp) + + +def test_series_append_aware_naive(): + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") + rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") + ser1 = Series(np.random.randn(len(rng1)), index=rng1) + ser2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ser1.append(ser2) + + expected = ser1.index.astype(object).append(ser2.index.astype(object)) + assert 
ts_result.index.equals(expected) - @pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"]) - @pytest.mark.parametrize( - "method, exp", + # mixed + rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") + rng2 = range(100) + ser1 = Series(np.random.randn(len(rng1)), index=rng1) + ser2 = Series(np.random.randn(len(rng2)), index=rng2) + ts_result = ser1.append(ser2) + + expected = ser1.index.astype(object).append(ser2.index) + assert ts_result.index.equals(expected) + + +def test_series_append_dst(): + rng1 = date_range("1/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") + rng2 = date_range("8/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") + ser1 = Series([1, 2, 3], index=rng1) + ser2 = Series([10, 11, 12], index=rng2) + ts_result = ser1.append(ser2) + + exp_index = DatetimeIndex( [ - ["shift_forward", "2015-03-29 03:00:00"], - ["NaT", NaT], - ["raise", None], - ["foo", "invalid"], + "2016-01-01 01:00", + "2016-01-01 02:00", + "2016-01-01 03:00", + "2016-08-01 01:00", + "2016-08-01 02:00", + "2016-08-01 03:00", ], + tz="US/Eastern", ) - def test_series_tz_localize_nonexistent(self, tz, method, exp): - # GH 8917 - n = 60 - dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min") - s = Series(1, dti) - if method == "raise": - with pytest.raises(pytz.NonExistentTimeError): - s.tz_localize(tz, nonexistent=method) - elif exp == "invalid": - with pytest.raises(ValueError): - dti.tz_localize(tz, nonexistent=method) - else: - result = s.tz_localize(tz, nonexistent=method) - expected = Series(1, index=DatetimeIndex([exp] * n, tz=tz)) - tm.assert_series_equal(result, expected) - - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_series_tz_localize_empty(self, tzstr): - # GH#2248 - ser = Series(dtype=object) - - ser2 = ser.tz_localize("utc") - assert ser2.index.tz == pytz.utc - - ser2 = ser.tz_localize(tzstr) - timezones.tz_compare(ser2.index.tz, timezones.maybe_get_tz(tzstr)) - - # ----------------------------------------------------------------- - # Series.tz_convert - - def test_series_tz_convert(self): - rng = date_range("1/1/2011", periods=200, freq="D", tz="US/Eastern") - ts = Series(1, index=rng) - - result = ts.tz_convert("Europe/Berlin") - assert result.index.tz.zone == "Europe/Berlin" - - # can't convert tz-naive - rng = date_range("1/1/2011", periods=200, freq="D") - ts = Series(1, index=rng) - - with pytest.raises(TypeError, match="Cannot convert tz-naive"): - ts.tz_convert("US/Eastern") - - def test_series_tz_convert_to_utc(self): - base = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], tz="UTC") - idx1 = base.tz_convert("Asia/Tokyo")[:2] - idx2 = base.tz_convert("US/Eastern")[1:] - - res = Series([1, 2], index=idx1) + Series([1, 1], index=idx2) - tm.assert_series_equal(res, Series([np.nan, 3, np.nan], index=base)) - - # ----------------------------------------------------------------- - # Series.append - - def test_series_append_aware(self): - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex( - ["2011-01-01 01:00", "2011-01-01 02:00"], tz="US/Eastern" - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="UTC") - rng2 = date_range("1/1/2011 
02:00", periods=1, freq="H", tz="UTC") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex(["2011-01-01 01:00", "2011-01-01 02:00"], tz="UTC") - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - utc = rng1.tz - assert utc == ts_result.index.tz - - # GH#7795 - # different tz coerces to object dtype, not UTC - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H", tz="US/Eastern") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Central") - ser1 = Series([1], index=rng1) - ser2 = Series([2], index=rng2) - ts_result = ser1.append(ser2) - exp_index = Index( - [ - Timestamp("1/1/2011 01:00", tz="US/Eastern"), - Timestamp("1/1/2011 02:00", tz="US/Central"), - ] - ) - exp = Series([1, 2], index=exp_index) - tm.assert_series_equal(ts_result, exp) - - def test_series_append_aware_naive(self): - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") - rng2 = date_range("1/1/2011 02:00", periods=1, freq="H", tz="US/Eastern") - ser1 = Series(np.random.randn(len(rng1)), index=rng1) - ser2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ser1.append(ser2) - - expected = ser1.index.astype(object).append(ser2.index.astype(object)) - assert ts_result.index.equals(expected) - - # mixed - rng1 = date_range("1/1/2011 01:00", periods=1, freq="H") - rng2 = range(100) - ser1 = Series(np.random.randn(len(rng1)), index=rng1) - ser2 = Series(np.random.randn(len(rng2)), index=rng2) - ts_result = ser1.append(ser2) - - expected = ser1.index.astype(object).append(ser2.index) - assert ts_result.index.equals(expected) - - def test_series_append_dst(self): - rng1 = date_range("1/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") - rng2 = date_range("8/1/2016 01:00", periods=3, freq="H", tz="US/Eastern") - ser1 = Series([1, 2, 3], index=rng1) - ser2 = Series([10, 11, 12], index=rng2) - ts_result = ser1.append(ser2) - - exp_index = DatetimeIndex( - [ - "2016-01-01 01:00", - "2016-01-01 02:00", - "2016-01-01 03:00", - "2016-08-01 01:00", - "2016-08-01 02:00", - "2016-08-01 03:00", - ], - tz="US/Eastern", - ) - exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) - tm.assert_series_equal(ts_result, exp) - assert ts_result.index.tz == rng1.tz - - # ----------------------------------------------------------------- - - def test_dateutil_tzoffset_support(self): - values = [188.5, 328.25] - tzinfo = tzoffset(None, 7200) - index = [ - datetime(2012, 5, 11, 11, tzinfo=tzinfo), - datetime(2012, 5, 11, 12, tzinfo=tzinfo), - ] - series = Series(data=values, index=index) + exp = Series([1, 2, 3, 10, 11, 12], index=exp_index) + tm.assert_series_equal(ts_result, exp) + assert ts_result.index.tz == rng1.tz - assert series.index.tz == tzinfo - # it works! #2443 - repr(series.index[0]) +# ----------------------------------------------------------------- - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) - def test_tz_aware_asfreq(self, tz): - dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz) - ser = Series(np.random.randn(len(dr)), index=dr) +def test_dateutil_tzoffset_support(): + values = [188.5, 328.25] + tzinfo = tzoffset(None, 7200) + index = [ + datetime(2012, 5, 11, 11, tzinfo=tzinfo), + datetime(2012, 5, 11, 12, tzinfo=tzinfo), + ] + series = Series(data=values, index=index) - # it works! 
- ser.asfreq("T") + assert series.index.tz == tzinfo - @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) - def test_string_index_alias_tz_aware(self, tz): - rng = date_range("1/1/2000", periods=10, tz=tz) - ser = Series(np.random.randn(len(rng)), index=rng) + # it works! #2443 + repr(series.index[0]) - result = ser["1/3/2000"] - tm.assert_almost_equal(result, ser[2]) - # TODO: De-duplicate with test below - def test_series_add_tz_mismatch_converts_to_utc_duplicate(self): - rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern") - ser = Series(np.random.randn(len(rng)), index=rng) +@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) +def test_tz_aware_asfreq(tz): + dr = date_range("2011-12-01", "2012-07-20", freq="D", tz=tz) - ts_moscow = ser.tz_convert("Europe/Moscow") + ser = Series(np.random.randn(len(dr)), index=dr) - result = ser + ts_moscow - assert result.index.tz is pytz.utc + # it works! + ser.asfreq("T") - result = ts_moscow + ser - assert result.index.tz is pytz.utc - def test_series_add_tz_mismatch_converts_to_utc(self): - rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") +@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) +def test_string_index_alias_tz_aware(tz): + rng = date_range("1/1/2000", periods=10, tz=tz) + ser = Series(np.random.randn(len(rng)), index=rng) - perm = np.random.permutation(100)[:90] - ser1 = Series( - np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern") - ) + result = ser["1/3/2000"] + tm.assert_almost_equal(result, ser[2]) - perm = np.random.permutation(100)[:90] - ser2 = Series( - np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin") - ) - result = ser1 + ser2 +# TODO: De-duplicate with test below +def test_series_add_tz_mismatch_converts_to_utc_duplicate(): + rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern") + ser = Series(np.random.randn(len(rng)), index=rng) - uts1 = ser1.tz_convert("utc") - uts2 = ser2.tz_convert("utc") - expected = uts1 + uts2 + ts_moscow = ser.tz_convert("Europe/Moscow") - assert result.index.tz == pytz.UTC - tm.assert_series_equal(result, expected) + result = ser + ts_moscow + assert result.index.tz is pytz.utc - def test_series_add_aware_naive_raises(self): - rng = date_range("1/1/2011", periods=10, freq="H") - ser = Series(np.random.randn(len(rng)), index=rng) + result = ts_moscow + ser + assert result.index.tz is pytz.utc - ser_utc = ser.tz_localize("utc") - with pytest.raises(Exception): - ser + ser_utc +def test_series_add_tz_mismatch_converts_to_utc(): + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") - with pytest.raises(Exception): - ser_utc + ser + perm = np.random.permutation(100)[:90] + ser1 = Series(np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern")) - def test_series_align_aware(self): - idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern") - ser = Series(np.random.randn(len(idx1)), index=idx1) - ser_central = ser.tz_convert("US/Central") - # # different timezones convert to UTC + perm = np.random.permutation(100)[:90] + ser2 = Series(np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin")) - new1, new2 = ser.align(ser_central) - assert new1.index.tz == pytz.UTC - assert new2.index.tz == pytz.UTC + result = ser1 + ser2 - @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) - def test_localized_at_time_between_time(self, tzstr): - from datetime import time + uts1 = ser1.tz_convert("utc") + uts2 = ser2.tz_convert("utc") 
+ expected = uts1 + uts2 - tz = timezones.maybe_get_tz(tzstr) + assert result.index.tz == pytz.UTC + tm.assert_series_equal(result, expected) - rng = date_range("4/16/2012", "5/1/2012", freq="H") - ts = Series(np.random.randn(len(rng)), index=rng) - ts_local = ts.tz_localize(tzstr) +def test_series_add_aware_naive_raises(): + rng = date_range("1/1/2011", periods=10, freq="H") + ser = Series(np.random.randn(len(rng)), index=rng) - result = ts_local.at_time(time(10, 0)) - expected = ts.at_time(time(10, 0)).tz_localize(tzstr) - tm.assert_series_equal(result, expected) - assert timezones.tz_compare(result.index.tz, tz) + ser_utc = ser.tz_localize("utc") - t1, t2 = time(10, 0), time(11, 0) - result = ts_local.between_time(t1, t2) - expected = ts.between_time(t1, t2).tz_localize(tzstr) - tm.assert_series_equal(result, expected) - assert timezones.tz_compare(result.index.tz, tz) - - @pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"]) - def test_getitem_pydatetime_tz(self, tzstr): - tz = timezones.maybe_get_tz(tzstr) - - index = date_range( - start="2012-12-24 16:00", end="2012-12-24 18:00", freq="H", tz=tzstr - ) - ts = Series(index=index, data=index.hour) - time_pandas = Timestamp("2012-12-24 17:00", tz=tzstr) - - dt = datetime(2012, 12, 24, 17, 0) - time_datetime = conversion.localize_pydatetime(dt, tz) - assert ts[time_pandas] == ts[time_datetime] - - def test_series_truncate_datetimeindex_tz(self): - # GH 9243 - idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific") - s = Series(range(len(idx)), index=idx) - result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4)) - expected = Series([1, 2, 3], index=idx[1:4]) - tm.assert_series_equal(result, expected) + with pytest.raises(Exception): + ser + ser_utc + + with pytest.raises(Exception): + ser_utc + ser + + +def test_series_align_aware(): + idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern") + ser = Series(np.random.randn(len(idx1)), index=idx1) + ser_central = ser.tz_convert("US/Central") + # # different timezones convert to UTC + + new1, new2 = ser.align(ser_central) + assert new1.index.tz == pytz.UTC + assert new2.index.tz == pytz.UTC + + +@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"]) +def test_localized_at_time_between_time(tzstr): + from datetime import time + + tz = timezones.maybe_get_tz(tzstr) - @pytest.mark.parametrize("copy", [True, False]) - @pytest.mark.parametrize( - "method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]] + rng = date_range("4/16/2012", "5/1/2012", freq="H") + ts = Series(np.random.randn(len(rng)), index=rng) + + ts_local = ts.tz_localize(tzstr) + + result = ts_local.at_time(time(10, 0)) + expected = ts.at_time(time(10, 0)).tz_localize(tzstr) + tm.assert_series_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + t1, t2 = time(10, 0), time(11, 0) + result = ts_local.between_time(t1, t2) + expected = ts.between_time(t1, t2).tz_localize(tzstr) + tm.assert_series_equal(result, expected) + assert timezones.tz_compare(result.index.tz, tz) + + +@pytest.mark.parametrize("tzstr", ["Europe/Berlin", "dateutil/Europe/Berlin"]) +def test_getitem_pydatetime_tz(tzstr): + tz = timezones.maybe_get_tz(tzstr) + + index = date_range( + start="2012-12-24 16:00", end="2012-12-24 18:00", freq="H", tz=tzstr ) - def test_tz_localize_convert_copy_inplace_mutate(self, copy, method, tz): - # GH 6326 - result = Series( - np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz) - ) - getattr(result, 
method)("UTC", copy=copy) - expected = Series( - np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz) - ) - tm.assert_series_equal(result, expected) + ts = Series(index=index, data=index.hour) + time_pandas = Timestamp("2012-12-24 17:00", tz=tzstr) + + dt = datetime(2012, 12, 24, 17, 0) + time_datetime = conversion.localize_pydatetime(dt, tz) + assert ts[time_pandas] == ts[time_datetime] + + +def test_series_truncate_datetimeindex_tz(): + # GH 9243 + idx = date_range("4/1/2005", "4/30/2005", freq="D", tz="US/Pacific") + s = Series(range(len(idx)), index=idx) + result = s.truncate(datetime(2005, 4, 2), datetime(2005, 4, 4)) + expected = Series([1, 2, 3], index=idx[1:4]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("copy", [True, False]) +@pytest.mark.parametrize( + "method, tz", [["tz_localize", None], ["tz_convert", "Europe/Berlin"]] +) +def test_tz_localize_convert_copy_inplace_mutate(copy, method, tz): + # GH 6326 + result = Series( + np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz) + ) + getattr(result, method)("UTC", copy=copy) + expected = Series( + np.arange(0, 5), index=date_range("20131027", periods=5, freq="1H", tz=tz) + ) + tm.assert_series_equal(result, expected) - def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture): - # GH 25843 - tz = tz_aware_fixture - result = Series([Timestamp("2019", tz=tz)], dtype="datetime64[ns]") - expected = Series([Timestamp("2019")]) - tm.assert_series_equal(result, expected) + +def test_constructor_data_aware_dtype_naive(tz_aware_fixture): + # GH 25843 + tz = tz_aware_fixture + result = Series([Timestamp("2019", tz=tz)], dtype="datetime64[ns]") + expected = Series([Timestamp("2019")]) + tm.assert_series_equal(result, expected)
- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/32186
2020-02-22T21:19:02Z
2020-02-23T16:15:29Z
null
2020-02-23T16:15:29Z
BUG: 2D DTA/TDA arithmetic with object-dtype
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index f637e16caa4c6..8c870c6255200 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -42,6 +42,7 @@ from pandas.core.algorithms import checked_add_with_arr, take, unique1d, value_counts from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin import pandas.core.common as com +from pandas.core.construction import array, extract_array from pandas.core.indexers import check_array_indexer from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import invalid_comparison, make_invalid_op @@ -623,7 +624,7 @@ def astype(self, dtype, copy=True): dtype = pandas_dtype(dtype) if is_object_dtype(dtype): - return self._box_values(self.asi8) + return self._box_values(self.asi8.ravel()).reshape(self.shape) elif is_string_dtype(dtype) and not is_categorical_dtype(dtype): return self._format_native_types() elif is_integer_dtype(dtype): @@ -1256,19 +1257,13 @@ def _addsub_object_array(self, other: np.ndarray, op): PerformanceWarning, ) - # For EA self.astype('O') returns a numpy array, not an Index - left = self.astype("O") + # Caller is responsible for broadcasting if necessary + assert self.shape == other.shape, (self.shape, other.shape) - res_values = op(left, np.array(other)) - kwargs = {} - if not is_period_dtype(self): - kwargs["freq"] = "infer" - try: - res = type(self)._from_sequence(res_values, **kwargs) - except ValueError: - # e.g. we've passed a Timestamp to TimedeltaArray - res = res_values - return res + res_values = op(self.astype("O"), np.array(other)) + result = array(res_values.ravel()) + result = extract_array(result, extract_numpy=True).reshape(self.shape) + return result def _time_shift(self, periods, freq=None): """ diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index d3f9ac4f3f8b2..f7211ab5f9fd4 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -27,6 +27,7 @@ date_range, ) import pandas._testing as tm +from pandas.core.arrays import DatetimeArray, TimedeltaArray from pandas.core.ops import roperator from pandas.tests.arithmetic.common import ( assert_invalid_addsub_type, @@ -956,6 +957,18 @@ def test_dt64arr_sub_NaT(self, box_with_array): # ------------------------------------------------------------- # Subtraction of datetime-like array-like + def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture): + dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture) + expected = dti - dti + + obj = tm.box_expected(dti, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + warn = PerformanceWarning if box_with_array is not pd.DataFrame else None + with tm.assert_produces_warning(warn): + result = obj - obj.astype(object) + tm.assert_equal(result, expected) + def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array): dti = pd.date_range("2016-01-01", periods=3, tz=None) dt64vals = dti.values @@ -2395,3 +2408,31 @@ def test_shift_months(years, months): raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti] expected = DatetimeIndex(raw) tm.assert_index_equal(actual, expected) + + +def test_dt64arr_addsub_object_dtype_2d(): + # block-wise DataFrame operations will require operating on 2D + # DatetimeArray/TimedeltaArray, so check that specifically. 
+ dti = pd.date_range("1994-02-13", freq="2W", periods=4) + dta = dti._data.reshape((4, 1)) + + other = np.array([[pd.offsets.Day(n)] for n in range(4)]) + assert other.shape == dta.shape + + with tm.assert_produces_warning(PerformanceWarning): + result = dta + other + with tm.assert_produces_warning(PerformanceWarning): + expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1) + + assert isinstance(result, DatetimeArray) + assert result.freq is None + tm.assert_numpy_array_equal(result._data, expected._data) + + with tm.assert_produces_warning(PerformanceWarning): + # Case where we expect to get a TimedeltaArray back + result2 = dta - dta.astype(object) + + assert isinstance(result2, TimedeltaArray) + assert result2.shape == (4, 1) + assert result2.freq is None + assert (result2.asi8 == 0).all() diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 300e468c34e65..b11fcfd20b8c4 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -532,6 +532,20 @@ def test_tda_add_sub_index(self): expected = tdi - tdi tm.assert_index_equal(result, expected) + def test_tda_add_dt64_object_array(self, box_df_fail, tz_naive_fixture): + # Result should be cast back to DatetimeArray + dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture) + dti._set_freq(None) + tdi = dti - dti + + obj = tm.box_expected(tdi, box_df_fail) + other = tm.box_expected(dti, box_df_fail) + + warn = PerformanceWarning if box_df_fail is not pd.DataFrame else None + with tm.assert_produces_warning(warn): + result = obj + other.astype(object) + tm.assert_equal(result, other) + # ------------------------------------------------------------- # Binary operations TimedeltaIndex and timedelta-like
The main motivation for this is that it is a prerequisite for implementing frame-with-frame ops blockwise. It also fixes a few broken cases:

- `dta - dta.astype(object)` currently raises TypeError
- ditto `dti - dti.astype(object)`
- `tda + dta.astype(object)` returns an ndarray of timestamps instead of a DatetimeArray
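For quick reference, a condensed non-test sketch of the 2D case this PR exercises, based on `test_dt64arr_addsub_object_dtype_2d` in the diff above (it relies on the private `_data` attribute exactly as that test does, and object-dtype arithmetic emits a `PerformanceWarning`):

```python
import numpy as np
import pandas as pd

dti = pd.date_range("1994-02-13", freq="2W", periods=4)
dta = dti._data.reshape((4, 1))  # 2D DatetimeArray (private API, as in the test)

# object-dtype array of offsets, same shape as dta
other = np.array([[pd.offsets.Day(n)] for n in range(4)])

# With this fix the result stays a DatetimeArray (a PerformanceWarning
# is emitted for object-dtype arithmetic, as the tests assert)
result = dta + other

# Subtracting the object-dtype view of itself now returns an all-zero
# TimedeltaArray instead of raising TypeError
result2 = dta - dta.astype(object)
```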
https://api.github.com/repos/pandas-dev/pandas/pulls/32185
2020-02-22T21:07:32Z
2020-03-03T01:50:02Z
2020-03-03T01:50:02Z
2020-03-09T17:45:22Z
CLN: Removed class in pandas/tests/series/test_validate.py
diff --git a/pandas/tests/series/test_validate.py b/pandas/tests/series/test_validate.py index c4311f507f7ee..511d24ca7fa29 100644 --- a/pandas/tests/series/test_validate.py +++ b/pandas/tests/series/test_validate.py @@ -1,20 +1,18 @@ import pytest -class TestSeriesValidate: +@pytest.mark.parametrize( + "func", + ["reset_index", "_set_name", "sort_values", "sort_index", "rename", "dropna"], +) +@pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0]) +def test_validate_bool_args(string_series, func, inplace): """Tests for error handling related to data types of method arguments.""" + msg = 'For argument "inplace" expected type bool' + kwargs = dict(inplace=inplace) - @pytest.mark.parametrize( - "func", - ["reset_index", "_set_name", "sort_values", "sort_index", "rename", "dropna"], - ) - @pytest.mark.parametrize("inplace", [1, "True", [1, 2, 3], 5.0]) - def test_validate_bool_args(self, string_series, func, inplace): - msg = 'For argument "inplace" expected type bool' - kwargs = dict(inplace=inplace) + if func == "_set_name": + kwargs["name"] = "hello" - if func == "_set_name": - kwargs["name"] = "hello" - - with pytest.raises(ValueError, match=msg): - getattr(string_series, func)(**kwargs) + with pytest.raises(ValueError, match=msg): + getattr(string_series, func)(**kwargs)
I've seen quite a few test files that only use a class for historic reasons. As functions are easier to comprehend, I think it makes sense to move in this direction.

- [x] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/32184
2020-02-22T18:16:10Z
2020-02-22T20:59:05Z
2020-02-22T20:59:05Z
2020-02-22T20:59:11Z
DOC: Fix SA04 errors in docstrings #28792
diff --git a/pandas/core/base.py b/pandas/core/base.py index b9aeb32eea5c1..3560bc29869eb 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -1033,7 +1033,8 @@ def argmin(self, axis=None, skipna=True, *args, **kwargs): See Also -------- - numpy.ndarray.argmin + numpy.ndarray.argmin : Return indices of the minimum values along + the given axis. """ nv.validate_minmax_axis(axis) nv.validate_argmax_with_skipna(skipna, args, kwargs) @@ -1053,7 +1054,8 @@ def tolist(self): See Also -------- - numpy.ndarray.tolist + numpy.ndarray.tolist : Return the array as an a.ndim-levels deep + nested list of Python scalars. """ if not isinstance(self._values, np.ndarray): # check for ndarray instead of dtype to catch DTA/TDA @@ -1400,7 +1402,8 @@ def memory_usage(self, deep=False): See Also -------- - numpy.ndarray.nbytes + numpy.ndarray.nbytes : Total bytes consumed by the elements of the + array. Notes ----- @@ -1471,8 +1474,8 @@ def factorize(self, sort=False, na_sentinel=-1): See Also -------- - sort_values - numpy.searchsorted + sort_values : Sort by the values along either axis. + numpy.searchsorted : Similar method from NumPy. Notes ----- diff --git a/pandas/core/computation/eval.py b/pandas/core/computation/eval.py index f6947d5ec6233..56e7e081729a7 100644 --- a/pandas/core/computation/eval.py +++ b/pandas/core/computation/eval.py @@ -266,8 +266,10 @@ def eval( See Also -------- - DataFrame.query - DataFrame.eval + DataFrame.query : Evaluates a boolean expression to query the columns + of a frame. + DataFrame.eval : Evaluate a string describing operations on + DataFrame columns. Notes ----- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index ff7c481d550d4..79ffce648918d 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1750,8 +1750,9 @@ def empty(self) -> bool_t: See Also -------- - Series.dropna - DataFrame.dropna + Series.dropna : Return series without null values. + DataFrame.dropna : Return DataFrame with labels on given axis omitted + where (all or any) data are missing. Notes ----- @@ -2170,7 +2171,7 @@ def to_json( See Also -------- - read_json + read_json : Convert a JSON string to pandas object. Notes ----- @@ -4449,7 +4450,8 @@ def filter( See Also -------- - DataFrame.loc + DataFrame.loc : Access a group of rows and columns + by label(s) or a boolean array. Notes ----- @@ -4881,9 +4883,10 @@ def sample( See Also -------- - DataFrame.apply - DataFrame.applymap - Series.map + DataFrame.apply : Apply a function along input axis of DataFrame. + DataFrame.applymap : Apply a function elementwise on a whole DataFrame. + Series.map : Apply a mapping correspondence on a + :class:`~pandas.Series`. Notes -----
- [x] xref #28792
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/32182
2020-02-22T17:54:49Z
2020-03-03T01:22:02Z
2020-03-03T01:22:02Z
2020-03-03T01:22:10Z
CLN: unnecessary kwargs for take_with_is_copy
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 40e59f04192a6..a6ab0d4034ddb 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -3262,9 +3262,7 @@ class max_speed ) return self._constructor(new_data).__finalize__(self) - def _take_with_is_copy( - self: FrameOrSeries, indices, axis=0, **kwargs - ) -> FrameOrSeries: + def _take_with_is_copy(self: FrameOrSeries, indices, axis=0) -> FrameOrSeries: """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing @@ -3272,7 +3270,7 @@ def _take_with_is_copy( See the docstring of `take` for full explanation of the parameters. """ - result = self.take(indices=indices, axis=axis, **kwargs) + result = self.take(indices=indices, axis=axis) # Maybe set copy if we didn't actually change the index. if not result._get_axis(axis).equals(self._get_axis(axis)): result._set_is_copy(self) diff --git a/pandas/core/series.py b/pandas/core/series.py index 2182374337c84..67634a13e9ad0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -812,7 +812,7 @@ def take(self, indices, axis=0, is_copy=None, **kwargs) -> "Series": new_values, index=new_index, fastpath=True ).__finalize__(self) - def _take_with_is_copy(self, indices, axis=0, **kwargs): + def _take_with_is_copy(self, indices, axis=0): """ Internal version of the `take` method that sets the `_is_copy` attribute to keep track of the parent dataframe (using in indexing @@ -821,7 +821,7 @@ def _take_with_is_copy(self, indices, axis=0, **kwargs): See the docstring of `take` for full explanation of the parameters. """ - return self.take(indices=indices, axis=axis, **kwargs) + return self.take(indices=indices, axis=axis) def _ixs(self, i: int, axis: int = 0): """
https://api.github.com/repos/pandas-dev/pandas/pulls/32181
2020-02-22T17:21:25Z
2020-02-22T21:07:30Z
2020-02-22T21:07:30Z
2020-02-22T21:08:47Z
DOC: Fix SA04 errors in docstrings xref #28792
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index 7201629cb086e..f9059054ba59f 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -313,8 +313,8 @@ def unique(values): See Also -------- - Index.unique - Series.unique + Index.unique : Return unique values from an Index. + Series.unique : Return unique values of Series object. Examples -------- @@ -1515,7 +1515,7 @@ def take(arr, indices, axis: int = 0, allow_fill: bool = False, fill_value=None) See Also -------- - numpy.take + numpy.take : Take elements from an array along an axis. Examples -------- diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f304fadbab871..406ba894d9ff5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -358,9 +358,9 @@ class DataFrame(NDFrame): -------- DataFrame.from_records : Constructor from tuples, also record arrays. DataFrame.from_dict : From dicts of Series, arrays, or dicts. - read_csv - read_table - read_clipboard + read_csv : Read a comma-separated values (csv) file into DataFrame. + read_table : Read general delimited file into DataFrame. + read_clipboard : Read text from clipboard into DataFrame. Examples -------- @@ -7393,8 +7393,9 @@ def corr(self, method="pearson", min_periods=1) -> "DataFrame": See Also -------- - DataFrame.corrwith - Series.corr + DataFrame.corrwith : Compute pairwise correlation with another + DataFrame or Series. + Series.corr : Compute the correlation between two Series. Examples -------- @@ -7596,7 +7597,7 @@ def corrwith(self, other, axis=0, drop=False, method="pearson") -> Series: See Also -------- - DataFrame.corr + DataFrame.corr : Compute pairwise correlation of columns. """ axis = self._get_axis_number(axis) this = self._get_numeric_data() @@ -8001,7 +8002,7 @@ def idxmin(self, axis=0, skipna=True) -> Series: See Also -------- - Series.idxmin + Series.idxmin : Return index of the minimum element. Notes ----- @@ -8039,7 +8040,7 @@ def idxmax(self, axis=0, skipna=True) -> Series: See Also -------- - Series.idxmax + Series.idxmax : Return index of the maximum element. Notes ----- diff --git a/pandas/core/series.py b/pandas/core/series.py index d984225f8fd89..91316a0ce1b43 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -552,7 +552,7 @@ def ravel(self, order="C"): See Also -------- - numpy.ndarray.ravel + numpy.ndarray.ravel : Return a flattened array. """ return self._values.ravel(order=order) @@ -2076,6 +2076,9 @@ def round(self, decimals=0, *args, **kwargs) -> "Series": decimals : int, default 0 Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. + *args, **kwargs + Additional arguments and keywords have no effect but might be + accepted for compatibility with NumPy. Returns ------- @@ -2130,8 +2133,8 @@ def quantile(self, q=0.5, interpolation="linear"): See Also -------- - core.window.Rolling.quantile - numpy.percentile + core.window.Rolling.quantile : Calculate the rolling quantile. + numpy.percentile : Returns the q-th percentile(s) of the array elements. Examples -------- @@ -3149,7 +3152,7 @@ def argsort(self, axis=0, kind="quicksort", order=None) -> "Series": See Also -------- - numpy.ndarray.argsort + numpy.ndarray.argsort : Returns the indices that would sort this array. """ values = self._values mask = isna(values)
- [x] xref #28792
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/32180
2020-02-22T15:40:10Z
2020-03-03T14:22:34Z
2020-03-03T14:22:34Z
2020-03-03T14:22:43Z
DOC: Added recommended "msg" creation format
diff --git a/doc/source/development/code_style.rst b/doc/source/development/code_style.rst index 17f8783f71bfb..415ea49fdbcf3 100644 --- a/doc/source/development/code_style.rst +++ b/doc/source/development/code_style.rst @@ -41,6 +41,38 @@ For example: foo.__class__ +Error message capturing (in tests) +---------------------------------- + +There are times, when writing a test case, that a statement/function/method can raise +multiple error messages; we should join the different error messages +instead of having one giant string. + +For example: + +**Good:** + +.. code-block:: python + + msg = "|".join( + [ + "foo", + "bar", + "baz" + ] + ) + +**Bad:** + +.. code-block:: python + + msg = ( + "foo|" + "bar|" + "baz" + ) + + String formatting =================
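Not part of the diff above, but for context: pytest treats the `match` argument as a regular expression, so a msg joined with `"|"` matches whichever of the alternatives is actually raised. A minimal hypothetical sketch (the error messages here are made up for illustration):

```python
import pytest

# hypothetical alternatives; "|" makes the regex accept any one of them
msg = "|".join(
    [
        "unable to parse string",
        "could not convert string to float",
    ]
)


def test_conversion_error_message():
    # float() raises "could not convert string to float: ...",
    # which satisfies the second alternative of the pattern
    with pytest.raises(ValueError, match=msg):
        float("not-a-number")
```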
- [x] ref https://github.com/pandas-dev/pandas/pull/32158#discussion_r382710851
- [ ] tests added / passed
- [x] passes `black pandas`
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/32178
2020-02-22T13:03:56Z
2020-02-22T21:21:04Z
null
2020-02-28T09:40:25Z
CLN: some code cleanups
diff --git a/pandas/_libs/hashtable.pyx b/pandas/_libs/hashtable.pyx index 884db9ee931d4..e80f134290a7e 100644 --- a/pandas/_libs/hashtable.pyx +++ b/pandas/_libs/hashtable.pyx @@ -86,7 +86,10 @@ cdef class Factorizer: self, ndarray[object] values, sort=False, na_sentinel=-1, na_value=None ): """ + Examples + -------- Factorize values with nans replaced by na_sentinel + >>> factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20) array([ 0, 1, 20]) """ @@ -131,7 +134,10 @@ cdef class Int64Factorizer: def factorize(self, const int64_t[:] values, sort=False, na_sentinel=-1, na_value=None): """ + Examples + -------- Factorize values with nans replaced by na_sentinel + >>> factorize(np.array([1,2,np.nan], dtype='O'), na_sentinel=20) array([ 0, 1, 20]) """ diff --git a/pandas/_libs/internals.pyx b/pandas/_libs/internals.pyx index 8bbbc6db94842..437406cbbd819 100644 --- a/pandas/_libs/internals.pyx +++ b/pandas/_libs/internals.pyx @@ -105,8 +105,7 @@ cdef class BlockPlacement: Py_ssize_t start, stop, end, _ if not self._has_array: start, stop, step, _ = slice_get_indices_ex(self._as_slice) - self._as_array = np.arange(start, stop, step, - dtype=np.int64) + self._as_array = np.arange(start, stop, step, dtype=np.int64) self._has_array = True return self._as_array @@ -283,8 +282,7 @@ cdef slice_getitem(slice slc, ind): s_start, s_stop, s_step, s_len = slice_get_indices_ex(slc) if isinstance(ind, slice): - ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind, - s_len) + ind_start, ind_stop, ind_step, ind_len = slice_get_indices_ex(ind, s_len) if ind_step > 0 and ind_len == s_len: # short-cut for no-op slice diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx index 1166768472449..55999f2d6fd74 100644 --- a/pandas/_libs/interval.pyx +++ b/pandas/_libs/interval.pyx @@ -481,8 +481,7 @@ cdef class Interval(IntervalMixin): @cython.wraparound(False) @cython.boundscheck(False) -def intervals_to_interval_bounds(ndarray intervals, - bint validate_closed=True): +def intervals_to_interval_bounds(ndarray intervals, bint validate_closed=True): """ Parameters ---------- diff --git a/pandas/_libs/join.pyx b/pandas/_libs/join.pyx index f696591cf3bd1..cbe0e71153565 100644 --- a/pandas/_libs/join.pyx +++ b/pandas/_libs/join.pyx @@ -817,18 +817,22 @@ def asof_join_nearest_on_X_by_Y(asof_t[:] left_values, right_indexer = np.empty(left_size, dtype=np.int64) # search both forward and backward - bli, bri = asof_join_backward_on_X_by_Y(left_values, - right_values, - left_by_values, - right_by_values, - allow_exact_matches, - tolerance) - fli, fri = asof_join_forward_on_X_by_Y(left_values, - right_values, - left_by_values, - right_by_values, - allow_exact_matches, - tolerance) + bli, bri = asof_join_backward_on_X_by_Y( + left_values, + right_values, + left_by_values, + right_by_values, + allow_exact_matches, + tolerance, + ) + fli, fri = asof_join_forward_on_X_by_Y( + left_values, + right_values, + left_by_values, + right_by_values, + allow_exact_matches, + tolerance, + ) for i in range(len(bri)): # choose timestamp from right with smaller difference diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index a176c4e41e834..b78b623bfa187 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -1,27 +1,44 @@ import cython -from cpython.datetime cimport (PyDateTime_Check, PyDate_Check, - PyDateTime_IMPORT, - timedelta, datetime, date, time) +from cpython.datetime cimport ( + PyDate_Check, + PyDateTime_Check, + PyDateTime_IMPORT, + date, + datetime, + time, + timedelta, +) # 
import datetime C API PyDateTime_IMPORT cimport numpy as cnp -from numpy cimport int64_t, ndarray, float64_t +from numpy cimport float64_t, int64_t, ndarray import numpy as np cnp.import_array() import pytz from pandas._libs.util cimport ( - is_integer_object, is_float_object, is_datetime64_object) + is_datetime64_object, + is_float_object, + is_integer_object, +) from pandas._libs.tslibs.c_timestamp cimport _Timestamp from pandas._libs.tslibs.np_datetime cimport ( - check_dts_bounds, npy_datetimestruct, _string_to_dts, dt64_to_dtstruct, - dtstruct_to_dt64, pydatetime_to_dt64, pydate_to_dt64, get_datetime64_value) + _string_to_dts, + check_dts_bounds, + dt64_to_dtstruct, + dtstruct_to_dt64, + get_datetime64_value, + npy_datetimestruct, + pydate_to_dt64, + pydatetime_to_dt64, +) + from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime from pandas._libs.tslibs.parsing import parse_datetime_string @@ -44,45 +61,71 @@ from pandas._libs.tslibs.timestamps cimport create_timestamp_from_ts from pandas._libs.tslibs.timestamps import Timestamp from pandas._libs.tslibs.tzconversion cimport ( - tz_convert_single, tz_convert_utc_to_tzlocal) + tz_convert_single, + tz_convert_utc_to_tzlocal, +) cdef inline object create_datetime_from_ts( - int64_t value, npy_datetimestruct dts, - object tz, object freq, bint fold): - """ convenience routine to construct a datetime.datetime from its parts """ - return datetime(dts.year, dts.month, dts.day, dts.hour, - dts.min, dts.sec, dts.us, tz, fold=fold) + int64_t value, + npy_datetimestruct dts, + object tz, + object freq, + bint fold +): + """ + Convenience routine to construct a datetime.datetime from its parts. + """ + return datetime( + dts.year, dts.month, dts.day, dts.hour, dts.min, dts.sec, dts.us, tz, fold=fold + ) cdef inline object create_date_from_ts( - int64_t value, npy_datetimestruct dts, - object tz, object freq, bint fold): - """ convenience routine to construct a datetime.date from its parts """ + int64_t value, + npy_datetimestruct dts, + object tz, + object freq, + bint fold +): + """ + Convenience routine to construct a datetime.date from its parts. + """ # GH 25057 add fold argument to match other func_create signatures return date(dts.year, dts.month, dts.day) cdef inline object create_time_from_ts( - int64_t value, npy_datetimestruct dts, - object tz, object freq, bint fold): - """ convenience routine to construct a datetime.time from its parts """ + int64_t value, + npy_datetimestruct dts, + object tz, + object freq, + bint fold +): + """ + Convenience routine to construct a datetime.time from its parts. + """ return time(dts.hour, dts.min, dts.sec, dts.us, tz, fold=fold) @cython.wraparound(False) @cython.boundscheck(False) -def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None, - bint fold=0, str box="datetime"): +def ints_to_pydatetime( + const int64_t[:] arr, + object tz=None, + object freq=None, + bint fold=0, + str box="datetime" +): """ - Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp + Convert an i8 repr to an ndarray of datetimes, date, time or Timestamp. 
Parameters ---------- - arr : array of i8 - tz : str, default None + arr : array of i8 + tz : str, optional convert to this timezone - freq : str/Offset, default None + freq : str/Offset, optional freq to convert fold : bint, default is 0 Due to daylight saving time, one wall clock time can occur twice @@ -91,17 +134,16 @@ def ints_to_pydatetime(const int64_t[:] arr, object tz=None, object freq=None, the wall clock hits the ambiguous time .. versionadded:: 1.1.0 - box : {'datetime', 'timestamp', 'date', 'time'}, default 'datetime' - If datetime, convert to datetime.datetime - If date, convert to datetime.date - If time, convert to datetime.time - If Timestamp, convert to pandas.Timestamp + box : {'datetime', 'timestamp', 'date', 'time'}, default 'datetime' + * If datetime, convert to datetime.datetime + * If date, convert to datetime.date + * If time, convert to datetime.time + * If Timestamp, convert to pandas.Timestamp Returns ------- - result : array of dtype specified by box + ndarray of dtype specified by box """ - cdef: Py_ssize_t i, n = len(arr) ndarray[int64_t] trans @@ -224,8 +266,12 @@ def _test_parse_iso8601(ts: str): @cython.wraparound(False) @cython.boundscheck(False) -def format_array_from_datetime(ndarray[int64_t] values, object tz=None, - object format=None, object na_rep=None): +def format_array_from_datetime( + ndarray[int64_t] values, + object tz=None, + object format=None, + object na_rep=None +): """ return a np object array of the string formatted values @@ -303,8 +349,12 @@ def format_array_from_datetime(ndarray[int64_t] values, object tz=None, return result -def array_with_unit_to_datetime(ndarray values, ndarray mask, object unit, - str errors='coerce'): +def array_with_unit_to_datetime( + ndarray values, + ndarray mask, + object unit, + str errors='coerce' +): """ Convert the ndarray to datetime according to the time unit. @@ -322,14 +372,13 @@ def array_with_unit_to_datetime(ndarray values, ndarray mask, object unit, Parameters ---------- values : ndarray of object - Date-like objects to convert - mask : ndarray of bool - Not-a-time mask for non-nullable integer types conversion, - can be None + Date-like objects to convert. + mask : boolean ndarray + Not-a-time mask for non-nullable integer types conversion, can be None. unit : object - Time unit to use during conversion + Time unit to use during conversion. errors : str, default 'raise' - Error behavior when parsing + Error behavior when parsing. 
Returns ------- @@ -382,8 +431,7 @@ def array_with_unit_to_datetime(ndarray values, ndarray mask, object unit, if ((fvalues < Timestamp.min.value).any() or (fvalues > Timestamp.max.value).any()): - raise OutOfBoundsDatetime(f"cannot convert input with unit " - f"'{unit}'") + raise OutOfBoundsDatetime(f"cannot convert input with unit '{unit}'") result = (iresult * m).astype('M8[ns]') iresult = result.view('i8') iresult[mask] = NPY_NAT @@ -409,8 +457,8 @@ def array_with_unit_to_datetime(ndarray values, ndarray mask, object unit, except OverflowError: if is_raise: raise OutOfBoundsDatetime( - f"cannot convert input {val} with the unit " - f"'{unit}'") + f"cannot convert input {val} with the unit '{unit}'" + ) elif is_ignore: raise AssertionError iresult[i] = NPY_NAT @@ -425,16 +473,16 @@ def array_with_unit_to_datetime(ndarray values, ndarray mask, object unit, except ValueError: if is_raise: raise ValueError( - f"non convertible value {val} with the unit " - f"'{unit}'") + f"non convertible value {val} with the unit '{unit}'" + ) elif is_ignore: raise AssertionError iresult[i] = NPY_NAT except OverflowError: if is_raise: raise OutOfBoundsDatetime( - f"cannot convert input {val} with the unit " - f"'{unit}'") + f"cannot convert input {val} with the unit '{unit}'" + ) elif is_ignore: raise AssertionError iresult[i] = NPY_NAT @@ -442,8 +490,9 @@ def array_with_unit_to_datetime(ndarray values, ndarray mask, object unit, else: if is_raise: - raise ValueError(f"unit='{unit}' not valid with non-numerical " - f"val='{val}'") + raise ValueError( + f"unit='{unit}' not valid with non-numerical val='{val}'" + ) if is_ignore: raise AssertionError @@ -486,9 +535,14 @@ def array_with_unit_to_datetime(ndarray values, ndarray mask, object unit, @cython.wraparound(False) @cython.boundscheck(False) -cpdef array_to_datetime(ndarray[object] values, str errors='raise', - bint dayfirst=False, bint yearfirst=False, - object utc=None, bint require_iso8601=False): +cpdef array_to_datetime( + ndarray[object] values, + str errors='raise', + bint dayfirst=False, + bint yearfirst=False, + object utc=None, + bint require_iso8601=False +): """ Converts a 1D array of date-like values to a numpy array of either: 1) datetime64[ns] data @@ -625,8 +679,9 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', iresult[i] = NPY_NAT continue elif is_raise: - raise ValueError(f"time data {val} doesn't " - f"match format specified") + raise ValueError( + f"time data {val} doesn't match format specified" + ) return values, tz_out try: @@ -641,8 +696,7 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', if is_coerce: iresult[i] = NPY_NAT continue - raise TypeError("invalid string coercion to " - "datetime") + raise TypeError("invalid string coercion to datetime") if tz is not None: seen_datetime_offset = 1 @@ -708,8 +762,7 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', return ignore_errors_out_of_bounds_fallback(values), tz_out except TypeError: - return array_to_datetime_object(values, errors, - dayfirst, yearfirst) + return array_to_datetime_object(values, errors, dayfirst, yearfirst) if seen_datetime and seen_integer: # we have mixed datetimes & integers @@ -724,8 +777,7 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', elif is_raise: raise ValueError("mixed datetimes and integers in passed array") else: - return array_to_datetime_object(values, errors, - dayfirst, yearfirst) + return array_to_datetime_object(values, errors, dayfirst, yearfirst) if 
seen_datetime_offset and not utc_convert: # GH#17697 @@ -736,8 +788,7 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', # (with individual dateutil.tzoffsets) are returned is_same_offsets = len(out_tzoffset_vals) == 1 if not is_same_offsets: - return array_to_datetime_object(values, errors, - dayfirst, yearfirst) + return array_to_datetime_object(values, errors, dayfirst, yearfirst) else: tz_offset = out_tzoffset_vals.pop() tz_out = pytz.FixedOffset(tz_offset / 60.) @@ -784,8 +835,12 @@ cdef ignore_errors_out_of_bounds_fallback(ndarray[object] values): @cython.wraparound(False) @cython.boundscheck(False) -cdef array_to_datetime_object(ndarray[object] values, str errors, - bint dayfirst=False, bint yearfirst=False): +cdef array_to_datetime_object( + ndarray[object] values, + str errors, + bint dayfirst=False, + bint yearfirst=False +): """ Fall back function for array_to_datetime diff --git a/pandas/_libs/tslibs/fields.pyx b/pandas/_libs/tslibs/fields.pyx index 8bee7da6231ba..50b7fba67e78f 100644 --- a/pandas/_libs/tslibs/fields.pyx +++ b/pandas/_libs/tslibs/fields.pyx @@ -38,7 +38,7 @@ def get_time_micros(const int64_t[:] dtindex): cdef: ndarray[int64_t] micros - micros = np.mod(dtindex, DAY_SECONDS * 1000000000, dtype=np.int64) + micros = np.mod(dtindex, DAY_SECONDS * 1_000_000_000, dtype=np.int64) micros //= 1000 return micros @@ -54,13 +54,15 @@ def build_field_sarray(const int64_t[:] dtindex): npy_datetimestruct dts ndarray[int32_t] years, months, days, hours, minutes, seconds, mus - sa_dtype = [('Y', 'i4'), # year - ('M', 'i4'), # month - ('D', 'i4'), # day - ('h', 'i4'), # hour - ('m', 'i4'), # min - ('s', 'i4'), # second - ('u', 'i4')] # microsecond + sa_dtype = [ + ("Y", "i4"), # year + ("M", "i4"), # month + ("D", "i4"), # day + ("h", "i4"), # hour + ("m", "i4"), # min + ("s", "i4"), # second + ("u", "i4"), # microsecond + ] out = np.empty(count, dtype=sa_dtype) @@ -157,9 +159,12 @@ def get_start_end_field(const int64_t[:] dtindex, object field, int mo_off, dom, doy, dow, ldom _month_offset = np.array( - [[0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365], - [0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366]], - dtype=np.int32) + [ + [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365], + [0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366], + ], + dtype=np.int32, + ) out = np.zeros(count, dtype='int8') diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 68a25d0cc481a..7fec4ba5e7d25 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx @@ -764,7 +764,9 @@ NaT = c_NaT # Python-visible # ---------------------------------------------------------------------- cdef inline bint checknull_with_nat(object val): - """ utility to check if a value is a nat or not """ + """ + Utility to check if a value is a nat or not. + """ return val is None or util.is_nan(val) or val is c_NaT or val is C_NA diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index 5cd3467eed042..64b79200028b6 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -1090,11 +1090,10 @@ default 'raise' def normalize(self): """ - Normalize Timestamp to midnight, preserving - tz information. + Normalize Timestamp to midnight, preserving tz information. 
""" if self.tz is None or is_utc(self.tz): - DAY_NS = DAY_SECONDS * 1000000000 + DAY_NS = DAY_SECONDS * 1_000_000_000 normalized_value = self.value - (self.value % DAY_NS) return Timestamp(normalized_value).tz_localize(self.tz) normalized_value = normalize_i8_timestamps( @@ -1113,7 +1112,7 @@ cdef int64_t _NS_UPPER_BOUND = np.iinfo(np.int64).max # INT64_MIN + 1 == -9223372036854775807 # but to allow overflow free conversion with a microsecond resolution # use the smallest value with a 0 nanosecond unit (0s in last 3 digits) -cdef int64_t _NS_LOWER_BOUND = -9223372036854775000 +cdef int64_t _NS_LOWER_BOUND = -9_223_372_036_854_775_000 # Resolution is in nanoseconds Timestamp.min = Timestamp(_NS_LOWER_BOUND)
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `black pandas`
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/32177
2020-02-22T12:34:09Z
2020-02-28T16:16:47Z
2020-02-28T16:16:47Z
2020-02-29T10:27:57Z
CLN: Some code cleanups
diff --git a/pandas/_libs/indexing.pyx b/pandas/_libs/indexing.pyx index cdccdb504571c..316943edee124 100644 --- a/pandas/_libs/indexing.pyx +++ b/pandas/_libs/indexing.pyx @@ -1,7 +1,6 @@ cdef class _NDFrameIndexerBase: """ - A base class for _NDFrameIndexer for fast instantiation and attribute - access. + A base class for _NDFrameIndexer for fast instantiation and attribute access. """ cdef public object obj, name, _ndim diff --git a/pandas/_libs/sparse.pyx b/pandas/_libs/sparse.pyx index 3a6dd506b2428..50f220af0f5bc 100644 --- a/pandas/_libs/sparse.pyx +++ b/pandas/_libs/sparse.pyx @@ -188,8 +188,7 @@ cdef class IntIndex(SparseIndex): return -1 @cython.wraparound(False) - cpdef ndarray[int32_t] lookup_array(self, ndarray[ - int32_t, ndim=1] indexer): + cpdef ndarray[int32_t] lookup_array(self, ndarray[int32_t, ndim=1] indexer): """ Vectorized lookup, returns ndarray[int32_t] """ @@ -424,12 +423,9 @@ cdef class BlockIndex(SparseIndex): """ Intersect two BlockIndex objects - Parameters - ---------- - Returns ------- - intersection : BlockIndex + BlockIndex """ cdef: BlockIndex y @@ -518,7 +514,7 @@ cdef class BlockIndex(SparseIndex): Returns ------- - union : BlockIndex + BlockIndex """ return BlockUnion(self, y.to_block_index()).result @@ -548,8 +544,7 @@ cdef class BlockIndex(SparseIndex): return -1 @cython.wraparound(False) - cpdef ndarray[int32_t] lookup_array(self, ndarray[ - int32_t, ndim=1] indexer): + cpdef ndarray[int32_t] lookup_array(self, ndarray[int32_t, ndim=1] indexer): """ Vectorized lookup, returns ndarray[int32_t] """ diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 57b4100fbceb0..6e978d495c325 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -84,12 +84,11 @@ def ensure_datetime64ns(arr: ndarray, copy: bool=True): Parameters ---------- arr : ndarray - copy : boolean, default True + copy : bool, default True Returns ------- - result : ndarray with dtype datetime64[ns] - + ndarray with dtype datetime64[ns] """ cdef: Py_ssize_t i, n = arr.size diff --git a/pandas/_libs/tslibs/resolution.pyx b/pandas/_libs/tslibs/resolution.pyx index 1e0eb7f97ec54..ecf31c15bb72c 100644 --- a/pandas/_libs/tslibs/resolution.pyx +++ b/pandas/_libs/tslibs/resolution.pyx @@ -110,8 +110,8 @@ def get_freq_group(freq) -> int: """ Return frequency code group of given frequency str or offset. - Example - ------- + Examples + -------- >>> get_freq_group('W-MON') 4000 @@ -193,8 +193,8 @@ class Resolution: """ Return resolution str against resolution code. - Example - ------- + Examples + -------- >>> Resolution.get_str(Resolution.RESO_SEC) 'second' """ @@ -205,8 +205,8 @@ class Resolution: """ Return resolution str against resolution code. - Example - ------- + Examples + -------- >>> Resolution.get_reso('second') 2 @@ -220,8 +220,8 @@ class Resolution: """ Return frequency str against resolution str. - Example - ------- + Examples + -------- >>> f.Resolution.get_freq_group('day') 4000 """ @@ -232,8 +232,8 @@ class Resolution: """ Return frequency str against resolution str. - Example - ------- + Examples + -------- >>> f.Resolution.get_freq('day') 'D' """ @@ -244,8 +244,8 @@ class Resolution: """ Return resolution str against frequency str. - Example - ------- + Examples + -------- >>> Resolution.get_str_from_freq('H') 'hour' """ @@ -256,8 +256,8 @@ class Resolution: """ Return resolution code against frequency str. 
- Example - ------- + Examples + -------- >>> Resolution.get_reso_from_freq('H') 4 @@ -273,8 +273,8 @@ class Resolution: Parameters ---------- - value : integer or float - freq : string + value : int or float + freq : str Frequency string Raises @@ -282,8 +282,8 @@ class Resolution: ValueError If the float cannot be converted to an integer at any resolution. - Example - ------- + Examples + -------- >>> Resolution.get_stride_from_decimal(1.5, 'T') (90, 'S') @@ -298,8 +298,9 @@ class Resolution: else: start_reso = cls.get_reso_from_freq(freq) if start_reso == 0: - raise ValueError("Could not convert to integer offset " - "at any resolution") + raise ValueError( + "Could not convert to integer offset at any resolution" + ) next_value = cls._reso_mult_map[start_reso] * value next_name = cls._reso_str_bump_map[freq] diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 3742506a7f8af..66660c5f641fd 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -37,51 +37,61 @@ from pandas._libs.tslibs.offsets import _Tick as Tick # Constants # components named tuple -Components = collections.namedtuple('Components', [ - 'days', 'hours', 'minutes', 'seconds', - 'milliseconds', 'microseconds', 'nanoseconds']) - - -cdef dict timedelta_abbrevs = { 'Y': 'Y', - 'y': 'Y', - 'M': 'M', - 'W': 'W', - 'w': 'W', - 'D': 'D', - 'd': 'D', - 'days': 'D', - 'day': 'D', - 'hours': 'h', - 'hour': 'h', - 'hr': 'h', - 'h': 'h', - 'm': 'm', - 'minute': 'm', - 'min': 'm', - 'minutes': 'm', - 't': 'm', - 's': 's', - 'seconds': 's', - 'sec': 's', - 'second': 's', - 'ms': 'ms', - 'milliseconds': 'ms', - 'millisecond': 'ms', - 'milli': 'ms', - 'millis': 'ms', - 'l': 'ms', - 'us': 'us', - 'microseconds': 'us', - 'microsecond': 'us', - 'micro': 'us', - 'micros': 'us', - 'u': 'us', - 'ns': 'ns', - 'nanoseconds': 'ns', - 'nano': 'ns', - 'nanos': 'ns', - 'nanosecond': 'ns', - 'n': 'ns'} +Components = collections.namedtuple( + "Components", + [ + "days", + "hours", + "minutes", + "seconds", + "milliseconds", + "microseconds", + "nanoseconds", + ], +) + +cdef dict timedelta_abbrevs = { + "Y": "Y", + "y": "Y", + "M": "M", + "W": "W", + "w": "W", + "D": "D", + "d": "D", + "days": "D", + "day": "D", + "hours": "h", + "hour": "h", + "hr": "h", + "h": "h", + "m": "m", + "minute": "m", + "min": "m", + "minutes": "m", + "t": "m", + "s": "s", + "seconds": "s", + "sec": "s", + "second": "s", + "ms": "ms", + "milliseconds": "ms", + "millisecond": "ms", + "milli": "ms", + "millis": "ms", + "l": "ms", + "us": "us", + "microseconds": "us", + "microsecond": "us", + "micro": "us", + "micros": "us", + "u": "us", + "ns": "ns", + "nanoseconds": "ns", + "nano": "ns", + "nanos": "ns", + "nanosecond": "ns", + "n": "ns", +} _no_input = object() @@ -137,9 +147,11 @@ cpdef int64_t delta_to_nanoseconds(delta) except? 
-1: if is_integer_object(delta): return delta if PyDelta_Check(delta): - return (delta.days * 24 * 60 * 60 * 1000000 + - delta.seconds * 1000000 + - delta.microseconds) * 1000 + return ( + delta.days * 24 * 60 * 60 * 1_000_000 + + delta.seconds * 1_000_000 + + delta.microseconds + ) * 1000 raise TypeError(type(delta)) @@ -212,9 +224,8 @@ def array_to_timedelta64(object[:] values, unit='ns', errors='raise'): Py_ssize_t i, n int64_t[:] iresult - if errors not in ('ignore', 'raise', 'coerce'): - raise ValueError("errors must be one of 'ignore', " - "'raise', or 'coerce'}") + if errors not in {'ignore', 'raise', 'coerce'}: + raise ValueError("errors must be one of {'ignore', 'raise', or 'coerce'}") n = values.shape[0] result = np.empty(n, dtype='m8[ns]') @@ -255,34 +266,34 @@ cpdef inline object precision_from_unit(object unit): int p if unit == 'Y': - m = 1000000000L * 31556952 + m = 1000000000 * 31556952 p = 9 elif unit == 'M': - m = 1000000000L * 2629746 + m = 1000000000 * 2629746 p = 9 elif unit == 'W': - m = 1000000000L * DAY_SECONDS * 7 + m = 1000000000 * DAY_SECONDS * 7 p = 9 elif unit == 'D' or unit == 'd': - m = 1000000000L * DAY_SECONDS + m = 1000000000 * DAY_SECONDS p = 9 elif unit == 'h': - m = 1000000000L * 3600 + m = 1000000000 * 3600 p = 9 elif unit == 'm': - m = 1000000000L * 60 + m = 1000000000 * 60 p = 9 elif unit == 's': - m = 1000000000L + m = 1000000000 p = 9 elif unit == 'ms': - m = 1000000L + m = 1000000 p = 6 elif unit == 'us': - m = 1000L + m = 1000 p = 3 elif unit == 'ns' or unit is None: - m = 1L + m = 1 p = 0 else: raise ValueError(f"cannot cast unit {unit}") @@ -383,13 +394,13 @@ cdef inline int64_t parse_timedelta_string(str ts) except? -1: if len(number): if current_unit is None: current_unit = 'h' - m = 1000000000L * 3600 + m = 1000000000 * 3600 elif current_unit == 'h': current_unit = 'm' - m = 1000000000L * 60 + m = 1000000000 * 60 elif current_unit == 'm': current_unit = 's' - m = 1000000000L + m = 1000000000 r = <int64_t>int(''.join(number)) * m result += timedelta_as_neg(r, neg) have_hhmmss = 1 @@ -408,7 +419,7 @@ cdef inline int64_t parse_timedelta_string(str ts) except? -1: # hh:mm:ss (so current_unit is 'm') if current_unit != 'm': raise ValueError("expected hh:mm:ss format before .") - m = 1000000000L + m = 1000000000 r = <int64_t>int(''.join(number)) * m result += timedelta_as_neg(r, neg) have_value = 1 @@ -437,9 +448,9 @@ cdef inline int64_t parse_timedelta_string(str ts) except? -1: raise ValueError("no units specified") if len(frac) > 0 and len(frac) <= 3: - m = 10**(3 -len(frac)) * 1000L * 1000L + m = 10**(3 -len(frac)) * 1000 * 1000 elif len(frac) > 3 and len(frac) <= 6: - m = 10**(6 -len(frac)) * 1000L + m = 10**(6 -len(frac)) * 1000 else: m = 10**(9 -len(frac)) @@ -451,7 +462,7 @@ cdef inline int64_t parse_timedelta_string(str ts) except? -1: elif current_unit is not None: if current_unit != 'm': raise ValueError("expected hh:mm:ss format") - m = 1000000000L + m = 1000000000 r = <int64_t>int(''.join(number)) * m result += timedelta_as_neg(r, neg) @@ -1018,6 +1029,7 @@ cdef class _Timedelta(timedelta): **Using string input** >>> td = pd.Timedelta('1 days 2 min 3 us 42 ns') + >>> td.nanoseconds 42 @@ -1095,7 +1107,7 @@ cdef class _Timedelta(timedelta): Returns ------- - formatted : str + str See Also -------- @@ -1115,6 +1127,7 @@ cdef class _Timedelta(timedelta): -------- >>> td = pd.Timedelta(days=6, minutes=50, seconds=3, ... 
milliseconds=10, microseconds=10, nanoseconds=12) + >>> td.isoformat() 'P6DT0H50M3.010010012S' >>> pd.Timedelta(hours=1, seconds=10).isoformat() @@ -1190,10 +1203,12 @@ class Timedelta(_Timedelta): value = nano + convert_to_timedelta64(timedelta(**kwargs), 'ns') except TypeError as e: - raise ValueError("cannot construct a Timedelta from the " - "passed arguments, allowed keywords are " - "[weeks, days, hours, minutes, seconds, " - "milliseconds, microseconds, nanoseconds]") + raise ValueError( + "cannot construct a Timedelta from the passed arguments, " + "allowed keywords are " + "[weeks, days, hours, minutes, seconds, " + "milliseconds, microseconds, nanoseconds]" + ) if unit in {'Y', 'y', 'M'}: raise ValueError( @@ -1230,8 +1245,9 @@ class Timedelta(_Timedelta): return NaT else: raise ValueError( - f"Value must be Timedelta, string, integer, " - f"float, timedelta or convertible, not {type(value).__name__}") + "Value must be Timedelta, string, integer, " + f"float, timedelta or convertible, not {type(value).__name__}" + ) if is_timedelta64_object(value): value = value.view('i8') @@ -1509,10 +1525,13 @@ cdef _rfloordiv(int64_t value, right): return right // value -cdef _broadcast_floordiv_td64(int64_t value, object other, - object (*operation)(int64_t value, - object right)): - """Boilerplate code shared by Timedelta.__floordiv__ and +cdef _broadcast_floordiv_td64( + int64_t value, + object other, + object (*operation)(int64_t value, object right) +): + """ + Boilerplate code shared by Timedelta.__floordiv__ and Timedelta.__rfloordiv__ because np.timedelta64 does not implement these. Parameters diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 35ee87e714fa8..0ec3e2ad467e1 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -2,9 +2,11 @@ from datetime import timezone # dateutil compat from dateutil.tz import ( - tzutc as _dateutil_tzutc, + tzfile as _dateutil_tzfile, tzlocal as _dateutil_tzlocal, - tzfile as _dateutil_tzfile) + tzutc as _dateutil_tzutc, +) + from dateutil.tz import gettz as dateutil_gettz @@ -103,7 +105,9 @@ cpdef inline object maybe_get_tz(object tz): def _p_tz_cache_key(tz): - """ Python interface for cache function to facilitate testing.""" + """ + Python interface for cache function to facilitate testing. + """ return tz_cache_key(tz) @@ -120,7 +124,7 @@ cdef inline object tz_cache_key(object tz): dateutil timezones. Notes - ===== + ----- This cannot just be the hash of a timezone object. Unfortunately, the hashes of two dateutil tz objects which represent the same timezone are not equal (even though the tz objects will compare equal and represent @@ -196,7 +200,7 @@ cdef int64_t[:] unbox_utcoffsets(object transinfo): arr = np.empty(sz, dtype='i8') for i in range(sz): - arr[i] = int(transinfo[i][0].total_seconds()) * 1000000000 + arr[i] = int(transinfo[i][0].total_seconds()) * 1_000_000_000 return arr @@ -217,7 +221,7 @@ cdef object get_dst_info(object tz): if cache_key is None: # e.g. 
pytz.FixedOffset, matplotlib.dates._UTC, # psycopg2.tz.FixedOffsetTimezone - num = int(get_utcoffset(tz, None).total_seconds()) * 1000000000 + num = int(get_utcoffset(tz, None).total_seconds()) * 1_000_000_000 return (np.array([NPY_NAT + 1], dtype=np.int64), np.array([num], dtype=np.int64), None) @@ -313,7 +317,7 @@ cpdef bint tz_compare(object start, object end): Returns: ------- - compare : bint + bool """ # GH 18523 diff --git a/pandas/_libs/writers.pyx b/pandas/_libs/writers.pyx index 73201e75c3c88..9e95dea979577 100644 --- a/pandas/_libs/writers.pyx +++ b/pandas/_libs/writers.pyx @@ -15,8 +15,13 @@ ctypedef fused pandas_string: @cython.boundscheck(False) @cython.wraparound(False) -def write_csv_rows(list data, ndarray data_index, - Py_ssize_t nlevels, ndarray cols, object writer): +def write_csv_rows( + list data, + ndarray data_index, + Py_ssize_t nlevels, + ndarray cols, + object writer +): """ Write the given data to the writer object, pre-allocating where possible for performance improvements. @@ -114,7 +119,9 @@ def convert_json_to_lines(arr: object) -> str: @cython.boundscheck(False) @cython.wraparound(False) def max_len_string_array(pandas_string[:] arr) -> Py_ssize_t: - """ return the maximum size of elements in a 1-dim string array """ + """ + Return the maximum size of elements in a 1-dim string array. + """ cdef: Py_ssize_t i, m = 0, l = 0, length = arr.shape[0] pandas_string val @@ -130,7 +137,9 @@ def max_len_string_array(pandas_string[:] arr) -> Py_ssize_t: cpdef inline Py_ssize_t word_len(object val): - """ return the maximum length of a string or bytes value """ + """ + Return the maximum length of a string or bytes value. + """ cdef: Py_ssize_t l = 0 @@ -148,8 +157,10 @@ cpdef inline Py_ssize_t word_len(object val): @cython.boundscheck(False) @cython.wraparound(False) def string_array_replace_from_nan_rep( - ndarray[object, ndim=1] arr, object nan_rep, - object replace=None): + ndarray[object, ndim=1] arr, + object nan_rep, + object replace=np.nan +): """ Replace the values in the array with 'replacement' if they are 'nan_rep'. Return the same array. @@ -157,9 +168,6 @@ def string_array_replace_from_nan_rep( cdef: Py_ssize_t length = len(arr), i = 0 - if replace is None: - replace = np.nan - for i in range(length): if arr[i] == nan_rep: arr[i] = replace
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
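As an aside on the literal cleanups in the diff above: a minimal sketch (illustrative only, not code from the PR) of why those rewrites are behavior-preserving in Python 3.

```python
# PEP 515 underscore separators are purely cosmetic; the int value is unchanged.
assert 1_000_000 == 1000000

# Python 3 has a single int type, so the Python 2 long-literal suffix
# (e.g. 1000000000L) is invalid syntax and is simply dropped.
ns_per_year = 1000000000 * 31556952  # nanoseconds per average Gregorian year

# A membership test against a set literal, as the diff switches to
# for the `errors` check in array_to_timedelta64.
errors = "raise"
if errors not in {"ignore", "raise", "coerce"}:
    raise ValueError("errors must be one of {'ignore', 'raise', 'coerce'}")
```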
https://api.github.com/repos/pandas-dev/pandas/pulls/32176
2020-02-22T12:10:07Z
2020-02-22T16:27:06Z
2020-02-22T16:27:06Z
2020-02-29T10:27:38Z
BUG: groupby nunique changing values
diff --git a/doc/source/whatsnew/v1.0.2.rst b/doc/source/whatsnew/v1.0.2.rst index 07afe60c9c22a..5f06188def4f2 100644 --- a/doc/source/whatsnew/v1.0.2.rst +++ b/doc/source/whatsnew/v1.0.2.rst @@ -19,6 +19,7 @@ Fixed regressions - Fixed regression in :meth:`Series.align` when ``other`` is a DataFrame and ``method`` is not None (:issue:`31785`) - Fixed regression in :meth:`pandas.core.groupby.RollingGroupby.apply` where the ``raw`` parameter was ignored (:issue:`31754`) - Fixed regression in :meth:`rolling(..).corr() <pandas.core.window.Rolling.corr>` when using a time offset (:issue:`31789`) +- Fixed regression in :meth:`DataFrameGroupBy.nunique` which was modifying the original values if ``NaN`` values were present (:issue:`31950`) - Fixed regression where :func:`read_pickle` raised a ``UnicodeDecodeError`` when reading a py27 pickle with :class:`MultiIndex` column (:issue:`31988`). - Fixed regression in :class:`DataFrame` arithmetic operations with mis-matched columns (:issue:`31623`) - Fixed regression in :meth:`GroupBy.agg` calling a user-provided function an extra time on an empty input (:issue:`31760`) diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py index 37b6429167646..1bb512aee39e2 100644 --- a/pandas/core/groupby/generic.py +++ b/pandas/core/groupby/generic.py @@ -591,30 +591,18 @@ def nunique(self, dropna: bool = True) -> Series: val = self.obj._internal_get_values() - # GH 27951 - # temporary fix while we wait for NumPy bug 12629 to be fixed - val[isna(val)] = np.datetime64("NaT") - - try: - sorter = np.lexsort((val, ids)) - except TypeError: # catches object dtypes - msg = f"val.dtype must be object, got {val.dtype}" - assert val.dtype == object, msg - val, _ = algorithms.factorize(val, sort=False) - sorter = np.lexsort((val, ids)) - _isna = lambda a: a == -1 - else: - _isna = isna - - ids, val = ids[sorter], val[sorter] + codes, _ = algorithms.factorize(val, sort=False) + sorter = np.lexsort((codes, ids)) + codes = codes[sorter] + ids = ids[sorter] # group boundaries are where group ids change # unique observations are where sorted values change idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]] - inc = np.r_[1, val[1:] != val[:-1]] + inc = np.r_[1, codes[1:] != codes[:-1]] # 1st item of each group is a new unique observation - mask = _isna(val) + mask = codes == -1 if dropna: inc[idx] = 1 inc[mask] = 0 diff --git a/pandas/tests/groupby/test_function.py b/pandas/tests/groupby/test_function.py index 6205dfb87bbd0..c402ca194648f 100644 --- a/pandas/tests/groupby/test_function.py +++ b/pandas/tests/groupby/test_function.py @@ -1017,6 +1017,7 @@ def test_frame_describe_unstacked_format(): @pytest.mark.parametrize("dropna", [False, True]) def test_series_groupby_nunique(n, m, sort, dropna): def check_nunique(df, keys, as_index=True): + original_df = df.copy() gr = df.groupby(keys, as_index=as_index, sort=sort) left = gr["julie"].nunique(dropna=dropna) @@ -1026,6 +1027,7 @@ def check_nunique(df, keys, as_index=True): right = right.reset_index(drop=True) tm.assert_series_equal(left, right, check_names=False) + tm.assert_frame_equal(df, original_df) days = date_range("2015-08-23", periods=10)
- [ ] closes #31950 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Just reopening #31973 for now - this doesn't special-case NaT. I'll think about whether there's a better way to do it; feel free to close / take over if it's wrong.
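To make the regression concrete, here is a minimal reproduction sketch in the spirit of the test added above (the frame is invented for illustration; the issue's own example may differ):

```python
import numpy as np
import pandas as pd

# Hypothetical frame with a missing value; before this fix, the
# NaT-patching step in nunique could write into the underlying values.
df = pd.DataFrame({"key": ["a", "a", "b"], "val": ["x", np.nan, "y"]})
original = df.copy()

df.groupby("key")["val"].nunique(dropna=True)

# With the fix, the input frame is left untouched.
pd.testing.assert_frame_equal(df, original)
```

And a toy, standalone sketch of the factorize/lexsort counting trick the new code path builds on (array values invented for illustration):

```python
import numpy as np

ids = np.array([0, 0, 1, 1, 1])    # group label per row
codes = np.array([2, 2, 0, 1, 0])  # factorized values; -1 would mark a missing value

sorter = np.lexsort((codes, ids))
codes, ids = codes[sorter], ids[sorter]

idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]  # start of each group
inc = np.r_[1, codes[1:] != codes[:-1]]                 # 1 where a new value begins
inc[idx] = 1                                            # first row of each group counts

print(np.add.reduceat(inc, idx))  # [1 2]: one unique in group 0, two in group 1
```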
https://api.github.com/repos/pandas-dev/pandas/pulls/32175
2020-02-22T11:29:46Z
2020-02-23T15:02:17Z
2020-02-23T15:02:17Z
2020-02-25T17:10:59Z
DOC: Move pandas_development_faq from wiki to doc #30232
diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst index f8a6bb6deb52d..66d92a8788b71 100644 --- a/doc/source/development/index.rst +++ b/doc/source/development/index.rst @@ -20,4 +20,5 @@ Development developer policies roadmap + pandas_development_faq meeting diff --git a/doc/source/development/pandas_development_faq.rst b/doc/source/development/pandas_development_faq.rst new file mode 100644 index 0000000000000..9f7864d9f99f5 --- /dev/null +++ b/doc/source/development/pandas_development_faq.rst @@ -0,0 +1,201 @@ +.. _pandas_development_faq: + +{{ header }} + +====================== +Pandas Development FAQ +====================== + +.. contents:: Table of contents: + :local: + +Purpose +======= + +The aim is to gather oft-asked questions/comments from contributors to make the +contribution process easier for: + +* Core developers to give advice & accept new code contributions. +* New contributors to find an easy way in for quick and efficient bug fixes + or feature additions. + +While some questions/comments/advice may be applicable to general programming, +these are things that directly relate to ``pandas`` development. + +* `**PR == pull request** <https://help.github.com/articles/using-pull-requests>`_ +* **core developer:** A person who contributes at very high frequency & is + familiar with the code base and development process of ``pandas``. +* **contributors:** The occasional contributor, maybe from a specific domain, + contributes bug fixes, features or documentation at low frequency, may not + be an everyday programmer (e.g. scientists or engineers using + pandas for data processing) and looks at things from an end-user perspective. + +Pandas Development & Release Process +==================================== + +Testing +------- + +**Q:** What are some recommendations for writing unit tests? + +**A:** Your test should be self-contained. That is, it should preferably test a +single thing, e.g., a method that you've added to the ``DataFrame`` class. Your +test function/method should start with ``test_`` and the rest of the name should +be related to whatever functionality you're testing, like +``test_replace_with_dict_regex``. + +**Q:** Help! I can't get the tests to run! + +**A:** You probably either have multiple Python versions installed and there's +an ABI (application binary interface) issue, or you forgot to build the extension +modules in place. The latter can be done with + +.. code-block:: shell + + python setup.py build_ext --inplace + +from the ``pandas`` directory. + +Travis +------ + +**Q:** Why do I need a Travis file in my repo if it's already in the head +repository? + +**A:** Because we're not using subversion. Okay, seriously, it's because as far +as ``git`` is concerned *your* repository is the *only* one that exists. There's +really no such thing as a "head" repository in the eyes of ``git``; those are +concepts that we impose on it to make collaboration more effective and easier. +This is one of the nice aspects of +`distributed version control <http://en.wikipedia.org/wiki/Distributed_revision_control>`_. + +Workflow +-------- + +* What is a typical workflow on my local fork? +* Shall I work in a virtual environment? +* Shall I work in a virtual environment and then copy my changes over into a + clean local fork of my own repo? + +**Q:** Who will be responsible for evaluating my PR? + +**A:** Technically, anyone with push rights to ``pandas-dev`` can +evaluate it.
In practice, there are a handful of people who are constantly +watching the ``pandas`` repo for new PRs, so most likely it'll be one of them +that evaluates it. I'm not going to list names, but it's not that hard to figure +out... + +Criteria for PR +--------------- + +**Q:** What are the criteria for acceptance of a PR? + +**A:** First and foremost, your fix **must not break any existing +functionality**; one indicator of this is that your Travis build passes. Second, +just give it some time. Everyone is busy and @wesm has not (yet?) amassed a +``pandas`` development army. + +**Q:** Do I need to open an issue first? + +**A:** Not necessarily. If you want to submit a documentation change, e.g., a +typo fix, then opening an issue is not necessary. + +Coding Style +------------ + +**Q:** What level of commenting is accepted? + +**A:** The common-sense level. Don't overdo it on the comments, and make sure +if you *do* comment that your comments explain *what* your code is doing, not +*how* it is doing it (that's what code is for). + +Obligatory example: + +BAD: + +.. code-block:: python + + # increment i + i = 0 + + i += 1 + +GOOD: + +.. code-block:: python + + # add a person to the person count + i = 0 + + i += 1 + +Debugging +--------- + +**Q:** How can I debug without adding loads of ``print`` statements/calls +everywhere? + +**A:** You can use the Python standard library's ``pdb`` and set a breakpoint. +Put ``import pdb; pdb.set_trace()`` at the line where you want to stop. +`ipdb <https://github.com/gotcha/ipdb>`_ is ``pdb`` with tab-completion and a +few other bells and whistles, making debugging less painful. There's also +`ipdbplugin <https://github.com/flavioamieiro/nose-ipdb>`_ which allows you to +drop into ``ipdb`` from `nose <https://github.com/nose-devs/nose>`_ when a test +fails via + +.. code-block:: shell + + nosetests --ipdb # or --ipdb-failures + +**Q:** Would a logging hook be a solution? + +**A:** That's probably a bit overkill. See the suggestions above. + +Pandas Library +============== + +Source Comments +--------------- + +* It would be nice to add more source comments to quickly understand the context + when chiming in to fix an issue. + +Testing +------- + +**Q:** Why don't test functions have a docstring? + +**A:** If your tests are self-contained and aren't +`sprawling ecosystems of spaghetti <http://cdn.memegenerator.net/instances/250x250/26336623.jpg>`_ +then having a docstring is redundant. Also, the test name is usually (and +should be!) very descriptive. Remember there's no character limit for variable +names. We're not using FORTRAN. + +**Q:** ``DataFrame`` and other ``pandas`` objects often have many properties/methods. +What is the level of detail that I should consider when I'm writing my test(s)? + +**A:** See the previous question/answer. Strive to test one and only one thing. +You could even separate out your tests by their formal parameters if you want +things to be *really* self-contained. + +**Q:** Should I consider possible corner cases of my implementation? + +**A:** The answer is a resounding **YES**! In some cases you may come across +something that is very pathological. In those cases you should ask a core +developer. + +Complexity +---------- + +* Some modules (e.g. io/parsers.py) seem to have grown into very high complexity. + It is very time-consuming to find out what is done where just for fixing a + small bug.
+* splitting them into several modules would be good. +* more in-code comments telling why something is done and under which condition + and for what expected result. + + +Docstrings +---------- + +* even internal functions should have a simple one-line docstring.
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
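The FAQ's testing advice is easy to see in a concrete sketch (hypothetical test body; only the example test name comes from the FAQ itself, and ``pandas._testing`` usage mirrors the test modules elsewhere in this repo):

```python
import pandas as pd
import pandas._testing as tm


def test_replace_with_dict_regex():
    # Self-contained: exactly one behaviour under test, and the name says which.
    ser = pd.Series(["foo", "bar"])
    result = ser.replace({r"^f.*": "baz"}, regex=True)
    expected = pd.Series(["baz", "bar"])
    tm.assert_series_equal(result, expected)
```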
https://api.github.com/repos/pandas-dev/pandas/pulls/32172
2020-02-22T06:15:31Z
2020-02-27T19:08:04Z
null
2020-02-27T19:09:01Z
REF/TST: collect Index join tests
diff --git a/pandas/tests/indexes/datetimes/test_date_range.py b/pandas/tests/indexes/datetimes/test_date_range.py index 4d0beecbbf5d3..d33351fe94a8c 100644 --- a/pandas/tests/indexes/datetimes/test_date_range.py +++ b/pandas/tests/indexes/datetimes/test_date_range.py @@ -759,17 +759,6 @@ def test_constructor(self): with pytest.raises(TypeError, match=msg): bdate_range(START, END, periods=10, freq=None) - def test_naive_aware_conflicts(self): - naive = bdate_range(START, END, freq=BDay(), tz=None) - aware = bdate_range(START, END, freq=BDay(), tz="Asia/Hong_Kong") - - msg = "tz-naive.*tz-aware" - with pytest.raises(TypeError, match=msg): - naive.join(aware) - - with pytest.raises(TypeError, match=msg): - aware.join(naive) - def test_misc(self): end = datetime(2009, 5, 13) dr = bdate_range(end=end, periods=20) diff --git a/pandas/tests/indexes/datetimes/test_datetime.py b/pandas/tests/indexes/datetimes/test_datetime.py index ca18d6fbea11a..1a72ef2bdf1aa 100644 --- a/pandas/tests/indexes/datetimes/test_datetime.py +++ b/pandas/tests/indexes/datetimes/test_datetime.py @@ -100,16 +100,13 @@ def test_stringified_slice_with_tz(self): df = DataFrame(np.arange(10), index=idx) df["2013-01-14 23:44:34.437768-05:00":] # no exception here - def test_append_join_nondatetimeindex(self): + def test_append_nondatetimeindex(self): rng = date_range("1/1/2000", periods=10) idx = Index(["a", "b", "c", "d"]) result = rng.append(idx) assert isinstance(result[0], Timestamp) - # it works - rng.join(idx, how="outer") - def test_map(self): rng = date_range("1/1/2000", periods=10) @@ -246,25 +243,6 @@ def test_isin(self): index.isin([index[2], 5]), np.array([False, False, True, False]) ) - def test_does_not_convert_mixed_integer(self): - df = tm.makeCustomDataframe( - 10, - 10, - data_gen_f=lambda *args, **kwargs: randn(), - r_idx_type="i", - c_idx_type="dt", - ) - cols = df.columns.join(df.index, how="outer") - joined = cols.join(df.columns) - assert cols.dtype == np.dtype("O") - assert cols.dtype == joined.dtype - tm.assert_numpy_array_equal(cols.values, joined.values) - - def test_join_self(self, join_type): - index = date_range("1/1/2000", periods=10) - joined = index.join(index, how=join_type) - assert index is joined - def assert_index_parameters(self, index): assert index.freq == "40960N" assert index.inferred_freq == "40960N" @@ -282,20 +260,6 @@ def test_ns_index(self): new_index = pd.date_range(start=index[0], end=index[-1], freq=index.freq) self.assert_index_parameters(new_index) - def test_join_with_period_index(self, join_type): - df = tm.makeCustomDataframe( - 10, - 10, - data_gen_f=lambda *args: np.random.randint(2), - c_idx_type="p", - r_idx_type="dt", - ) - s = df.iloc[:5, 0] - - expected = df.columns.astype("O").join(s.index, how=join_type) - result = df.columns.join(s.index, how=join_type) - tm.assert_index_equal(expected, result) - def test_factorize(self): idx1 = DatetimeIndex( ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"] diff --git a/pandas/tests/indexes/datetimes/test_join.py b/pandas/tests/indexes/datetimes/test_join.py new file mode 100644 index 0000000000000..e4d6958dbd3d8 --- /dev/null +++ b/pandas/tests/indexes/datetimes/test_join.py @@ -0,0 +1,131 @@ +from datetime import datetime + +import numpy as np +import pytest + +from pandas import DatetimeIndex, Index, Timestamp, date_range, to_datetime +import pandas._testing as tm + +from pandas.tseries.offsets import BDay, BMonthEnd + + +class TestJoin: + def test_does_not_convert_mixed_integer(self): + df = 
tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args, **kwargs: np.random.randn(), + r_idx_type="i", + c_idx_type="dt", + ) + cols = df.columns.join(df.index, how="outer") + joined = cols.join(df.columns) + assert cols.dtype == np.dtype("O") + assert cols.dtype == joined.dtype + tm.assert_numpy_array_equal(cols.values, joined.values) + + def test_join_self(self, join_type): + index = date_range("1/1/2000", periods=10) + joined = index.join(index, how=join_type) + assert index is joined + + def test_join_with_period_index(self, join_type): + df = tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args: np.random.randint(2), + c_idx_type="p", + r_idx_type="dt", + ) + s = df.iloc[:5, 0] + + expected = df.columns.astype("O").join(s.index, how=join_type) + result = df.columns.join(s.index, how=join_type) + tm.assert_index_equal(expected, result) + + def test_join_object_index(self): + rng = date_range("1/1/2000", periods=10) + idx = Index(["a", "b", "c", "d"]) + + result = rng.join(idx, how="outer") + assert isinstance(result[0], Timestamp) + + def test_join_utc_convert(self, join_type): + rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") + + left = rng.tz_convert("US/Eastern") + right = rng.tz_convert("Europe/Berlin") + + result = left.join(left[:-5], how=join_type) + assert isinstance(result, DatetimeIndex) + assert result.tz == left.tz + + result = left.join(right[:-5], how=join_type) + assert isinstance(result, DatetimeIndex) + assert result.tz.zone == "UTC" + + @pytest.mark.parametrize("sort", [None, False]) + def test_datetimeindex_union_join_empty(self, sort): + dti = date_range(start="1/1/2001", end="2/1/2001", freq="D") + empty = Index([]) + + result = dti.union(empty, sort=sort) + expected = dti.astype("O") + tm.assert_index_equal(result, expected) + + result = dti.join(empty) + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, dti) + + def test_join_nonunique(self): + idx1 = to_datetime(["2012-11-06 16:00:11.477563", "2012-11-06 16:00:11.477563"]) + idx2 = to_datetime(["2012-11-06 15:11:09.006507", "2012-11-06 15:11:09.006507"]) + rs = idx1.join(idx2, how="outer") + assert rs.is_monotonic + + @pytest.mark.parametrize("freq", ["B", "C"]) + def test_outer_join(self, freq): + # should just behave as union + start, end = datetime(2009, 1, 1), datetime(2010, 1, 1) + rng = date_range(start=start, end=end, freq=freq) + + # overlapping + left = rng[:10] + right = rng[5:10] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + + # non-overlapping, gap in middle + left = rng[:5] + right = rng[10:] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + assert the_join.freq is None + + # non-overlapping, no gap + left = rng[:5] + right = rng[5:10] + + the_join = left.join(right, how="outer") + assert isinstance(the_join, DatetimeIndex) + + # overlapping, but different offset + other = date_range(start, end, freq=BMonthEnd()) + + the_join = rng.join(other, how="outer") + assert isinstance(the_join, DatetimeIndex) + assert the_join.freq is None + + def test_naive_aware_conflicts(self): + start, end = datetime(2009, 1, 1), datetime(2010, 1, 1) + naive = date_range(start, end, freq=BDay(), tz=None) + aware = date_range(start, end, freq=BDay(), tz="Asia/Hong_Kong") + + msg = "tz-naive.*tz-aware" + with pytest.raises(TypeError, match=msg): + naive.join(aware) + + with pytest.raises(TypeError, match=msg): + aware.join(naive) diff --git 
a/pandas/tests/indexes/datetimes/test_setops.py b/pandas/tests/indexes/datetimes/test_setops.py index 78188c54b1d85..d58ecbad4c1b3 100644 --- a/pandas/tests/indexes/datetimes/test_setops.py +++ b/pandas/tests/indexes/datetimes/test_setops.py @@ -14,7 +14,6 @@ Series, bdate_range, date_range, - to_datetime, ) import pandas._testing as tm @@ -348,25 +347,6 @@ def test_datetimeindex_diff(self, sort): dti2 = date_range(freq="Q-JAN", start=datetime(1997, 12, 31), periods=98) assert len(dti1.difference(dti2, sort)) == 2 - @pytest.mark.parametrize("sort", [None, False]) - def test_datetimeindex_union_join_empty(self, sort): - dti = date_range(start="1/1/2001", end="2/1/2001", freq="D") - empty = Index([]) - - result = dti.union(empty, sort=sort) - expected = dti.astype("O") - tm.assert_index_equal(result, expected) - - result = dti.join(empty) - assert isinstance(result, DatetimeIndex) - tm.assert_index_equal(result, dti) - - def test_join_nonunique(self): - idx1 = to_datetime(["2012-11-06 16:00:11.477563", "2012-11-06 16:00:11.477563"]) - idx2 = to_datetime(["2012-11-06 15:11:09.006507", "2012-11-06 15:11:09.006507"]) - rs = idx1.join(idx2, how="outer") - assert rs.is_monotonic - class TestBusinessDatetimeIndex: def setup_method(self, method): @@ -408,38 +388,6 @@ def test_union(self, sort): the_union = self.rng.union(rng, sort=sort) assert isinstance(the_union, DatetimeIndex) - def test_outer_join(self): - # should just behave as union - - # overlapping - left = self.rng[:10] - right = self.rng[5:10] - - the_join = left.join(right, how="outer") - assert isinstance(the_join, DatetimeIndex) - - # non-overlapping, gap in middle - left = self.rng[:5] - right = self.rng[10:] - - the_join = left.join(right, how="outer") - assert isinstance(the_join, DatetimeIndex) - assert the_join.freq is None - - # non-overlapping, no gap - left = self.rng[:5] - right = self.rng[5:10] - - the_join = left.join(right, how="outer") - assert isinstance(the_join, DatetimeIndex) - - # overlapping, but different offset - rng = date_range(START, END, freq=BMonthEnd()) - - the_join = self.rng.join(rng, how="outer") - assert isinstance(the_join, DatetimeIndex) - assert the_join.freq is None - @pytest.mark.parametrize("sort", [None, False]) def test_union_not_cacheable(self, sort): rng = date_range("1/1/2000", periods=50, freq=Minute()) @@ -556,38 +504,6 @@ def test_union(self, sort): the_union = self.rng.union(rng, sort=sort) assert isinstance(the_union, DatetimeIndex) - def test_outer_join(self): - # should just behave as union - - # overlapping - left = self.rng[:10] - right = self.rng[5:10] - - the_join = left.join(right, how="outer") - assert isinstance(the_join, DatetimeIndex) - - # non-overlapping, gap in middle - left = self.rng[:5] - right = self.rng[10:] - - the_join = left.join(right, how="outer") - assert isinstance(the_join, DatetimeIndex) - assert the_join.freq is None - - # non-overlapping, no gap - left = self.rng[:5] - right = self.rng[5:10] - - the_join = left.join(right, how="outer") - assert isinstance(the_join, DatetimeIndex) - - # overlapping, but different offset - rng = date_range(START, END, freq=BMonthEnd()) - - the_join = self.rng.join(rng, how="outer") - assert isinstance(the_join, DatetimeIndex) - assert the_join.freq is None - def test_intersection_bug(self): # GH #771 a = bdate_range("11/30/2011", "12/31/2011", freq="C") diff --git a/pandas/tests/indexes/datetimes/test_timezones.py b/pandas/tests/indexes/datetimes/test_timezones.py index 7574e4501f5aa..9c1e8cb0f563f 100644 --- 
a/pandas/tests/indexes/datetimes/test_timezones.py +++ b/pandas/tests/indexes/datetimes/test_timezones.py @@ -804,20 +804,6 @@ def test_dti_tz_constructors(self, tzstr): # ------------------------------------------------------------- # Unsorted - def test_join_utc_convert(self, join_type): - rng = date_range("1/1/2011", periods=100, freq="H", tz="utc") - - left = rng.tz_convert("US/Eastern") - right = rng.tz_convert("Europe/Berlin") - - result = left.join(left[:-5], how=join_type) - assert isinstance(result, DatetimeIndex) - assert result.tz == left.tz - - result = left.join(right[:-5], how=join_type) - assert isinstance(result, DatetimeIndex) - assert result.tz.zone == "UTC" - @pytest.mark.parametrize( "dtype", [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"], diff --git a/pandas/tests/indexes/numeric/__init__.py b/pandas/tests/indexes/numeric/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/indexes/numeric/test_join.py b/pandas/tests/indexes/numeric/test_join.py new file mode 100644 index 0000000000000..c8dffa411e5fd --- /dev/null +++ b/pandas/tests/indexes/numeric/test_join.py @@ -0,0 +1,388 @@ +import numpy as np +import pytest + +from pandas import Index, Int64Index, UInt64Index +import pandas._testing as tm + + +class TestJoinInt64Index: + def test_join_non_unique(self): + left = Index([4, 4, 3, 3]) + + joined, lidx, ridx = left.join(left, return_indexers=True) + + exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4]) + tm.assert_index_equal(joined, exp_joined) + + exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(lidx, exp_lidx) + + exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp) + tm.assert_numpy_array_equal(ridx, exp_ridx) + + def test_join_inner(self): + index = Int64Index(range(0, 20, 2)) + other = Int64Index([7, 12, 25, 1, 2, 5]) + other_mono = Int64Index([1, 2, 5, 7, 12, 25]) + + # not monotonic + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) + + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) + + eres = Int64Index([2, 12]) + elidx = np.array([1, 6], dtype=np.intp) + eridx = np.array([4, 1], dtype=np.intp) + + assert isinstance(res, Int64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="inner", return_indexers=True) + + res2 = index.intersection(other_mono) + tm.assert_index_equal(res, res2) + + elidx = np.array([1, 6], dtype=np.intp) + eridx = np.array([1, 4], dtype=np.intp) + assert isinstance(res, Int64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_left(self): + index = Int64Index(range(0, 20, 2)) + other = Int64Index([7, 12, 25, 1, 2, 5]) + other_mono = Int64Index([1, 2, 5, 7, 12, 25]) + + # not monotonic + res, lidx, ridx = index.join(other, how="left", return_indexers=True) + eres = index + eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp) + + assert isinstance(res, Int64Index) + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="left", return_indexers=True) + eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp) + assert isinstance(res, 
Int64Index) + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # non-unique + idx = Index([1, 1, 2, 5]) + idx2 = Index([1, 2, 5, 7, 9]) + res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True) + eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 + eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_right(self): + index = Int64Index(range(0, 20, 2)) + other = Int64Index([7, 12, 25, 1, 2, 5]) + other_mono = Int64Index([1, 2, 5, 7, 12, 25]) + + # not monotonic + res, lidx, ridx = index.join(other, how="right", return_indexers=True) + eres = other + elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp) + + assert isinstance(other, Int64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + # monotonic + res, lidx, ridx = index.join(other_mono, how="right", return_indexers=True) + eres = other_mono + elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp) + assert isinstance(other, Int64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + # non-unique + idx = Index([1, 1, 2, 5]) + idx2 = Index([1, 2, 5, 7, 9]) + res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True) + eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 + elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_non_int_index(self): + index = Int64Index(range(0, 20, 2)) + other = Index([3, 6, 7, 8, 10], dtype=object) + + outer = index.join(other, how="outer") + outer2 = other.join(index, how="outer") + expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) + tm.assert_index_equal(outer, outer2) + tm.assert_index_equal(outer, expected) + + inner = index.join(other, how="inner") + inner2 = other.join(index, how="inner") + expected = Index([6, 8, 10]) + tm.assert_index_equal(inner, inner2) + tm.assert_index_equal(inner, expected) + + left = index.join(other, how="left") + tm.assert_index_equal(left, index.astype(object)) + + left2 = other.join(index, how="left") + tm.assert_index_equal(left2, other) + + right = index.join(other, how="right") + tm.assert_index_equal(right, other) + + right2 = other.join(index, how="right") + tm.assert_index_equal(right2, index.astype(object)) + + def test_join_outer(self): + index = Int64Index(range(0, 20, 2)) + other = Int64Index([7, 12, 25, 1, 2, 5]) + other_mono = Int64Index([1, 2, 5, 7, 12, 25]) + + # not monotonic + # guarantee of sortedness + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25]) + elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) + eridx = np.array( + [-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], dtype=np.intp + ) + + assert isinstance(res, Int64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index.join(other_mono, how="outer", return_indexers=True) + 
noidx_res = index.join(other_mono, how="outer") + tm.assert_index_equal(res, noidx_res) + + elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) + eridx = np.array( + [-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], dtype=np.intp + ) + assert isinstance(res, Int64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + +class TestJoinUInt64Index: + @pytest.fixture + def index_large(self): + # large values used in TestUInt64Index where no compat needed with Int64/Float64 + large = [2 ** 63, 2 ** 63 + 10, 2 ** 63 + 15, 2 ** 63 + 20, 2 ** 63 + 25] + return UInt64Index(large) + + def test_join_inner(self, index_large): + other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = UInt64Index( + 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") + ) + + # not monotonic + res, lidx, ridx = index_large.join(other, how="inner", return_indexers=True) + + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) + + eres = UInt64Index(2 ** 63 + np.array([10, 25], dtype="uint64")) + elidx = np.array([1, 4], dtype=np.intp) + eridx = np.array([5, 2], dtype=np.intp) + + assert isinstance(res, UInt64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="inner", return_indexers=True + ) + + res2 = index_large.intersection(other_mono) + tm.assert_index_equal(res, res2) + + elidx = np.array([1, 4], dtype=np.intp) + eridx = np.array([3, 5], dtype=np.intp) + + assert isinstance(res, UInt64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_left(self, index_large): + other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = UInt64Index( + 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") + ) + + # not monotonic + res, lidx, ridx = index_large.join(other, how="left", return_indexers=True) + eres = index_large + eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp) + + assert isinstance(res, UInt64Index) + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join(other_mono, how="left", return_indexers=True) + eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp) + + assert isinstance(res, UInt64Index) + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # non-unique + idx = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5], dtype="uint64")) + idx2 = UInt64Index(2 ** 63 + np.array([1, 2, 5, 7, 9], dtype="uint64")) + res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True) + + # 1 is in idx2, so it should be x2 + eres = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64")) + eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_right(self, index_large): + other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = UInt64Index( + 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") + ) + + # not monotonic + res, 
lidx, ridx = index_large.join(other, how="right", return_indexers=True) + eres = other + elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp) + + tm.assert_numpy_array_equal(lidx, elidx) + assert isinstance(other, UInt64Index) + tm.assert_index_equal(res, eres) + assert ridx is None + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="right", return_indexers=True + ) + eres = other_mono + elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp) + + assert isinstance(other, UInt64Index) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_index_equal(res, eres) + assert ridx is None + + # non-unique + idx = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5], dtype="uint64")) + idx2 = UInt64Index(2 ** 63 + np.array([1, 2, 5, 7, 9], dtype="uint64")) + res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True) + + # 1 is in idx2, so it should be x2 + eres = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64")) + elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) + eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) + + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_non_int_index(self, index_large): + other = Index( + 2 ** 63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object + ) + + outer = index_large.join(other, how="outer") + outer2 = other.join(index_large, how="outer") + expected = Index( + 2 ** 63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64") + ) + tm.assert_index_equal(outer, outer2) + tm.assert_index_equal(outer, expected) + + inner = index_large.join(other, how="inner") + inner2 = other.join(index_large, how="inner") + expected = Index(2 ** 63 + np.array([10, 20], dtype="uint64")) + tm.assert_index_equal(inner, inner2) + tm.assert_index_equal(inner, expected) + + left = index_large.join(other, how="left") + tm.assert_index_equal(left, index_large.astype(object)) + + left2 = other.join(index_large, how="left") + tm.assert_index_equal(left2, other) + + right = index_large.join(other, how="right") + tm.assert_index_equal(right, other) + + right2 = other.join(index_large, how="right") + tm.assert_index_equal(right2, index_large.astype(object)) + + def test_join_outer(self, index_large): + other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) + other_mono = UInt64Index( + 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") + ) + + # not monotonic + # guarantee of sortedness + res, lidx, ridx = index_large.join(other, how="outer", return_indexers=True) + noidx_res = index_large.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + eres = UInt64Index( + 2 ** 63 + np.array([0, 1, 2, 7, 10, 12, 15, 20, 25], dtype="uint64") + ) + elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) + eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp) + + assert isinstance(res, UInt64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # monotonic + res, lidx, ridx = index_large.join( + other_mono, how="outer", return_indexers=True + ) + noidx_res = index_large.join(other_mono, how="outer") + tm.assert_index_equal(res, noidx_res) + + elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) + eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp) + + assert isinstance(res, UInt64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, 
eridx) diff --git a/pandas/tests/indexes/period/test_join.py b/pandas/tests/indexes/period/test_join.py new file mode 100644 index 0000000000000..9e3df0c32d6d5 --- /dev/null +++ b/pandas/tests/indexes/period/test_join.py @@ -0,0 +1,43 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import IncompatibleFrequency + +from pandas import Index, PeriodIndex, period_range +import pandas._testing as tm + + +class TestJoin: + def test_joins(self, join_type): + index = period_range("1/1/2000", "1/20/2000", freq="D") + + joined = index.join(index[:-5], how=join_type) + + assert isinstance(joined, PeriodIndex) + assert joined.freq == index.freq + + def test_join_self(self, join_type): + index = period_range("1/1/2000", "1/20/2000", freq="D") + + res = index.join(index, how=join_type) + assert index is res + + def test_join_does_not_recur(self): + df = tm.makeCustomDataframe( + 3, + 2, + data_gen_f=lambda *args: np.random.randint(2), + c_idx_type="p", + r_idx_type="dt", + ) + s = df.iloc[:2, 0] + + res = s.index.join(df.columns, how="outer") + expected = Index([s.index[0], s.index[1], df.columns[0], df.columns[1]], object) + tm.assert_index_equal(res, expected) + + def test_join_mismatched_freq_raises(self): + index = period_range("1/1/2000", "1/20/2000", freq="D") + index3 = period_range("1/1/2000", "1/20/2000", freq="2D") + with pytest.raises(IncompatibleFrequency): + index.join(index3) diff --git a/pandas/tests/indexes/period/test_period.py b/pandas/tests/indexes/period/test_period.py index 4db93e850f579..6479b14e9521e 100644 --- a/pandas/tests/indexes/period/test_period.py +++ b/pandas/tests/indexes/period/test_period.py @@ -586,11 +586,6 @@ def test_map(self): exp = Index([x.ordinal for x in index]) tm.assert_index_equal(result, exp) - def test_join_self(self, join_type): - index = period_range("1/1/2000", periods=10) - joined = index.join(index, how=join_type) - assert index is joined - def test_insert(self): # GH 18295 (test missing) expected = PeriodIndex(["2017Q1", NaT, "2017Q2", "2017Q3", "2017Q4"], freq="Q") diff --git a/pandas/tests/indexes/period/test_setops.py b/pandas/tests/indexes/period/test_setops.py index 6f254b7b4408d..647d56d33f312 100644 --- a/pandas/tests/indexes/period/test_setops.py +++ b/pandas/tests/indexes/period/test_setops.py @@ -4,7 +4,7 @@ from pandas._libs.tslibs import IncompatibleFrequency import pandas as pd -from pandas import Index, PeriodIndex, date_range, period_range +from pandas import PeriodIndex, date_range, period_range import pandas._testing as tm @@ -13,34 +13,6 @@ def _permute(obj): class TestPeriodIndex: - def test_joins(self, join_type): - index = period_range("1/1/2000", "1/20/2000", freq="D") - - joined = index.join(index[:-5], how=join_type) - - assert isinstance(joined, PeriodIndex) - assert joined.freq == index.freq - - def test_join_self(self, join_type): - index = period_range("1/1/2000", "1/20/2000", freq="D") - - res = index.join(index, how=join_type) - assert index is res - - def test_join_does_not_recur(self): - df = tm.makeCustomDataframe( - 3, - 2, - data_gen_f=lambda *args: np.random.randint(2), - c_idx_type="p", - r_idx_type="dt", - ) - s = df.iloc[:2, 0] - - res = s.index.join(df.columns, how="outer") - expected = Index([s.index[0], s.index[1], df.columns[0], df.columns[1]], object) - tm.assert_index_equal(res, expected) - @pytest.mark.parametrize("sort", [None, False]) def test_union(self, sort): # union @@ -181,10 +153,6 @@ def test_union_misc(self, sort): with pytest.raises(IncompatibleFrequency): index.union(index2, 
sort=sort) - index3 = period_range("1/1/2000", "1/20/2000", freq="2D") - with pytest.raises(IncompatibleFrequency): - index.join(index3) - # TODO: belongs elsewhere def test_union_dataframe_index(self): rng1 = period_range("1/1/1999", "1/1/2012", freq="M") diff --git a/pandas/tests/indexes/ranges/test_join.py b/pandas/tests/indexes/ranges/test_join.py new file mode 100644 index 0000000000000..76013d2b7a387 --- /dev/null +++ b/pandas/tests/indexes/ranges/test_join.py @@ -0,0 +1,174 @@ +import numpy as np + +from pandas import Index, Int64Index, RangeIndex +import pandas._testing as tm + + +class TestJoin: + def test_join_outer(self): + # join with Int64Index + index = RangeIndex(start=0, stop=20, step=2) + other = Int64Index(np.arange(25, 14, -1)) + + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + eres = Int64Index( + [0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] + ) + elidx = np.array( + [0, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, 9, -1, -1, -1, -1, -1, -1, -1], + dtype=np.intp, + ) + eridx = np.array( + [-1, -1, -1, -1, -1, -1, -1, -1, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0], + dtype=np.intp, + ) + + assert isinstance(res, Int64Index) + assert not isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # join with RangeIndex + other = RangeIndex(25, 14, -1) + + res, lidx, ridx = index.join(other, how="outer", return_indexers=True) + noidx_res = index.join(other, how="outer") + tm.assert_index_equal(res, noidx_res) + + assert isinstance(res, Int64Index) + assert not isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_inner(self): + # Join with non-RangeIndex + index = RangeIndex(start=0, stop=20, step=2) + other = Int64Index(np.arange(25, 14, -1)) + + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) + + # no guarantee of sortedness, so sort for comparison purposes + ind = res.argsort() + res = res.take(ind) + lidx = lidx.take(ind) + ridx = ridx.take(ind) + + eres = Int64Index([16, 18]) + elidx = np.array([8, 9], dtype=np.intp) + eridx = np.array([9, 7], dtype=np.intp) + + assert isinstance(res, Int64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + # Join two RangeIndex + other = RangeIndex(25, 14, -1) + + res, lidx, ridx = index.join(other, how="inner", return_indexers=True) + + assert isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_left(self): + # Join with Int64Index + index = RangeIndex(start=0, stop=20, step=2) + other = Int64Index(np.arange(25, 14, -1)) + + res, lidx, ridx = index.join(other, how="left", return_indexers=True) + eres = index + eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp) + + assert isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + # Join withRangeIndex + other = Int64Index(np.arange(25, 14, -1)) + + res, lidx, ridx = index.join(other, how="left", return_indexers=True) + + assert isinstance(res, RangeIndex) + tm.assert_index_equal(res, eres) + assert lidx is None + tm.assert_numpy_array_equal(ridx, eridx) + + def 
test_join_right(self): + # Join with Int64Index + index = RangeIndex(start=0, stop=20, step=2) + other = Int64Index(np.arange(25, 14, -1)) + + res, lidx, ridx = index.join(other, how="right", return_indexers=True) + eres = other + elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp) + + assert isinstance(other, Int64Index) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + # Join withRangeIndex + other = RangeIndex(25, 14, -1) + + res, lidx, ridx = index.join(other, how="right", return_indexers=True) + eres = other + + assert isinstance(other, RangeIndex) + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + assert ridx is None + + def test_join_non_int_index(self): + index = RangeIndex(start=0, stop=20, step=2) + other = Index([3, 6, 7, 8, 10], dtype=object) + + outer = index.join(other, how="outer") + outer2 = other.join(index, how="outer") + expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) + tm.assert_index_equal(outer, outer2) + tm.assert_index_equal(outer, expected) + + inner = index.join(other, how="inner") + inner2 = other.join(index, how="inner") + expected = Index([6, 8, 10]) + tm.assert_index_equal(inner, inner2) + tm.assert_index_equal(inner, expected) + + left = index.join(other, how="left") + tm.assert_index_equal(left, index.astype(object)) + + left2 = other.join(index, how="left") + tm.assert_index_equal(left2, other) + + right = index.join(other, how="right") + tm.assert_index_equal(right, other) + + right2 = other.join(index, how="right") + tm.assert_index_equal(right2, index.astype(object)) + + def test_join_non_unique(self): + index = RangeIndex(start=0, stop=20, step=2) + other = Index([4, 4, 3, 3]) + + res, lidx, ridx = index.join(other, return_indexers=True) + + eres = Int64Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18]) + elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp) + eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1], dtype=np.intp) + + tm.assert_index_equal(res, eres) + tm.assert_numpy_array_equal(lidx, elidx) + tm.assert_numpy_array_equal(ridx, eridx) + + def test_join_self(self, join_type): + index = RangeIndex(start=0, stop=20, step=2) + joined = index.join(index, how=join_type) + assert index is joined diff --git a/pandas/tests/indexes/ranges/test_range.py b/pandas/tests/indexes/ranges/test_range.py index 24616f05c19ce..c1cc23039eeaf 100644 --- a/pandas/tests/indexes/ranges/test_range.py +++ b/pandas/tests/indexes/ranges/test_range.py @@ -294,174 +294,6 @@ def test_get_indexer_decreasing(self, stop): expected = np.array([-1, 2, -1, -1, 1, -1, -1, 0, -1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) - def test_join_outer(self): - # join with Int64Index - index = self.create_index() - other = Int64Index(np.arange(25, 14, -1)) - - res, lidx, ridx = index.join(other, how="outer", return_indexers=True) - noidx_res = index.join(other, how="outer") - tm.assert_index_equal(res, noidx_res) - - eres = Int64Index( - [0, 2, 4, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] - ) - elidx = np.array( - [0, 1, 2, 3, 4, 5, 6, 7, -1, 8, -1, 9, -1, -1, -1, -1, -1, -1, -1], - dtype=np.intp, - ) - eridx = np.array( - [-1, -1, -1, -1, -1, -1, -1, -1, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0], - dtype=np.intp, - ) - - assert isinstance(res, Int64Index) - assert not isinstance(res, RangeIndex) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - # join 
with RangeIndex - other = RangeIndex(25, 14, -1) - - res, lidx, ridx = index.join(other, how="outer", return_indexers=True) - noidx_res = index.join(other, how="outer") - tm.assert_index_equal(res, noidx_res) - - assert isinstance(res, Int64Index) - assert not isinstance(res, RangeIndex) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_inner(self): - # Join with non-RangeIndex - index = self.create_index() - other = Int64Index(np.arange(25, 14, -1)) - - res, lidx, ridx = index.join(other, how="inner", return_indexers=True) - - # no guarantee of sortedness, so sort for comparison purposes - ind = res.argsort() - res = res.take(ind) - lidx = lidx.take(ind) - ridx = ridx.take(ind) - - eres = Int64Index([16, 18]) - elidx = np.array([8, 9], dtype=np.intp) - eridx = np.array([9, 7], dtype=np.intp) - - assert isinstance(res, Int64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - # Join two RangeIndex - other = RangeIndex(25, 14, -1) - - res, lidx, ridx = index.join(other, how="inner", return_indexers=True) - - assert isinstance(res, RangeIndex) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_left(self): - # Join with Int64Index - index = self.create_index() - other = Int64Index(np.arange(25, 14, -1)) - - res, lidx, ridx = index.join(other, how="left", return_indexers=True) - eres = index - eridx = np.array([-1, -1, -1, -1, -1, -1, -1, -1, 9, 7], dtype=np.intp) - - assert isinstance(res, RangeIndex) - tm.assert_index_equal(res, eres) - assert lidx is None - tm.assert_numpy_array_equal(ridx, eridx) - - # Join withRangeIndex - other = Int64Index(np.arange(25, 14, -1)) - - res, lidx, ridx = index.join(other, how="left", return_indexers=True) - - assert isinstance(res, RangeIndex) - tm.assert_index_equal(res, eres) - assert lidx is None - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_right(self): - # Join with Int64Index - index = self.create_index() - other = Int64Index(np.arange(25, 14, -1)) - - res, lidx, ridx = index.join(other, how="right", return_indexers=True) - eres = other - elidx = np.array([-1, -1, -1, -1, -1, -1, -1, 9, -1, 8, -1], dtype=np.intp) - - assert isinstance(other, Int64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - assert ridx is None - - # Join withRangeIndex - other = RangeIndex(25, 14, -1) - - res, lidx, ridx = index.join(other, how="right", return_indexers=True) - eres = other - - assert isinstance(other, RangeIndex) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - assert ridx is None - - def test_join_non_int_index(self): - index = self.create_index() - other = Index([3, 6, 7, 8, 10], dtype=object) - - outer = index.join(other, how="outer") - outer2 = other.join(index, how="outer") - expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) - tm.assert_index_equal(outer, outer2) - tm.assert_index_equal(outer, expected) - - inner = index.join(other, how="inner") - inner2 = other.join(index, how="inner") - expected = Index([6, 8, 10]) - tm.assert_index_equal(inner, inner2) - tm.assert_index_equal(inner, expected) - - left = index.join(other, how="left") - tm.assert_index_equal(left, index.astype(object)) - - left2 = other.join(index, how="left") - tm.assert_index_equal(left2, other) - - right = index.join(other, how="right") - 
tm.assert_index_equal(right, other) - - right2 = other.join(index, how="right") - tm.assert_index_equal(right2, index.astype(object)) - - def test_join_non_unique(self): - index = self.create_index() - other = Index([4, 4, 3, 3]) - - res, lidx, ridx = index.join(other, return_indexers=True) - - eres = Int64Index([0, 2, 4, 4, 6, 8, 10, 12, 14, 16, 18]) - elidx = np.array([0, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp) - eridx = np.array([-1, -1, 0, 1, -1, -1, -1, -1, -1, -1, -1], dtype=np.intp) - - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_self(self, join_type): - index = self.create_index() - joined = index.join(index, how=join_type) - assert index is joined - def test_nbytes(self): # memory savings vs int index diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index 3b4b6b09dcda5..77163e7a6a06a 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -2056,7 +2056,9 @@ def test_slice_keep_name(self): assert index.name == index[1:].name @pytest.mark.parametrize( - "index", ["unicode", "string", "datetime", "int", "float"], indirect=True + "index", + ["unicode", "string", "datetime", "int", "uint", "float"], + indirect=True, ) def test_join_self(self, index, join_type): joined = index.join(index, how=join_type) diff --git a/pandas/tests/indexes/test_numeric.py b/pandas/tests/indexes/test_numeric.py index 1b504ce99604d..10d57d8616cf3 100644 --- a/pandas/tests/indexes/test_numeric.py +++ b/pandas/tests/indexes/test_numeric.py @@ -580,25 +580,6 @@ def test_identical(self): assert not index.copy(dtype=object).identical(index.copy(dtype=self._dtype)) - def test_join_non_unique(self): - left = Index([4, 4, 3, 3]) - - joined, lidx, ridx = left.join(left, return_indexers=True) - - exp_joined = Index([3, 3, 3, 3, 4, 4, 4, 4]) - tm.assert_index_equal(joined, exp_joined) - - exp_lidx = np.array([2, 2, 3, 3, 0, 0, 1, 1], dtype=np.intp) - tm.assert_numpy_array_equal(lidx, exp_lidx) - - exp_ridx = np.array([2, 3, 2, 3, 0, 1, 0, 1], dtype=np.intp) - tm.assert_numpy_array_equal(ridx, exp_ridx) - - def test_join_self(self, join_type): - index = self.create_index() - joined = index.join(index, how=join_type) - assert index is joined - def test_union_noncomparable(self): # corner case, non-Int64Index index = self.create_index() @@ -798,175 +779,6 @@ def test_intersection(self): ) tm.assert_index_equal(result, expected) - def test_join_inner(self): - index = self.create_index() - other = Int64Index([7, 12, 25, 1, 2, 5]) - other_mono = Int64Index([1, 2, 5, 7, 12, 25]) - - # not monotonic - res, lidx, ridx = index.join(other, how="inner", return_indexers=True) - - # no guarantee of sortedness, so sort for comparison purposes - ind = res.argsort() - res = res.take(ind) - lidx = lidx.take(ind) - ridx = ridx.take(ind) - - eres = Int64Index([2, 12]) - elidx = np.array([1, 6], dtype=np.intp) - eridx = np.array([4, 1], dtype=np.intp) - - assert isinstance(res, Int64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - # monotonic - res, lidx, ridx = index.join(other_mono, how="inner", return_indexers=True) - - res2 = index.intersection(other_mono) - tm.assert_index_equal(res, res2) - - elidx = np.array([1, 6], dtype=np.intp) - eridx = np.array([1, 4], dtype=np.intp) - assert isinstance(res, Int64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - 
tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_left(self): - index = self.create_index() - other = Int64Index([7, 12, 25, 1, 2, 5]) - other_mono = Int64Index([1, 2, 5, 7, 12, 25]) - - # not monotonic - res, lidx, ridx = index.join(other, how="left", return_indexers=True) - eres = index - eridx = np.array([-1, 4, -1, -1, -1, -1, 1, -1, -1, -1], dtype=np.intp) - - assert isinstance(res, Int64Index) - tm.assert_index_equal(res, eres) - assert lidx is None - tm.assert_numpy_array_equal(ridx, eridx) - - # monotonic - res, lidx, ridx = index.join(other_mono, how="left", return_indexers=True) - eridx = np.array([-1, 1, -1, -1, -1, -1, 4, -1, -1, -1], dtype=np.intp) - assert isinstance(res, Int64Index) - tm.assert_index_equal(res, eres) - assert lidx is None - tm.assert_numpy_array_equal(ridx, eridx) - - # non-unique - idx = Index([1, 1, 2, 5]) - idx2 = Index([1, 2, 5, 7, 9]) - res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True) - eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 - eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) - elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_right(self): - index = self.create_index() - other = Int64Index([7, 12, 25, 1, 2, 5]) - other_mono = Int64Index([1, 2, 5, 7, 12, 25]) - - # not monotonic - res, lidx, ridx = index.join(other, how="right", return_indexers=True) - eres = other - elidx = np.array([-1, 6, -1, -1, 1, -1], dtype=np.intp) - - assert isinstance(other, Int64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - assert ridx is None - - # monotonic - res, lidx, ridx = index.join(other_mono, how="right", return_indexers=True) - eres = other_mono - elidx = np.array([-1, 1, -1, -1, 6, -1], dtype=np.intp) - assert isinstance(other, Int64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - assert ridx is None - - # non-unique - idx = Index([1, 1, 2, 5]) - idx2 = Index([1, 2, 5, 7, 9]) - res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True) - eres = Index([1, 1, 2, 5, 7, 9]) # 1 is in idx2, so it should be x2 - elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) - eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_non_int_index(self): - index = self.create_index() - other = Index([3, 6, 7, 8, 10], dtype=object) - - outer = index.join(other, how="outer") - outer2 = other.join(index, how="outer") - expected = Index([0, 2, 3, 4, 6, 7, 8, 10, 12, 14, 16, 18]) - tm.assert_index_equal(outer, outer2) - tm.assert_index_equal(outer, expected) - - inner = index.join(other, how="inner") - inner2 = other.join(index, how="inner") - expected = Index([6, 8, 10]) - tm.assert_index_equal(inner, inner2) - tm.assert_index_equal(inner, expected) - - left = index.join(other, how="left") - tm.assert_index_equal(left, index.astype(object)) - - left2 = other.join(index, how="left") - tm.assert_index_equal(left2, other) - - right = index.join(other, how="right") - tm.assert_index_equal(right, other) - - right2 = other.join(index, how="right") - tm.assert_index_equal(right2, index.astype(object)) - - def test_join_outer(self): - index = self.create_index() - other = Int64Index([7, 12, 25, 1, 2, 5]) - other_mono = Int64Index([1, 2, 5, 7, 12, 25]) - - # not monotonic - # 
guarantee of sortedness - res, lidx, ridx = index.join(other, how="outer", return_indexers=True) - noidx_res = index.join(other, how="outer") - tm.assert_index_equal(res, noidx_res) - - eres = Int64Index([0, 1, 2, 4, 5, 6, 7, 8, 10, 12, 14, 16, 18, 25]) - elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) - eridx = np.array( - [-1, 3, 4, -1, 5, -1, 0, -1, -1, 1, -1, -1, -1, 2], dtype=np.intp - ) - - assert isinstance(res, Int64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - # monotonic - res, lidx, ridx = index.join(other_mono, how="outer", return_indexers=True) - noidx_res = index.join(other_mono, how="outer") - tm.assert_index_equal(res, noidx_res) - - elidx = np.array([0, -1, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, 9, -1], dtype=np.intp) - eridx = np.array( - [-1, 0, 1, -1, 2, -1, 3, -1, -1, 4, -1, -1, -1, 5], dtype=np.intp - ) - assert isinstance(res, Int64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - class TestUInt64Index(NumericInt): @@ -1043,196 +855,6 @@ def test_intersection(self, index_large): ) tm.assert_index_equal(result, expected) - def test_join_inner(self, index_large): - other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) - other_mono = UInt64Index( - 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") - ) - - # not monotonic - res, lidx, ridx = index_large.join(other, how="inner", return_indexers=True) - - # no guarantee of sortedness, so sort for comparison purposes - ind = res.argsort() - res = res.take(ind) - lidx = lidx.take(ind) - ridx = ridx.take(ind) - - eres = UInt64Index(2 ** 63 + np.array([10, 25], dtype="uint64")) - elidx = np.array([1, 4], dtype=np.intp) - eridx = np.array([5, 2], dtype=np.intp) - - assert isinstance(res, UInt64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - # monotonic - res, lidx, ridx = index_large.join( - other_mono, how="inner", return_indexers=True - ) - - res2 = index_large.intersection(other_mono) - tm.assert_index_equal(res, res2) - - elidx = np.array([1, 4], dtype=np.intp) - eridx = np.array([3, 5], dtype=np.intp) - - assert isinstance(res, UInt64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_left(self, index_large): - other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) - other_mono = UInt64Index( - 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") - ) - - # not monotonic - res, lidx, ridx = index_large.join(other, how="left", return_indexers=True) - eres = index_large - eridx = np.array([-1, 5, -1, -1, 2], dtype=np.intp) - - assert isinstance(res, UInt64Index) - tm.assert_index_equal(res, eres) - assert lidx is None - tm.assert_numpy_array_equal(ridx, eridx) - - # monotonic - res, lidx, ridx = index_large.join(other_mono, how="left", return_indexers=True) - eridx = np.array([-1, 3, -1, -1, 5], dtype=np.intp) - - assert isinstance(res, UInt64Index) - tm.assert_index_equal(res, eres) - assert lidx is None - tm.assert_numpy_array_equal(ridx, eridx) - - # non-unique - idx = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5], dtype="uint64")) - idx2 = UInt64Index(2 ** 63 + np.array([1, 2, 5, 7, 9], dtype="uint64")) - res, lidx, ridx = idx2.join(idx, how="left", return_indexers=True) - - # 1 is in idx2, so it 
should be x2 - eres = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64")) - eridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) - elidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) - - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_right(self, index_large): - other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) - other_mono = UInt64Index( - 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") - ) - - # not monotonic - res, lidx, ridx = index_large.join(other, how="right", return_indexers=True) - eres = other - elidx = np.array([-1, -1, 4, -1, -1, 1], dtype=np.intp) - - tm.assert_numpy_array_equal(lidx, elidx) - assert isinstance(other, UInt64Index) - tm.assert_index_equal(res, eres) - assert ridx is None - - # monotonic - res, lidx, ridx = index_large.join( - other_mono, how="right", return_indexers=True - ) - eres = other_mono - elidx = np.array([-1, -1, -1, 1, -1, 4], dtype=np.intp) - - assert isinstance(other, UInt64Index) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_index_equal(res, eres) - assert ridx is None - - # non-unique - idx = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5], dtype="uint64")) - idx2 = UInt64Index(2 ** 63 + np.array([1, 2, 5, 7, 9], dtype="uint64")) - res, lidx, ridx = idx.join(idx2, how="right", return_indexers=True) - - # 1 is in idx2, so it should be x2 - eres = UInt64Index(2 ** 63 + np.array([1, 1, 2, 5, 7, 9], dtype="uint64")) - elidx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp) - eridx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp) - - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - def test_join_non_int_index(self, index_large): - other = Index( - 2 ** 63 + np.array([1, 5, 7, 10, 20], dtype="uint64"), dtype=object - ) - - outer = index_large.join(other, how="outer") - outer2 = other.join(index_large, how="outer") - expected = Index( - 2 ** 63 + np.array([0, 1, 5, 7, 10, 15, 20, 25], dtype="uint64") - ) - tm.assert_index_equal(outer, outer2) - tm.assert_index_equal(outer, expected) - - inner = index_large.join(other, how="inner") - inner2 = other.join(index_large, how="inner") - expected = Index(2 ** 63 + np.array([10, 20], dtype="uint64")) - tm.assert_index_equal(inner, inner2) - tm.assert_index_equal(inner, expected) - - left = index_large.join(other, how="left") - tm.assert_index_equal(left, index_large.astype(object)) - - left2 = other.join(index_large, how="left") - tm.assert_index_equal(left2, other) - - right = index_large.join(other, how="right") - tm.assert_index_equal(right, other) - - right2 = other.join(index_large, how="right") - tm.assert_index_equal(right2, index_large.astype(object)) - - def test_join_outer(self, index_large): - other = UInt64Index(2 ** 63 + np.array([7, 12, 25, 1, 2, 10], dtype="uint64")) - other_mono = UInt64Index( - 2 ** 63 + np.array([1, 2, 7, 10, 12, 25], dtype="uint64") - ) - - # not monotonic - # guarantee of sortedness - res, lidx, ridx = index_large.join(other, how="outer", return_indexers=True) - noidx_res = index_large.join(other, how="outer") - tm.assert_index_equal(res, noidx_res) - - eres = UInt64Index( - 2 ** 63 + np.array([0, 1, 2, 7, 10, 12, 15, 20, 25], dtype="uint64") - ) - elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) - eridx = np.array([-1, 3, 4, 0, 5, 1, -1, -1, 2], dtype=np.intp) - - assert isinstance(res, UInt64Index) - tm.assert_index_equal(res, eres) - 
tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - - # monotonic - res, lidx, ridx = index_large.join( - other_mono, how="outer", return_indexers=True - ) - noidx_res = index_large.join(other_mono, how="outer") - tm.assert_index_equal(res, noidx_res) - - elidx = np.array([0, -1, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp) - eridx = np.array([-1, 0, 1, 2, 3, 4, -1, -1, 5], dtype=np.intp) - - assert isinstance(res, UInt64Index) - tm.assert_index_equal(res, eres) - tm.assert_numpy_array_equal(lidx, elidx) - tm.assert_numpy_array_equal(ridx, eridx) - @pytest.mark.parametrize("dtype", ["int64", "uint64"]) def test_int_float_union_dtype(dtype): diff --git a/pandas/tests/indexes/timedeltas/test_join.py b/pandas/tests/indexes/timedeltas/test_join.py new file mode 100644 index 0000000000000..3e73ed35dae96 --- /dev/null +++ b/pandas/tests/indexes/timedeltas/test_join.py @@ -0,0 +1,37 @@ +import numpy as np + +from pandas import Index, Timedelta, timedelta_range +import pandas._testing as tm + + +class TestJoin: + def test_append_join_nondatetimeindex(self): + rng = timedelta_range("1 days", periods=10) + idx = Index(["a", "b", "c", "d"]) + + result = rng.append(idx) + assert isinstance(result[0], Timedelta) + + # it works + rng.join(idx, how="outer") + + def test_join_self(self, join_type): + index = timedelta_range("1 day", periods=10) + joined = index.join(index, how=join_type) + tm.assert_index_equal(index, joined) + + def test_does_not_convert_mixed_integer(self): + df = tm.makeCustomDataframe( + 10, + 10, + data_gen_f=lambda *args, **kwargs: np.random.randn(), + r_idx_type="i", + c_idx_type="td", + ) + str(df) + + cols = df.columns.join(df.index, how="outer") + joined = cols.join(df.columns) + assert cols.dtype == np.dtype("O") + assert cols.dtype == joined.dtype + tm.assert_index_equal(cols, joined) diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index 8a91c9d5e09c8..d4a94f8693081 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -91,27 +91,6 @@ def test_factorize(self): tm.assert_numpy_array_equal(arr, exp_arr) tm.assert_index_equal(idx, idx3) - def test_join_self(self, join_type): - index = timedelta_range("1 day", periods=10) - joined = index.join(index, how=join_type) - tm.assert_index_equal(index, joined) - - def test_does_not_convert_mixed_integer(self): - df = tm.makeCustomDataframe( - 10, - 10, - data_gen_f=lambda *args, **kwargs: randn(), - r_idx_type="i", - c_idx_type="td", - ) - str(df) - - cols = df.columns.join(df.index, how="outer") - joined = cols.join(df.columns) - assert cols.dtype == np.dtype("O") - assert cols.dtype == joined.dtype - tm.assert_index_equal(cols, joined) - def test_sort_values(self): idx = TimedeltaIndex(["4d", "1d", "2d"]) @@ -181,16 +160,6 @@ def test_hash_error(self): ): hash(index) - def test_append_join_nondatetimeindex(self): - rng = timedelta_range("1 days", periods=10) - idx = Index(["a", "b", "c", "d"]) - - result = rng.append(idx) - assert isinstance(result[0], Timedelta) - - # it works - rng.join(idx, how="outer") - def test_append_numpy_bug_1681(self): td = timedelta_range("1 days", "10 days", freq="2D")
We get a little bit of parametrization out of the deal, though not much. A sketch of the pattern is below.
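For context, a minimal sketch of the indirect-parametrization pattern the shared `test_join_self` relies on (the fixture body and keys here are illustrative stand-ins for the real `conftest.py` fixture):

```python
import pytest


@pytest.fixture
def index(request):
    # the real pandas fixture maps these keys to actual Index objects;
    # this stand-in only illustrates the mechanics
    data = {"int": [1, 2, 3], "uint": [1, 2, 3], "float": [1.0, 2.0, 3.0]}
    return data[request.param]


# indirect=True routes each string through the `index` fixture above,
# so the single test body runs once per index flavor
@pytest.mark.parametrize("index", ["int", "uint", "float"], indirect=True)
def test_one_flavor_at_a_time(index):
    # each parametrized run sees a different index flavor
    assert len(index) == 3
```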
https://api.github.com/repos/pandas-dev/pandas/pulls/32171
2020-02-22T02:54:41Z
2020-02-22T15:40:55Z
2020-02-22T15:40:55Z
2020-02-22T15:42:16Z
DOC: Move testing and pandas_development_faq pages from wiki to doc #30232 & #20501
diff --git a/doc/source/development/index.rst b/doc/source/development/index.rst
index f8a6bb6deb52d..5b4fcc5e74e83 100644
--- a/doc/source/development/index.rst
+++ b/doc/source/development/index.rst
@@ -14,10 +14,12 @@ Development
    contributing
    code_style
+   testing
    maintaining
    internals
    extending
    developer
    policies
    roadmap
+   pandas_development_faq
    meeting
diff --git a/doc/source/development/pandas_development_faq.rst b/doc/source/development/pandas_development_faq.rst
new file mode 100644
index 0000000000000..4cd7acd139c1a
--- /dev/null
+++ b/doc/source/development/pandas_development_faq.rst
@@ -0,0 +1,203 @@
+.. _pandas_development_faq:
+
+{{ header }}
+
+======================
+Pandas Development FAQ
+======================
+
+.. contents:: Table of contents:
+   :local:
+
+Purpose
+=======
+
+Based on https://github.com/pydata/pandas/pull/4404#issuecomment-22864665 this
+page gathers oft-asked questions/comments from contributors to make the
+contribution process a bit less painful.
+
+The aim is to make it easier for
+
+* Core developers to give advice & accept new code contributions.
+* New contributors to find an easier way in for quick and efficient bug fixes
+  or feature additions.
+
+While some questions/comments/advice may be applicable to general programming,
+these are things that directly relate to ``pandas`` development.
+
+* `**PR == pull request** <https://help.github.com/articles/using-pull-requests>`_
+* **core developer:** A person contributing at very high frequency & who is
+  familiar with the code base and development process of ``pandas``.
+* **contributors:** The occasional contributor, maybe from a specific domain,
+  contributes bug fixes, features or documentation with low frequency, may not
+  be an everyday programmer (e.g. scientists or engineers using
+  pandas for data processing) and looks at things from an end-user perspective.
+
+Pandas Development & Release Process
+====================================
+
+Testing
+-------
+
+**Q:** What are some recommendations for writing unit tests?
+
+**A:** Your test should be self-contained. That is, it should preferably test a
+single thing, e.g., a method that you've added to the ``DataFrame`` class. Your
+test function/method should start with ``test_`` and the rest of the name should
+be related to whatever functionality you're testing, like ``test_replace_with_dict_regex``.
+
+**Q:** Help! I can't get the tests to run!
+
+**A:** You probably either have multiple Python versions installed and there's
+an ABI (application binary interface) issue or you forgot to build the extension
+modules in place. The latter can be done with
+
+.. code-block:: shell
+
+    python setup.py build_ext --inplace
+
+from the ``pandas`` directory.
+
+Travis
+------
+
+**Q:** Where do I need to change the settings in my GitHub configuration and/or
+Travis configuration for Travis to start builds from my fork?
+
+**A:** To be filled out.
+
+**Q:** Why do I need a Travis file in my repo if it's already in the head repository?
+
+**A:** Because we're not using subversion. Okay, seriously, it's because as far
+as ``git`` is concerned *your* repository is the *only* one that exists. There's
+really no such thing as a "head" repository in the eyes of ``git``; those are concepts
+that we impose on it to make collaboration more effective and easier. This is one
+of the nice aspects of `distributed version control <http://en.wikipedia.org/wiki/Distributed_revision_control>`_.
+
+Documentation
+-------------
+
+**Q:** Does Travis build documentation?
+
+**A:** Currently, no. There are some issues surrounding Sphinx error reporting.
+We are investigating ways to solve this problem.
+
+Workflow
+--------
+
+* What is a typical workflow on my local fork?
+* Shall I work in a virtual environment?
+* Shall I work in a virtual environment and then copy my changes over into a
+  clean local fork of my own repo?
+
+**Q:** Who will be responsible for evaluating my PR?
+
+**A:** Technically, anyone with push rights to the ``pydata/pandas`` repo can evaluate
+it. In practice, there are a handful of people who are constantly watching the ``pandas``
+repo for new PRs, so most likely it'll be one of them that evaluates it. I'm not
+going to list names, but it's not that hard to figure out...
+
+Criteria for PR
+---------------
+
+**Q:** What are the criteria for acceptance of a PR?
+
+**A:** First and foremost, your fix **must not break any existing functionality**;
+one indicator of this is that your Travis build passes. Second, just give it some
+time. Everyone is busy and @wesm has not (yet?) amassed a ``pandas`` development army.
+
+**Q:** Do I need to open an issue first?
+
+**A:** Not necessarily. If you want to submit a documentation change, e.g., a
+typo fix, then opening an issue is not necessary.
+
+Coding Style
+------------
+
+**Q:** What level of commenting is accepted?
+
+**A:** The common-sense level. Don't overdo it on the comments, and make sure
+if you *do* comment that your comments explain *what* your code is doing, not
+*how* it is doing it (that's what code is for).
+
+Obligatory example:
+
+BAD:
+
+.. code-block:: python
+
+    # increment i
+    i += 1
+
+
+GOOD:
+
+.. code-block:: python
+
+    # add a person to the person count
+    i += 1
+
+
+Debugging
+---------
+
+**Q:** How can I debug without adding loads of ``print`` statements/calls everywhere?
+
+**A:** You can use the Python standard library's ``pdb`` and set a breakpoint.
+Put ``import pdb; pdb.set_trace()`` at the line where you want to stop.
+`ipdb <https://github.com/gotcha/ipdb>`_ is ``pdb`` with tab-completion and a few other
+bells and whistles, making debugging less painful. There's also `ipdbplugin <https://github.com/flavioamieiro/nose-ipdb>`_, which allows you to drop into ``ipdb`` from
+`nose <https://github.com/nose-devs/nose>`_ when a test fails via
+
+.. code-block:: shell
+
+    nosetests --ipdb  # or --ipdb-failures
+
+**Q:** Would a logging hook be a solution?
+
+**A:** That's probably a bit overkill. See the suggestions above.
+
+Pandas Library
+==============
+
+Source Comments
+---------------
+
+* It would be nice to add more source comments to quickly understand the context
+  when chiming in to fix an issue.
+
+Testing
+-------
+
+**Q:** Why don't test functions have a docstring?
+
+**A:** If your tests are self-contained and aren't `sprawling ecosystems of spaghetti <http://cdn.memegenerator.net/instances/250x250/26336623.jpg>`_ then having a docstring
+is redundant. Also, the test name is usually (and should be!) very descriptive.
+Remember there's no character limit for variable names. We're not using FORTRAN.
+
+**Q:** ``DataFrame`` and other ``pandas`` objects often have many properties/methods.
+What is the level of detail that I should consider when I'm writing my test(s)?
+
+**A:** See the previous question/answer. Strive to test one and only one thing.
+You could even separate out your tests by their formal parameters if you want
+things to be *really* self-contained.
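+
+A minimal sketch of such a focused, self-contained test (the name echoes the
+``test_replace_with_dict_regex`` example above; the frame and regex here are
+purely illustrative):
+
+.. code-block:: python
+
+    import pandas.util.testing as tm
+    from pandas import DataFrame
+
+    def test_replace_with_dict_regex():
+        # exactly one behaviour under test: dict-of-regex replacement
+        df = DataFrame({"a": ["foo", "bar"]})
+        result = df.replace({"a": {r"^ba.$": "baz"}}, regex=True)
+        expected = DataFrame({"a": ["foo", "baz"]})
+        tm.assert_frame_equal(result, expected)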
+
+**Q:** Should I consider possible corner cases of my implementation?
+
+**A:** The answer is a resounding **YES**! In some cases you may come across
+something that is very pathological. In those cases you should ask a core developer.
+
+Complexity
+----------
+
+* Some modules (e.g. io/parsers.py) seem to have grown very complex.
+  It is very time consuming to find out what is done where just for fixing a small bug.
+* Splitting them into several modules would be good.
+* More in-code comments explaining why something is done, under which conditions,
+  and with what expected result would help.
+
+
+Docstrings
+----------
+
+* Even internal functions should have a simple one-line docstring.
diff --git a/doc/source/development/testing.rst b/doc/source/development/testing.rst
new file mode 100644
index 0000000000000..269638a062371
--- /dev/null
+++ b/doc/source/development/testing.rst
@@ -0,0 +1,246 @@
+.. _testing:
+
+{{ header }}
+
+=======
+Testing
+=======
+
+.. contents:: Table of contents:
+   :local:
+
+First off - thank you for writing test cases - they're really important in
+developing pandas!
+
+Typical Imports
+===============
+
+.. code-block:: python
+
+    import nose
+    import unittest
+    import pandas.util.testing as tm
+    from pandas.util.testing import makeCustomDataframe as mkdf
+
+Making your tests behave well
+=============================
+
+``pandas`` committers run test cases after every change (as does Travis), so it's
+important that you make your tests well-behaved. Balancing that, it's important
+that your test cases cover the functionality of your addition, so that when
+others make changes, they can be confident that they aren't introducing errors
+in your code. This includes:
+
+1. marking network-using test cases with ``@network`` (see below).
+2. marking slow tests with ``@slow``.
+3. using smaller test cases where it makes sense (for example, if you're
+   testing a ``numexpr`` evaluation, you can generally just set ``expr._MIN_ELEMENTS = 0``
+   and go ahead, rather than needing to test on a frame of at least 10K
+   elements).
+4. making sure to skip tests (or even test files) if a required import is not
+   available.
+
+In addition, stylistically, the preference is to group multiple related tests
+under one test function and *not* to use the generator functionality of nose,
+in order to keep the actual number of tests small.
+
+E.g.:
+
+.. code-block:: python
+
+    @slow
+    def test_million_element_arithmetic():
+        df = mkdf(100000, 100000)
+        tm.assert_frame_equal(df.mod(df) * df * 0, df * 0)
+
+Additional imports
+------------------
+
+When creating a subclass of ``unittest.TestCase``, there are useful instance
+methods such as ``self.assertEqual(a, b)`` that allow you to test the equality
+of two objects. These are not available *as functions* in the Python standard
+library. However, these methods are available as functions in the ``nose.tools``
+module. To use ``self.assertEqual(a, b)`` in a plain function, you would put
+``from nose.tools import assert_equal`` somewhere in the file and then call it
+wherever you need it.
+
+**Important**: make sure to document failure conditions (and use
+``assertRaisesRegexp`` where necessary to make it clearer *which* exception
+you want to get). Testing for ``Exception`` is strongly discouraged.
+
+Testing using a File
+====================
+
+The ``tm.ensure_clean`` context manager allows safe read/write access to a
+temporary file, with a generated filename (or your filename if provided).
+The file will be automatically deleted when the context block is exited.
+
+.. code-block:: python
+
+    with tm.ensure_clean('my_file_path') as path:
+        # do something with the path
+
+Testing for Exceptions
+======================
+
+Generally, it's not acceptable to just check that something raises ``Exception``,
+because that tends to mask a lot of errors. For example, if a function's signature
+changes between releases, you could be catching the wrong kind of error altogether.
+Going forward, the goal is to have no test cases that pass if ``Exception`` or a
+subclass is raised (we're not quite there yet).
+
+Another element that is helpful is to use ``assertRaisesRegexp`` from ``pandas.util.testing``.
+It lets you be very explicit about what you expect (and prevents hiding errors like
+changed signatures, etc.):
+
+.. code-block:: python
+
+    with tm.assertRaises(ValueError):
+        raise ValueError("an error")
+    with tm.assertRaisesRegexp(TypeError, 'invalid literal'):
+        int('abc')
+
+Handling tests requiring network connectivity
+=============================================
+
+**Please run your tests without an internet connection before submitting a PR!**
+It's really important that your tests *not* fail when you have no internet
+connection; they should be skipped without a network connection. In general,
+network tests are finicky. All tests that involve networking *must* be marked as
+"network", either by using the ``network`` decorator or the ``with_connectivity_check``
+decorator from ``pandas.util.testing``. Unless you *absolutely* need to test that
+a function/method correctly handles connectivity errors, you should use the
+``network`` decorator, which will catch all ``IOError`` exceptions (which includes
+``URLError``). If you believe that your test case will only fail if you simply
+aren't connected to the internet, you can use ``with_connectivity_check``:
+
+.. code-block:: python
+
+    >>> @with_connectivity_check
+    ... def test_my_function():
+    ...     urllib2.urlopen("funny://rabbithead")
+    >>> test_my_function()
+    Traceback (most recent call last):
+    ...
+    URLError...#some message
+
+If you want to have the decorator always raise errors, just pass ``raise_on_error=True``
+to the ``network`` decorator:
+
+.. code-block:: python
+
+    >>> @network(raise_on_error=True)
+    ... def test2():
+    ...     raise URLError("WRONG!")
+    Traceback (most recent call last):
+    ...
+    URLError: WRONG!
+
+The ``with_connectivity_check`` decorator defaults to checking ``http://www.google.com``
+to determine whether it is connected. But if you had a test that depends on yahoo,
+it might make sense to check yahoo instead:
+
+.. code-block:: python
+
+    @with_connectivity_check("http://www.yahoo.com")
+    def some_test_with_yahoo():
+        # do something etc.
+
+It's a good idea to break up network tests into at least two parts:
+
+1. Tests that check that the code works and gracefully handles errors.
+2. Tests that really only matter if you have network connectivity (like making
+   sure that the current Google Analytics feed is being processed properly).
+
+For (1), you might want to use ``@network(raise_on_error=True)``, because those
+tests should *not* fail without connectivity.
+
+For (2), you should definitely suppress network errors, and, particularly if you
+have a slow test, you may even want to check for connectivity *first* (so the
+test never even runs if there isn't a network connection). You can do that easily
+by passing ``check_before_test=True`` to ``with_connectivity_check``:
+
+.. code-block:: python
+
+    @with_connectivity_check("http://www.somespecificsite.com", check_before_test=True)
+    def some_test():
+        for i in range(1000):
+            test_some_really_long_function(i)
+
+Testing for Warnings
+====================
+
+To test for warnings, you can use the ``assert_produces_warning`` context manager,
+which checks that your code produces a warning.
+
+Probably the most common case is just a test case for a ``DeprecationWarning``:
+
+.. code-block:: python
+
+    >>> with assert_produces_warning(DeprecationWarning):
+    ...     some_function_that_raises_deprecation_warning()
+
+With no arguments, it checks that any warning is raised.
+
+.. code-block:: python
+
+    >>> import warnings
+    >>> with assert_produces_warning():
+    ...     warnings.warn(UserWarning())
+    ...
+
+When passed ``False``, it checks that *no* warnings are raised.
+
+.. code-block:: python
+
+    >>> with assert_produces_warning(False):
+    ...     warnings.warn(RuntimeWarning())
+    ...
+    Traceback (most recent call last):
+    ...
+    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
+
+Finally, if you pass it a warning class, it will check that the *specific*
+class of warning was raised and no other.
+
+.. code-block:: python
+
+    >>> with assert_produces_warning(UserWarning):
+    ...     warnings.warn(RuntimeWarning())
+    Traceback (most recent call last):
+    ...
+    AssertionError: Did not see expected warning of class 'UserWarning'.
+
+Reading from either a URL or zip file
+=====================================
+
+Reading from a URL
+------------------
+
+.. code-block:: python
+
+    from pandas.io.common import urlopen
+    with urlopen('http://www.google.com') as url:
+        raw_text = url.read()
+
+
+Reading a file named ``file.txt`` that's inside of a zip file named ``file.zip``
+--------------------------------------------------------------------------------
+
+.. code-block:: python
+
+    from pandas.io.common import ZipFile
+    with ZipFile('file.zip') as zf:
+        raw_text = zf.read('file.txt')
+
+Hook up travis-ci
+=================
+
+We use Travis for testing the entire library across various Python versions.
+If you `hook up your fork <http://about.travis-ci.org/docs/user/getting-started/>`_
+to run Travis, then whether your pull request passes or fails the testing suite
+is displayed prominently. This is incredibly helpful.
+
+If it shows that it passes, great! We can consider merging. If there's a failure,
+this lets you and us know there is something wrong, and that the change needs some
+attention before it can be considered for merging.
+
+Sometimes Travis will say a change failed for reasons unrelated to your pull
+request. For example, there could be a build error or network error. To get Travis
+to retest your pull request, do the following:
+
+.. code-block:: shell
+
+    git commit --amend -C HEAD
+    git push origin <yourbranch> -f
- [ ] closes #20501 - [ ] tests added / passed - [ ] passes `black pandas` - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/32170
2020-02-22T01:56:57Z
2020-02-22T02:30:55Z
null
2020-02-22T02:30:55Z
BUG: disallow invalid dtype to CategoricalDtype._from_values_or_dtype
diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py
index d93ad973ff02d..0730de934b56c 100644
--- a/pandas/core/dtypes/dtypes.py
+++ b/pandas/core/dtypes/dtypes.py
@@ -324,6 +324,8 @@ def _from_values_or_dtype(
             raise ValueError(
                 "Cannot specify `categories` or `ordered` together with `dtype`."
             )
+        elif not isinstance(dtype, CategoricalDtype):
+            raise ValueError(f"Cannot construct CategoricalDtype from {dtype}")
         elif is_categorical(values):
             # If no "dtype" was passed, use the one from "values", but honor
             # the "ordered" and "categories" arguments
diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py
index dd99b81fb6764..9eb5fda87d2d2 100644
--- a/pandas/tests/dtypes/test_dtypes.py
+++ b/pandas/tests/dtypes/test_dtypes.py
@@ -127,6 +127,11 @@ def test_from_values_or_dtype_raises(self, values, categories, ordered, dtype):
         with pytest.raises(ValueError, match=msg):
             CategoricalDtype._from_values_or_dtype(values, categories, ordered, dtype)

+    def test_from_values_or_dtype_invalid_dtype(self):
+        msg = "Cannot construct CategoricalDtype from <class 'object'>"
+        with pytest.raises(ValueError, match=msg):
+            CategoricalDtype._from_values_or_dtype(None, None, None, object)
+
     def test_is_dtype(self, dtype):
         assert CategoricalDtype.is_dtype(dtype)
         assert CategoricalDtype.is_dtype("category")
diff --git a/pandas/tests/indexes/common.py b/pandas/tests/indexes/common.py
index 2073aa0727809..c9e762af3a303 100644
--- a/pandas/tests/indexes/common.py
+++ b/pandas/tests/indexes/common.py
@@ -605,7 +605,8 @@ def test_equals(self, indices):
         assert not indices.equals(np.array(indices))

         # Cannot pass in non-int64 dtype to RangeIndex
-        if not isinstance(indices, RangeIndex):
+        if not isinstance(indices, (RangeIndex, CategoricalIndex)):
+            # TODO: CategoricalIndex can be re-allowed following GH#32167
             same_values = Index(indices, dtype=object)
             assert indices.equals(same_values)
             assert same_values.equals(indices)
- [ ] closes #xxxx - [x] tests added / passed - [x] passes `black pandas` - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry xref #32167. Together with that, this will allow us to simplify the CategoricalIndex constructor.
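For illustration, a minimal sketch of the new behaviour (it mirrors the added test; `object` stands in for any dtype that isn't a `CategoricalDtype`):

```python
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype

# a non-categorical `dtype` argument is now rejected up front
with pytest.raises(ValueError, match="Cannot construct CategoricalDtype"):
    CategoricalDtype._from_values_or_dtype(None, None, None, object)

# an actual CategoricalDtype still passes through unchanged
dtype = CategoricalDtype(["a", "b"], ordered=True)
assert CategoricalDtype._from_values_or_dtype(dtype=dtype) == dtype
```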
https://api.github.com/repos/pandas-dev/pandas/pulls/32169
2020-02-22T00:34:46Z
2020-02-22T16:31:17Z
2020-02-22T16:31:17Z
2020-02-22T16:33:38Z
REF: de-duplicate object-dtype handling
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 14ee21ea5614c..e102e4ae14d86 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -311,14 +311,7 @@ def __new__(
             # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
             from pandas.core.indexes.interval import IntervalIndex

-            closed = kwargs.pop("closed", None)
-            if is_dtype_equal(_o_dtype, dtype):
-                return IntervalIndex(
-                    data, name=name, copy=copy, closed=closed, **kwargs
-                ).astype(object)
-            return IntervalIndex(
-                data, dtype=dtype, name=name, copy=copy, closed=closed, **kwargs
-            )
+            return _maybe_asobject(dtype, IntervalIndex, data, copy, name, **kwargs)

         elif (
             is_datetime64_any_dtype(data)
@@ -328,39 +321,19 @@ def __new__(
             # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
             from pandas import DatetimeIndex

-            if is_dtype_equal(_o_dtype, dtype):
-                # GH#23524 passing `dtype=object` to DatetimeIndex is invalid,
-                # will raise in the where `data` is already tz-aware. So
-                # we leave it out of this step and cast to object-dtype after
-                # the DatetimeIndex construction.
-                # Note we can pass copy=False because the .astype below
-                # will always make a copy
-                return DatetimeIndex(data, copy=False, name=name, **kwargs).astype(
-                    object
-                )
-            else:
-                return DatetimeIndex(data, copy=copy, name=name, dtype=dtype, **kwargs)
+            return _maybe_asobject(dtype, DatetimeIndex, data, copy, name, **kwargs)

         elif is_timedelta64_dtype(data) or is_timedelta64_dtype(dtype):
             # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
             from pandas import TimedeltaIndex

-            if is_dtype_equal(_o_dtype, dtype):
-                # Note we can pass copy=False because the .astype below
-                # will always make a copy
-                return TimedeltaIndex(data, copy=False, name=name, **kwargs).astype(
-                    object
-                )
-            else:
-                return TimedeltaIndex(data, copy=copy, name=name, dtype=dtype, **kwargs)
+            return _maybe_asobject(dtype, TimedeltaIndex, data, copy, name, **kwargs)

         elif is_period_dtype(data) or is_period_dtype(dtype):
             # Delay import for perf. https://github.com/pandas-dev/pandas/pull/31423
             from pandas import PeriodIndex

-            if is_dtype_equal(_o_dtype, dtype):
-                return PeriodIndex(data, copy=False, name=name, **kwargs).astype(object)
-            return PeriodIndex(data, dtype=dtype, copy=copy, name=name, **kwargs)
+            return _maybe_asobject(dtype, PeriodIndex, data, copy, name, **kwargs)

         # extension dtype
         elif is_extension_array_dtype(data) or is_extension_array_dtype(dtype):
@@ -5765,3 +5738,40 @@ def _try_convert_to_int_array(
             pass

     raise ValueError
+
+
+def _maybe_asobject(dtype, klass, data, copy: bool, name: Label, **kwargs):
+    """
+    If an object dtype was specified, create the non-object Index
+    and then convert it to object.
+
+    Parameters
+    ----------
+    dtype : np.dtype, ExtensionDtype, str
+    klass : Index subclass
+    data : list-like
+    copy : bool
+    name : hashable
+    **kwargs
+
+    Returns
+    -------
+    Index
+
+    Notes
+    -----
+    We assume that calling .astype(object) on this klass will make a copy.
+    """
+
+    # GH#23524 passing `dtype=object` to DatetimeIndex is invalid,
+    # will raise in the case where `data` is already tz-aware. So
+    # we leave it out of this step and cast to object-dtype after
+    # the DatetimeIndex construction.
+
+    if is_dtype_equal(_o_dtype, dtype):
+        # Note we can pass copy=False because the .astype below
+        # will always make a copy
+        index = klass(data, copy=False, name=name, **kwargs)
+        return index.astype(object)
+
+    return klass(data, dtype=dtype, copy=copy, name=name, **kwargs)
We'll also use this for the categorical case following #32167.
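For illustration, a minimal sketch of the behaviour the helper preserves (object-dtype requests still build the specialized index first and then cast; the timedelta case is just one of the four branches):

```python
import pandas as pd

# timedelta64 data with dtype=object is routed through TimedeltaIndex
# and then cast with .astype(object), same as before the refactor
idx = pd.Index(pd.timedelta_range("1 day", periods=3), dtype=object)

assert idx.dtype == object               # object-dtype Index, not TimedeltaIndex
assert isinstance(idx[0], pd.Timedelta)  # elements survive the cast intact
```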
https://api.github.com/repos/pandas-dev/pandas/pulls/32168
2020-02-22T00:26:58Z
2020-02-22T16:31:39Z
2020-02-22T16:31:39Z
2020-02-22T16:32:24Z