title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
Check import warning
diff --git a/ci/code_checks.sh b/ci/code_checks.sh index a8a86eedb0549..953547f72d3e1 100755 --- a/ci/code_checks.sh +++ b/ci/code_checks.sh @@ -153,8 +153,8 @@ fi ### CODE ### if [[ -z "$CHECK" || "$CHECK" == "code" ]]; then - MSG='Check for modules that pandas should not import' ; echo $MSG - python -c " + MSG='Check import. No warnings, and blacklist some optional dependencies' ; echo $MSG + python -W error -c " import sys import pandas diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 5108e23c53b5a..f9c659106a516 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -139,6 +139,7 @@ def lfilter(*args, **kwargs): Hashable = collections.abc.Hashable Iterable = collections.abc.Iterable Mapping = collections.abc.Mapping + MutableMapping = collections.abc.MutableMapping Sequence = collections.abc.Sequence Sized = collections.abc.Sized Set = collections.abc.Set @@ -200,6 +201,7 @@ def get_range_parameters(data): Hashable = collections.Hashable Iterable = collections.Iterable Mapping = collections.Mapping + MutableMapping = collections.MutableMapping Sequence = collections.Sequence Sized = collections.Sized Set = collections.Set diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index 65836140146bd..86bb4e4b94382 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -1,4 +1,3 @@ -from collections import MutableMapping from datetime import datetime, time from functools import partial @@ -18,6 +17,7 @@ from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import notna +from pandas import compat from pandas.core import algorithms @@ -570,7 +570,7 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False, from pandas import Series values = convert_listlike(arg._values, True, format) result = Series(values, index=arg.index, name=arg.name) - elif isinstance(arg, (ABCDataFrame, MutableMapping)): + elif 
isinstance(arg, (ABCDataFrame, compat.MutableMapping)): result = _assemble_from_unit_mappings(arg, errors=errors) elif isinstance(arg, ABCIndexClass): cache_array = _maybe_cache(arg, format, cache, convert_listlike)
We usually catch uses of deprecated modules, but I think this may have slipped through since it's at import time.
https://api.github.com/repos/pandas-dev/pandas/pulls/24102
2018-12-04T22:07:01Z
2018-12-05T12:15:23Z
2018-12-05T12:15:23Z
2018-12-05T12:15:34Z
REF: Separate out DataFrame/Series Construction Helpers
diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index ac1c34edba914..938ca53b5fdce 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -380,12 +380,12 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, dtype = CategoricalDtype(values.categories, dtype.ordered) elif not isinstance(values, (ABCIndexClass, ABCSeries)): - # _sanitize_array coerces np.nan to a string under certain versions + # sanitize_array coerces np.nan to a string under certain versions # of numpy values = maybe_infer_to_datetimelike(values, convert_dates=True) if not isinstance(values, np.ndarray): values = _convert_to_list_like(values) - from pandas.core.series import _sanitize_array + from pandas.core.internals.construction import sanitize_array # By convention, empty lists result in object dtype: if len(values) == 0: sanitize_dtype = 'object' @@ -394,7 +394,7 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, null_mask = isna(values) if null_mask.any(): values = [values[idx] for idx in np.where(~null_mask)[0]] - values = _sanitize_array(values, None, dtype=sanitize_dtype) + values = sanitize_array(values, None, dtype=sanitize_dtype) if dtype.categories is None: try: @@ -2442,12 +2442,12 @@ def isin(self, values): >>> s.isin(['lama']) array([ True, False, True, False, True, False]) """ - from pandas.core.series import _sanitize_array + from pandas.core.internals.construction import sanitize_array if not is_list_like(values): raise TypeError("only list-like objects are allowed to be passed" " to isin(), you passed a [{values_type}]" .format(values_type=type(values).__name__)) - values = _sanitize_array(values, None, None) + values = sanitize_array(values, None, None) null_mask = np.asarray(isna(values)) code_values = self.categories.get_indexer(values) code_values = code_values[null_mask | (code_values >= 0)] diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py 
index 96724b6c4b362..134466d769ada 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -631,8 +631,8 @@ def __init__(self, data, sparse_index=None, index=None, fill_value=None, if not is_array_like(data): try: # probably shared code in sanitize_series - from pandas.core.series import _sanitize_array - data = _sanitize_array(data, index=None) + from pandas.core.internals.construction import sanitize_array + data = sanitize_array(data, index=None) except ValueError: # NumPy may raise a ValueError on data like [1, []] # we retry with object dtype here. diff --git a/pandas/core/frame.py b/pandas/core/frame.py index b9f32042924b9..2c1fa5ef4439e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -31,7 +31,7 @@ validate_axis_style_args) from pandas import compat -from pandas.compat import (range, map, zip, lrange, lmap, lzip, StringIO, u, +from pandas.compat import (range, map, zip, lmap, lzip, StringIO, u, OrderedDict, PY36, raise_with_traceback, string_and_binary_types) from pandas.compat.numpy import function as nv @@ -39,7 +39,6 @@ from pandas.core.dtypes.cast import ( maybe_upcast, cast_scalar_to_array, - construct_1d_arraylike_from_scalar, infer_dtype_from_scalar, maybe_cast_to_datetime, maybe_infer_to_datetimelike, @@ -50,11 +49,9 @@ maybe_upcast_putmask, find_common_type) from pandas.core.dtypes.common import ( - is_categorical_dtype, is_object_dtype, is_extension_type, is_extension_array_dtype, - is_datetime64tz_dtype, is_datetime64_any_dtype, is_bool_dtype, is_integer_dtype, @@ -88,12 +85,13 @@ from pandas.core.indexes import base as ibase from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.period import PeriodIndex -from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexing import (maybe_droplevels, convert_to_index_sliceable, check_bool_indexer) -from pandas.core.internals import (BlockManager, - create_block_manager_from_arrays, - create_block_manager_from_blocks) 
+from pandas.core.internals import BlockManager +from pandas.core.internals.construction import ( + masked_rec_array_to_mgr, get_names_from_index, to_arrays, + reorder_arrays, init_ndarray, init_dict, + arrays_to_mgr, sanitize_index) from pandas.core.series import Series from pandas.io.formats import console @@ -386,13 +384,13 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, mgr = self._init_mgr(data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy) elif isinstance(data, dict): - mgr = self._init_dict(data, index, columns, dtype=dtype) + mgr = init_dict(data, index, columns, dtype=dtype) elif isinstance(data, ma.MaskedArray): import numpy.ma.mrecords as mrecords # masked recarray if isinstance(data, mrecords.MaskedRecords): - mgr = _masked_rec_array_to_mgr(data, index, columns, dtype, - copy) + mgr = masked_rec_array_to_mgr(data, index, columns, dtype, + copy) # a masked array else: @@ -402,8 +400,8 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, data[mask] = fill_value else: data = data.copy() - mgr = self._init_ndarray(data, index, columns, dtype=dtype, - copy=copy) + mgr = init_ndarray(data, index, columns, dtype=dtype, + copy=copy) elif isinstance(data, (np.ndarray, Series, Index)): if data.dtype.names: @@ -411,13 +409,13 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, data = {k: data[k] for k in data_columns} if columns is None: columns = data_columns - mgr = self._init_dict(data, index, columns, dtype=dtype) + mgr = init_dict(data, index, columns, dtype=dtype) elif getattr(data, 'name', None) is not None: - mgr = self._init_dict({data.name: data}, index, columns, - dtype=dtype) + mgr = init_dict({data.name: data}, index, columns, + dtype=dtype) else: - mgr = self._init_ndarray(data, index, columns, dtype=dtype, - copy=copy) + mgr = init_ndarray(data, index, columns, dtype=dtype, + copy=copy) # For data is list-like, or Iterable (will consume into list) elif (isinstance(data, 
compat.Iterable) @@ -428,25 +426,25 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1: if is_named_tuple(data[0]) and columns is None: columns = data[0]._fields - arrays, columns = _to_arrays(data, columns, dtype=dtype) + arrays, columns = to_arrays(data, columns, dtype=dtype) columns = ensure_index(columns) # set the index if index is None: if isinstance(data[0], Series): - index = _get_names_from_index(data) + index = get_names_from_index(data) elif isinstance(data[0], Categorical): index = ibase.default_index(len(data[0])) else: index = ibase.default_index(len(data)) - mgr = _arrays_to_mgr(arrays, columns, index, columns, - dtype=dtype) + mgr = arrays_to_mgr(arrays, columns, index, columns, + dtype=dtype) else: - mgr = self._init_ndarray(data, index, columns, dtype=dtype, - copy=copy) + mgr = init_ndarray(data, index, columns, dtype=dtype, + copy=copy) else: - mgr = self._init_dict({}, index, columns, dtype=dtype) + mgr = init_dict({}, index, columns, dtype=dtype) else: try: arr = np.array(data, dtype=dtype, copy=copy) @@ -458,8 +456,8 @@ def __init__(self, data=None, index=None, columns=None, dtype=None, if arr.ndim == 0 and index is not None and columns is not None: values = cast_scalar_to_array((len(index), len(columns)), data, dtype=dtype) - mgr = self._init_ndarray(values, index, columns, - dtype=values.dtype, copy=False) + mgr = init_ndarray(values, index, columns, + dtype=values.dtype, copy=False) else: raise ValueError('DataFrame constructor not properly called!') @@ -470,113 +468,13 @@ def _init_dict(self, data, index, columns, dtype=None): Segregate Series based on type and coerce into matrices. Needs to handle a lot of exceptional cases. 
""" - if columns is not None: - arrays = Series(data, index=columns, dtype=object) - data_names = arrays.index - - missing = arrays.isnull() - if index is None: - # GH10856 - # raise ValueError if only scalars in dict - index = extract_index(arrays[~missing]) - else: - index = ensure_index(index) - - # no obvious "empty" int column - if missing.any() and not is_integer_dtype(dtype): - if dtype is None or np.issubdtype(dtype, np.flexible): - # 1783 - nan_dtype = object - else: - nan_dtype = dtype - v = construct_1d_arraylike_from_scalar(np.nan, len(index), - nan_dtype) - arrays.loc[missing] = [v] * missing.sum() - - else: - keys = com.dict_keys_to_ordered_list(data) - columns = data_names = Index(keys) - arrays = [data[k] for k in keys] - - return _arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype) + return init_dict(data, index, columns, dtype=dtype) + # TODO: Can we get rid of this as a method? def _init_ndarray(self, values, index, columns, dtype=None, copy=False): # input must be a ndarray, list, Series, index - - if isinstance(values, Series): - if columns is None: - if values.name is not None: - columns = [values.name] - if index is None: - index = values.index - else: - values = values.reindex(index) - - # zero len case (GH #2234) - if not len(values) and columns is not None and len(columns): - values = np.empty((0, 1), dtype=object) - - # helper to create the axes as indexes - def _get_axes(N, K, index=index, columns=columns): - # return axes or defaults - - if index is None: - index = ibase.default_index(N) - else: - index = ensure_index(index) - - if columns is None: - columns = ibase.default_index(K) - else: - columns = ensure_index(columns) - return index, columns - - # we could have a categorical type passed or coerced to 'category' - # recast this to an _arrays_to_mgr - if (is_categorical_dtype(getattr(values, 'dtype', None)) or - is_categorical_dtype(dtype)): - - if not hasattr(values, 'dtype'): - values = _prep_ndarray(values, copy=copy) 
- values = values.ravel() - elif copy: - values = values.copy() - - index, columns = _get_axes(len(values), 1) - return _arrays_to_mgr([values], columns, index, columns, - dtype=dtype) - elif (is_datetime64tz_dtype(values) or - is_extension_array_dtype(values)): - # GH19157 - if columns is None: - columns = [0] - return _arrays_to_mgr([values], columns, index, columns, - dtype=dtype) - - # by definition an array here - # the dtypes will be coerced to a single dtype - values = _prep_ndarray(values, copy=copy) - - if dtype is not None: - if not is_dtype_equal(values.dtype, dtype): - try: - values = values.astype(dtype) - except Exception as orig: - e = ValueError("failed to cast to '{dtype}' (Exception " - "was: {orig})".format(dtype=dtype, - orig=orig)) - raise_with_traceback(e) - - index, columns = _get_axes(*values.shape) - values = values.T - - # if we don't have a dtype specified, then try to convert objects - # on the entire block; this is to convert if we have datetimelike's - # embedded in an object type - if dtype is None and is_object_dtype(values): - values = maybe_infer_to_datetimelike(values) - - return create_block_manager_from_blocks([values], [columns, index]) + return init_ndarray(values, index, columns, dtype=dtype, copy=copy) + # TODO: can we just get rid of this as a method? 
# ---------------------------------------------------------------------- @@ -1535,17 +1433,17 @@ def from_records(cls, data, index=None, exclude=None, columns=None, arr_columns.append(k) arrays.append(v) - arrays, arr_columns = _reorder_arrays(arrays, arr_columns, - columns) + arrays, arr_columns = reorder_arrays(arrays, arr_columns, + columns) elif isinstance(data, (np.ndarray, DataFrame)): - arrays, columns = _to_arrays(data, columns) + arrays, columns = to_arrays(data, columns) if columns is not None: columns = ensure_index(columns) arr_columns = columns else: - arrays, arr_columns = _to_arrays(data, columns, - coerce_float=coerce_float) + arrays, arr_columns = to_arrays(data, columns, + coerce_float=coerce_float) arr_columns = ensure_index(arr_columns) if columns is not None: @@ -1587,7 +1485,7 @@ def from_records(cls, data, index=None, exclude=None, columns=None, arr_columns = arr_columns.drop(arr_exclude) columns = columns.drop(exclude) - mgr = _arrays_to_mgr(arrays, arr_columns, result_index, columns) + mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns) return cls(mgr) @@ -1780,7 +1678,7 @@ def from_items(cls, items, columns=None, orient='columns'): @classmethod def _from_arrays(cls, arrays, columns, index, dtype=None): - mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) + mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype) return cls(mgr) @classmethod @@ -3559,17 +3457,15 @@ def reindexer(value): value = reindexer(value).T elif isinstance(value, ExtensionArray): - from pandas.core.series import _sanitize_index - # Explicitly copy here, instead of in _sanitize_index, + # Explicitly copy here, instead of in sanitize_index, # as sanitize_index won't copy an EA, even with copy=True value = value.copy() - value = _sanitize_index(value, self.index, copy=False) + value = sanitize_index(value, self.index, copy=False) elif isinstance(value, Index) or is_sequence(value): - from pandas.core.series import _sanitize_index # 
turn me into an ndarray - value = _sanitize_index(value, self.index, copy=False) + value = sanitize_index(value, self.index, copy=False) if not isinstance(value, (np.ndarray, Index)): if isinstance(value, list) and len(value) > 0: value = maybe_convert_platform(value) @@ -7841,341 +7737,6 @@ def isin(self, values): ops.add_special_arithmetic_methods(DataFrame) -def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): - """ - Segregate Series based on type and coerce into matrices. - - Needs to handle a lot of exceptional cases. - """ - # figure out the index, if necessary - if index is None: - index = extract_index(arrays) - else: - index = ensure_index(index) - - # don't force copy because getting jammed in an ndarray anyway - arrays = _homogenize(arrays, index, dtype) - - # from BlockManager perspective - axes = [ensure_index(columns), index] - - return create_block_manager_from_arrays(arrays, arr_names, axes) - - -def extract_index(data): - from pandas.core.index import _union_indexes - - index = None - if len(data) == 0: - index = Index([]) - elif len(data) > 0: - raw_lengths = [] - indexes = [] - - have_raw_arrays = False - have_series = False - have_dicts = False - - for v in data: - if isinstance(v, Series): - have_series = True - indexes.append(v.index) - elif isinstance(v, dict): - have_dicts = True - indexes.append(list(v.keys())) - elif is_list_like(v) and getattr(v, 'ndim', 1) == 1: - have_raw_arrays = True - raw_lengths.append(len(v)) - - if not indexes and not raw_lengths: - raise ValueError('If using all scalar values, you must pass' - ' an index') - - if have_series or have_dicts: - index = _union_indexes(indexes) - - if have_raw_arrays: - lengths = list(set(raw_lengths)) - if len(lengths) > 1: - raise ValueError('arrays must all be same length') - - if have_dicts: - raise ValueError('Mixing dicts with non-Series may lead to ' - 'ambiguous ordering.') - - if have_series: - if lengths[0] != len(index): - msg = ('array length %d does not 
match index length %d' % - (lengths[0], len(index))) - raise ValueError(msg) - else: - index = ibase.default_index(lengths[0]) - - return ensure_index(index) - - -def _prep_ndarray(values, copy=True): - if not isinstance(values, (np.ndarray, Series, Index)): - if len(values) == 0: - return np.empty((0, 0), dtype=object) - - def convert(v): - return maybe_convert_platform(v) - - # we could have a 1-dim or 2-dim list here - # this is equiv of np.asarray, but does object conversion - # and platform dtype preservation - try: - if is_list_like(values[0]) or hasattr(values[0], 'len'): - values = np.array([convert(v) for v in values]) - elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: - # GH#21861 - values = np.array([convert(v) for v in values]) - else: - values = convert(values) - except (ValueError, TypeError): - values = convert(values) - - else: - - # drop subclass info, do not copy data - values = np.asarray(values) - if copy: - values = values.copy() - - if values.ndim == 1: - values = values.reshape((values.shape[0], 1)) - elif values.ndim != 2: - raise ValueError('Must pass 2-d input') - - return values - - -def _to_arrays(data, columns, coerce_float=False, dtype=None): - """ - Return list of arrays, columns. 
- """ - if isinstance(data, DataFrame): - if columns is not None: - arrays = [data._ixs(i, axis=1).values - for i, col in enumerate(data.columns) if col in columns] - else: - columns = data.columns - arrays = [data._ixs(i, axis=1).values for i in range(len(columns))] - - return arrays, columns - - if not len(data): - if isinstance(data, np.ndarray): - columns = data.dtype.names - if columns is not None: - return [[]] * len(columns), columns - return [], [] # columns if columns is not None else [] - if isinstance(data[0], (list, tuple)): - return _list_to_arrays(data, columns, coerce_float=coerce_float, - dtype=dtype) - elif isinstance(data[0], compat.Mapping): - return _list_of_dict_to_arrays(data, columns, - coerce_float=coerce_float, dtype=dtype) - elif isinstance(data[0], Series): - return _list_of_series_to_arrays(data, columns, - coerce_float=coerce_float, - dtype=dtype) - elif isinstance(data[0], Categorical): - if columns is None: - columns = ibase.default_index(len(data)) - return data, columns - elif (isinstance(data, (np.ndarray, Series, Index)) and - data.dtype.names is not None): - - columns = list(data.dtype.names) - arrays = [data[k] for k in columns] - return arrays, columns - else: - # last ditch effort - data = lmap(tuple, data) - return _list_to_arrays(data, columns, coerce_float=coerce_float, - dtype=dtype) - - -def _masked_rec_array_to_mgr(data, index, columns, dtype, copy): - """ - Extract from a masked rec array and create the manager. 
- """ - - # essentially process a record array then fill it - fill_value = data.fill_value - fdata = ma.getdata(data) - if index is None: - index = _get_names_from_index(fdata) - if index is None: - index = ibase.default_index(len(data)) - index = ensure_index(index) - - if columns is not None: - columns = ensure_index(columns) - arrays, arr_columns = _to_arrays(fdata, columns) - - # fill if needed - new_arrays = [] - for fv, arr, col in zip(fill_value, arrays, arr_columns): - mask = ma.getmaskarray(data[col]) - if mask.any(): - arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) - arr[mask] = fv - new_arrays.append(arr) - - # create the manager - arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns) - if columns is None: - columns = arr_columns - - mgr = _arrays_to_mgr(arrays, arr_columns, index, columns) - - if copy: - mgr = mgr.copy() - return mgr - - -def _reorder_arrays(arrays, arr_columns, columns): - # reorder according to the columns - if (columns is not None and len(columns) and arr_columns is not None and - len(arr_columns)): - indexer = ensure_index(arr_columns).get_indexer(columns) - arr_columns = ensure_index([arr_columns[i] for i in indexer]) - arrays = [arrays[i] for i in indexer] - return arrays, arr_columns - - -def _list_to_arrays(data, columns, coerce_float=False, dtype=None): - if len(data) > 0 and isinstance(data[0], tuple): - content = list(lib.to_object_array_tuples(data).T) - else: - # list of lists - content = list(lib.to_object_array(data).T) - return _convert_object_array(content, columns, dtype=dtype, - coerce_float=coerce_float) - - -def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): - from pandas.core.index import _get_objs_combined_axis - - if columns is None: - columns = _get_objs_combined_axis(data, sort=False) - - indexer_cache = {} - - aligned_values = [] - for s in data: - index = getattr(s, 'index', None) - if index is None: - index = ibase.default_index(len(s)) - - if id(index) in 
indexer_cache: - indexer = indexer_cache[id(index)] - else: - indexer = indexer_cache[id(index)] = index.get_indexer(columns) - - values = com.values_from_object(s) - aligned_values.append(algorithms.take_1d(values, indexer)) - - values = np.vstack(aligned_values) - - if values.dtype == np.object_: - content = list(values.T) - return _convert_object_array(content, columns, dtype=dtype, - coerce_float=coerce_float) - else: - return values.T, columns - - -def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): - if columns is None: - gen = (list(x.keys()) for x in data) - sort = not any(isinstance(d, OrderedDict) for d in data) - columns = lib.fast_unique_multiple_list_gen(gen, sort=sort) - - # assure that they are of the base dict class and not of derived - # classes - data = [(type(d) is dict) and d or dict(d) for d in data] - - content = list(lib.dicts_to_array(data, list(columns)).T) - return _convert_object_array(content, columns, dtype=dtype, - coerce_float=coerce_float) - - -def _convert_object_array(content, columns, coerce_float=False, dtype=None): - if columns is None: - columns = ibase.default_index(len(content)) - else: - if len(columns) != len(content): # pragma: no cover - # caller's responsibility to check for this... 
- raise AssertionError('{col:d} columns passed, passed data had ' - '{con} columns'.format(col=len(columns), - con=len(content))) - - # provide soft conversion of object dtypes - def convert(arr): - if dtype != object and dtype != np.object: - arr = lib.maybe_convert_objects(arr, try_float=coerce_float) - arr = maybe_cast_to_datetime(arr, dtype) - return arr - - arrays = [convert(arr) for arr in content] - - return arrays, columns - - -def _get_names_from_index(data): - has_some_name = any(getattr(s, 'name', None) is not None for s in data) - if not has_some_name: - return ibase.default_index(len(data)) - - index = lrange(len(data)) - count = 0 - for i, s in enumerate(data): - n = getattr(s, 'name', None) - if n is not None: - index[i] = n - else: - index[i] = 'Unnamed %d' % count - count += 1 - - return index - - -def _homogenize(data, index, dtype=None): - from pandas.core.series import _sanitize_array - - oindex = None - homogenized = [] - - for v in data: - if isinstance(v, Series): - if dtype is not None: - v = v.astype(dtype) - if v.index is not index: - # Forces alignment. 
No need to copy data since we - # are putting it into an ndarray later - v = v.reindex(index, copy=False) - else: - if isinstance(v, dict): - if oindex is None: - oindex = index.astype('O') - - if isinstance(index, (DatetimeIndex, TimedeltaIndex)): - v = com.dict_compat(v) - else: - v = dict(v) - v = lib.fast_multiget(v, oindex.values, default=np.nan) - v = _sanitize_array(v, index, dtype=dtype, copy=False, - raise_cast_failure=False) - - homogenized.append(v) - - return homogenized - - def _from_nested_dict(data): # TODO: this should be seriously cythonized new_data = OrderedDict() diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py new file mode 100644 index 0000000000000..5859dc9e858b7 --- /dev/null +++ b/pandas/core/internals/construction.py @@ -0,0 +1,699 @@ +""" +Functions for preparing various inputs passed to the DataFrame or Series +constructors before passing them to aBlockManager. +""" +from collections import OrderedDict + +import numpy as np +import numpy.ma as ma + +from pandas._libs import lib +from pandas._libs.tslibs import IncompatibleFrequency +import pandas.compat as compat +from pandas.compat import ( + get_range_parameters, lmap, lrange, raise_with_traceback, range) + +from pandas.core.dtypes.cast import ( + construct_1d_arraylike_from_scalar, construct_1d_ndarray_preserving_na, + construct_1d_object_array_from_listlike, infer_dtype_from_scalar, + maybe_cast_to_datetime, maybe_cast_to_integer_array, maybe_castable, + maybe_convert_platform, maybe_infer_to_datetimelike, maybe_upcast) +from pandas.core.dtypes.common import ( + is_categorical_dtype, is_datetime64tz_dtype, is_dtype_equal, + is_extension_array_dtype, is_extension_type, is_float_dtype, + is_integer_dtype, is_iterator, is_list_like, is_object_dtype, pandas_dtype) +from pandas.core.dtypes.generic import ( + ABCDataFrame, ABCDatetimeIndex, ABCIndexClass, ABCPeriodIndex, ABCSeries, + ABCTimedeltaIndex) +from pandas.core.dtypes.missing import isna + 
+from pandas.core import algorithms, common as com +from pandas.core.arrays import Categorical, ExtensionArray, period_array +from pandas.core.index import ( + Index, _get_objs_combined_axis, _union_indexes, ensure_index) +from pandas.core.indexes import base as ibase +from pandas.core.internals import ( + create_block_manager_from_arrays, create_block_manager_from_blocks) + +# --------------------------------------------------------------------- +# BlockManager Interface + + +def arrays_to_mgr(arrays, arr_names, index, columns, dtype=None): + """ + Segregate Series based on type and coerce into matrices. + + Needs to handle a lot of exceptional cases. + """ + # figure out the index, if necessary + if index is None: + index = extract_index(arrays) + else: + index = ensure_index(index) + + # don't force copy because getting jammed in an ndarray anyway + arrays = _homogenize(arrays, index, dtype) + + # from BlockManager perspective + axes = [ensure_index(columns), index] + + return create_block_manager_from_arrays(arrays, arr_names, axes) + + +def masked_rec_array_to_mgr(data, index, columns, dtype, copy): + """ + Extract from a masked rec array and create the manager. 
+ """ + + # essentially process a record array then fill it + fill_value = data.fill_value + fdata = ma.getdata(data) + if index is None: + index = get_names_from_index(fdata) + if index is None: + index = ibase.default_index(len(data)) + index = ensure_index(index) + + if columns is not None: + columns = ensure_index(columns) + arrays, arr_columns = to_arrays(fdata, columns) + + # fill if needed + new_arrays = [] + for fv, arr, col in zip(fill_value, arrays, arr_columns): + mask = ma.getmaskarray(data[col]) + if mask.any(): + arr, fv = maybe_upcast(arr, fill_value=fv, copy=True) + arr[mask] = fv + new_arrays.append(arr) + + # create the manager + arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns) + if columns is None: + columns = arr_columns + + mgr = arrays_to_mgr(arrays, arr_columns, index, columns) + + if copy: + mgr = mgr.copy() + return mgr + + +# --------------------------------------------------------------------- +# DataFrame Constructor Interface + +def init_ndarray(values, index, columns, dtype=None, copy=False): + # input must be a ndarray, list, Series, index + + if isinstance(values, ABCSeries): + if columns is None: + if values.name is not None: + columns = [values.name] + if index is None: + index = values.index + else: + values = values.reindex(index) + + # zero len case (GH #2234) + if not len(values) and columns is not None and len(columns): + values = np.empty((0, 1), dtype=object) + + # we could have a categorical type passed or coerced to 'category' + # recast this to an arrays_to_mgr + if (is_categorical_dtype(getattr(values, 'dtype', None)) or + is_categorical_dtype(dtype)): + + if not hasattr(values, 'dtype'): + values = prep_ndarray(values, copy=copy) + values = values.ravel() + elif copy: + values = values.copy() + + index, columns = _get_axes(len(values), 1, index, columns) + return arrays_to_mgr([values], columns, index, columns, + dtype=dtype) + elif (is_datetime64tz_dtype(values) or + 
is_extension_array_dtype(values)): + # GH#19157 + if columns is None: + columns = [0] + return arrays_to_mgr([values], columns, index, columns, + dtype=dtype) + + # by definition an array here + # the dtypes will be coerced to a single dtype + values = prep_ndarray(values, copy=copy) + + if dtype is not None: + if not is_dtype_equal(values.dtype, dtype): + try: + values = values.astype(dtype) + except Exception as orig: + e = ValueError("failed to cast to '{dtype}' (Exception " + "was: {orig})".format(dtype=dtype, + orig=orig)) + raise_with_traceback(e) + + index, columns = _get_axes(*values.shape, index=index, columns=columns) + values = values.T + + # if we don't have a dtype specified, then try to convert objects + # on the entire block; this is to convert if we have datetimelike's + # embedded in an object type + if dtype is None and is_object_dtype(values): + values = maybe_infer_to_datetimelike(values) + + return create_block_manager_from_blocks([values], [columns, index]) + + +def init_dict(data, index, columns, dtype=None): + """ + Segregate Series based on type and coerce into matrices. + Needs to handle a lot of exceptional cases. 
+ """ + if columns is not None: + from pandas.core.series import Series + arrays = Series(data, index=columns, dtype=object) + data_names = arrays.index + + missing = arrays.isnull() + if index is None: + # GH10856 + # raise ValueError if only scalars in dict + index = extract_index(arrays[~missing]) + else: + index = ensure_index(index) + + # no obvious "empty" int column + if missing.any() and not is_integer_dtype(dtype): + if dtype is None or np.issubdtype(dtype, np.flexible): + # GH#1783 + nan_dtype = object + else: + nan_dtype = dtype + v = construct_1d_arraylike_from_scalar(np.nan, len(index), + nan_dtype) + arrays.loc[missing] = [v] * missing.sum() + + else: + keys = com.dict_keys_to_ordered_list(data) + columns = data_names = Index(keys) + arrays = [data[k] for k in keys] + + return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype) + + +# --------------------------------------------------------------------- + +def prep_ndarray(values, copy=True): + if not isinstance(values, (np.ndarray, ABCSeries, Index)): + if len(values) == 0: + return np.empty((0, 0), dtype=object) + + def convert(v): + return maybe_convert_platform(v) + + # we could have a 1-dim or 2-dim list here + # this is equiv of np.asarray, but does object conversion + # and platform dtype preservation + try: + if is_list_like(values[0]) or hasattr(values[0], 'len'): + values = np.array([convert(v) for v in values]) + elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: + # GH#21861 + values = np.array([convert(v) for v in values]) + else: + values = convert(values) + except (ValueError, TypeError): + values = convert(values) + + else: + + # drop subclass info, do not copy data + values = np.asarray(values) + if copy: + values = values.copy() + + if values.ndim == 1: + values = values.reshape((values.shape[0], 1)) + elif values.ndim != 2: + raise ValueError('Must pass 2-d input') + + return values + + +def _homogenize(data, index, dtype=None): + oindex = None + homogenized 
= [] + + for v in data: + if isinstance(v, ABCSeries): + if dtype is not None: + v = v.astype(dtype) + if v.index is not index: + # Forces alignment. No need to copy data since we + # are putting it into an ndarray later + v = v.reindex(index, copy=False) + else: + if isinstance(v, dict): + if oindex is None: + oindex = index.astype('O') + + if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)): + v = com.dict_compat(v) + else: + v = dict(v) + v = lib.fast_multiget(v, oindex.values, default=np.nan) + v = sanitize_array(v, index, dtype=dtype, copy=False, + raise_cast_failure=False) + + homogenized.append(v) + + return homogenized + + +def extract_index(data): + index = None + if len(data) == 0: + index = Index([]) + elif len(data) > 0: + raw_lengths = [] + indexes = [] + + have_raw_arrays = False + have_series = False + have_dicts = False + + for v in data: + if isinstance(v, ABCSeries): + have_series = True + indexes.append(v.index) + elif isinstance(v, dict): + have_dicts = True + indexes.append(list(v.keys())) + elif is_list_like(v) and getattr(v, 'ndim', 1) == 1: + have_raw_arrays = True + raw_lengths.append(len(v)) + + if not indexes and not raw_lengths: + raise ValueError('If using all scalar values, you must pass' + ' an index') + + if have_series or have_dicts: + index = _union_indexes(indexes) + + if have_raw_arrays: + lengths = list(set(raw_lengths)) + if len(lengths) > 1: + raise ValueError('arrays must all be same length') + + if have_dicts: + raise ValueError('Mixing dicts with non-Series may lead to ' + 'ambiguous ordering.') + + if have_series: + if lengths[0] != len(index): + msg = ('array length %d does not match index length %d' % + (lengths[0], len(index))) + raise ValueError(msg) + else: + index = ibase.default_index(lengths[0]) + + return ensure_index(index) + + +def reorder_arrays(arrays, arr_columns, columns): + # reorder according to the columns + if (columns is not None and len(columns) and arr_columns is not None and + 
len(arr_columns)): + indexer = ensure_index(arr_columns).get_indexer(columns) + arr_columns = ensure_index([arr_columns[i] for i in indexer]) + arrays = [arrays[i] for i in indexer] + return arrays, arr_columns + + +def get_names_from_index(data): + has_some_name = any(getattr(s, 'name', None) is not None for s in data) + if not has_some_name: + return ibase.default_index(len(data)) + + index = lrange(len(data)) + count = 0 + for i, s in enumerate(data): + n = getattr(s, 'name', None) + if n is not None: + index[i] = n + else: + index[i] = 'Unnamed %d' % count + count += 1 + + return index + + +def _get_axes(N, K, index, columns): + # helper to create the axes as indexes + # return axes or defaults + + if index is None: + index = ibase.default_index(N) + else: + index = ensure_index(index) + + if columns is None: + columns = ibase.default_index(K) + else: + columns = ensure_index(columns) + return index, columns + + +# --------------------------------------------------------------------- +# Conversion of Inputs to Arrays + +def to_arrays(data, columns, coerce_float=False, dtype=None): + """ + Return list of arrays, columns. 
+ """ + if isinstance(data, ABCDataFrame): + if columns is not None: + arrays = [data._ixs(i, axis=1).values + for i, col in enumerate(data.columns) if col in columns] + else: + columns = data.columns + arrays = [data._ixs(i, axis=1).values for i in range(len(columns))] + + return arrays, columns + + if not len(data): + if isinstance(data, np.ndarray): + columns = data.dtype.names + if columns is not None: + return [[]] * len(columns), columns + return [], [] # columns if columns is not None else [] + if isinstance(data[0], (list, tuple)): + return _list_to_arrays(data, columns, coerce_float=coerce_float, + dtype=dtype) + elif isinstance(data[0], compat.Mapping): + return _list_of_dict_to_arrays(data, columns, + coerce_float=coerce_float, dtype=dtype) + elif isinstance(data[0], ABCSeries): + return _list_of_series_to_arrays(data, columns, + coerce_float=coerce_float, + dtype=dtype) + elif isinstance(data[0], Categorical): + if columns is None: + columns = ibase.default_index(len(data)) + return data, columns + elif (isinstance(data, (np.ndarray, ABCSeries, Index)) and + data.dtype.names is not None): + + columns = list(data.dtype.names) + arrays = [data[k] for k in columns] + return arrays, columns + else: + # last ditch effort + data = lmap(tuple, data) + return _list_to_arrays(data, columns, coerce_float=coerce_float, + dtype=dtype) + + +def _list_to_arrays(data, columns, coerce_float=False, dtype=None): + if len(data) > 0 and isinstance(data[0], tuple): + content = list(lib.to_object_array_tuples(data).T) + else: + # list of lists + content = list(lib.to_object_array(data).T) + return _convert_object_array(content, columns, dtype=dtype, + coerce_float=coerce_float) + + +def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None): + if columns is None: + columns = _get_objs_combined_axis(data, sort=False) + + indexer_cache = {} + + aligned_values = [] + for s in data: + index = getattr(s, 'index', None) + if index is None: + index = 
ibase.default_index(len(s)) + + if id(index) in indexer_cache: + indexer = indexer_cache[id(index)] + else: + indexer = indexer_cache[id(index)] = index.get_indexer(columns) + + values = com.values_from_object(s) + aligned_values.append(algorithms.take_1d(values, indexer)) + + values = np.vstack(aligned_values) + + if values.dtype == np.object_: + content = list(values.T) + return _convert_object_array(content, columns, dtype=dtype, + coerce_float=coerce_float) + else: + return values.T, columns + + +def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None): + if columns is None: + gen = (list(x.keys()) for x in data) + sort = not any(isinstance(d, OrderedDict) for d in data) + columns = lib.fast_unique_multiple_list_gen(gen, sort=sort) + + # assure that they are of the base dict class and not of derived + # classes + data = [(type(d) is dict) and d or dict(d) for d in data] + + content = list(lib.dicts_to_array(data, list(columns)).T) + return _convert_object_array(content, columns, dtype=dtype, + coerce_float=coerce_float) + + +def _convert_object_array(content, columns, coerce_float=False, dtype=None): + if columns is None: + columns = ibase.default_index(len(content)) + else: + if len(columns) != len(content): # pragma: no cover + # caller's responsibility to check for this... + raise AssertionError('{col:d} columns passed, passed data had ' + '{con} columns'.format(col=len(columns), + con=len(content))) + + # provide soft conversion of object dtypes + def convert(arr): + if dtype != object and dtype != np.object: + arr = lib.maybe_convert_objects(arr, try_float=coerce_float) + arr = maybe_cast_to_datetime(arr, dtype) + return arr + + arrays = [convert(arr) for arr in content] + + return arrays, columns + + +# --------------------------------------------------------------------- +# Series-Based + +def sanitize_index(data, index, copy=False): + """ + Sanitize an index type to return an ndarray of the underlying, pass + through a non-Index. 
+ """ + + if index is None: + return data + + if len(data) != len(index): + raise ValueError('Length of values does not match length of ' 'index') + + if isinstance(data, ABCIndexClass) and not copy: + pass + elif isinstance(data, (ABCPeriodIndex, ABCDatetimeIndex)): + data = data._values + if copy: + data = data.copy() + + elif isinstance(data, np.ndarray): + + # coerce datetimelike types + if data.dtype.kind in ['M', 'm']: + data = sanitize_array(data, index, copy=copy) + + return data + + +def sanitize_array(data, index, dtype=None, copy=False, + raise_cast_failure=False): + """ + Sanitize input data to an ndarray, copy if specified, coerce to the + dtype if specified. + """ + + if dtype is not None: + dtype = pandas_dtype(dtype) + + if isinstance(data, ma.MaskedArray): + mask = ma.getmaskarray(data) + if mask.any(): + data, fill_value = maybe_upcast(data, copy=True) + data[mask] = fill_value + else: + data = data.copy() + + # GH#846 + if isinstance(data, (np.ndarray, Index, ABCSeries)): + + if dtype is not None: + subarr = np.array(data, copy=False) + + # possibility of nan -> garbage + if is_float_dtype(data.dtype) and is_integer_dtype(dtype): + if not isna(data).any(): + subarr = _try_cast(data, True, dtype, copy, + raise_cast_failure) + elif copy: + subarr = data.copy() + else: + subarr = _try_cast(data, True, dtype, copy, raise_cast_failure) + elif isinstance(data, Index): + # don't coerce Index types + # e.g. 
indexes can have different conversions (so don't fast path + # them) + # GH#6140 + subarr = sanitize_index(data, index, copy=copy) + else: + + # we will try to copy be-definition here + subarr = _try_cast(data, True, dtype, copy, raise_cast_failure) + + elif isinstance(data, ExtensionArray): + subarr = data + + if dtype is not None and not data.dtype.is_dtype(dtype): + subarr = data.astype(dtype) + + if copy: + subarr = data.copy() + return subarr + + elif isinstance(data, (list, tuple)) and len(data) > 0: + if dtype is not None: + try: + subarr = _try_cast(data, False, dtype, copy, + raise_cast_failure) + except Exception: + if raise_cast_failure: # pragma: no cover + raise + subarr = np.array(data, dtype=object, copy=copy) + subarr = lib.maybe_convert_objects(subarr) + + else: + subarr = maybe_convert_platform(data) + + subarr = maybe_cast_to_datetime(subarr, dtype) + + elif isinstance(data, range): + # GH#16804 + start, stop, step = get_range_parameters(data) + arr = np.arange(start, stop, step, dtype='int64') + subarr = _try_cast(arr, False, dtype, copy, raise_cast_failure) + else: + subarr = _try_cast(data, False, dtype, copy, raise_cast_failure) + + # scalar like, GH + if getattr(subarr, 'ndim', 0) == 0: + if isinstance(data, list): # pragma: no cover + subarr = np.array(data, dtype=object) + elif index is not None: + value = data + + # figure out the dtype from the value (upcast if necessary) + if dtype is None: + dtype, value = infer_dtype_from_scalar(value) + else: + # need to possibly convert the value here + value = maybe_cast_to_datetime(value, dtype) + + subarr = construct_1d_arraylike_from_scalar( + value, len(index), dtype) + + else: + return subarr.item() + + # the result that we want + elif subarr.ndim == 1: + if index is not None: + + # a 1-element ndarray + if len(subarr) != len(index) and len(subarr) == 1: + subarr = construct_1d_arraylike_from_scalar( + subarr[0], len(index), subarr.dtype) + + elif subarr.ndim > 1: + if isinstance(data, 
np.ndarray): + raise Exception('Data must be 1-dimensional') + else: + subarr = com.asarray_tuplesafe(data, dtype=dtype) + + # This is to prevent mixed-type Series getting all casted to + # NumPy string type, e.g. NaN --> '-1#IND'. + if issubclass(subarr.dtype.type, compat.string_types): + # GH#16605 + # If not empty convert the data to dtype + # GH#19853: If data is a scalar, subarr has already the result + if not lib.is_scalar(data): + if not np.all(isna(data)): + data = np.array(data, dtype=dtype, copy=False) + subarr = np.array(data, dtype=object, copy=copy) + + if is_object_dtype(subarr.dtype) and dtype != 'object': + inferred = lib.infer_dtype(subarr) + if inferred == 'period': + try: + subarr = period_array(subarr) + except IncompatibleFrequency: + pass + + return subarr + + +def _try_cast(arr, take_fast_path, dtype, copy, raise_cast_failure): + + # perf shortcut as this is the most common case + if take_fast_path: + if maybe_castable(arr) and not copy and dtype is None: + return arr + + try: + # GH#15832: Check if we are requesting a numeric dype and + # that we can convert the data to the requested dtype. + if is_integer_dtype(dtype): + subarr = maybe_cast_to_integer_array(arr, dtype) + + subarr = maybe_cast_to_datetime(arr, dtype) + # Take care in creating object arrays (but iterators are not + # supported): + if is_object_dtype(dtype) and (is_list_like(subarr) and + not (is_iterator(subarr) or + isinstance(subarr, np.ndarray))): + subarr = construct_1d_object_array_from_listlike(subarr) + elif not is_extension_type(subarr): + subarr = construct_1d_ndarray_preserving_na(subarr, dtype, + copy=copy) + except (ValueError, TypeError): + if is_categorical_dtype(dtype): + # We *do* allow casting to categorical, since we know + # that Categorical is the only array type for 'category'. 
+ subarr = Categorical(arr, dtype.categories, + ordered=dtype.ordered) + elif is_extension_array_dtype(dtype): + # create an extension array from its dtype + array_type = dtype.construct_array_type()._from_sequence + subarr = array_type(arr, dtype=dtype, copy=copy) + elif dtype is not None and raise_cast_failure: + raise + else: + subarr = np.array(arr, dtype=object, copy=copy) + return subarr diff --git a/pandas/core/series.py b/pandas/core/series.py index 8d4d7677cca44..6b4c9927ef0f1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -7,35 +7,27 @@ import warnings import numpy as np -import numpy.ma as ma from pandas._libs import iNaT, index as libindex, lib, tslibs import pandas.compat as compat -from pandas.compat import ( - PY36, OrderedDict, StringIO, get_range_parameters, range, u, zip) +from pandas.compat import PY36, OrderedDict, StringIO, u, zip from pandas.compat.numpy import function as nv from pandas.util._decorators import Appender, Substitution, deprecate from pandas.util._validators import validate_bool_kwarg -from pandas.core.dtypes.cast import ( - construct_1d_arraylike_from_scalar, construct_1d_ndarray_preserving_na, - construct_1d_object_array_from_listlike, infer_dtype_from_scalar, - maybe_cast_to_datetime, maybe_cast_to_integer_array, maybe_castable, - maybe_convert_platform, maybe_upcast) from pandas.core.dtypes.common import ( _is_unorderable_exception, ensure_platform_int, is_bool, is_categorical_dtype, is_datetime64tz_dtype, is_datetimelike, is_dict_like, - is_extension_array_dtype, is_extension_type, is_float_dtype, is_hashable, - is_integer, is_integer_dtype, is_iterator, is_list_like, is_object_dtype, - is_scalar, is_string_like, is_timedelta64_dtype, pandas_dtype) + is_extension_array_dtype, is_extension_type, is_hashable, is_integer, + is_iterator, is_list_like, is_scalar, is_string_like, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCIndexClass, ABCSeries, ABCSparseArray, 
ABCSparseSeries) + ABCDataFrame, ABCSeries, ABCSparseArray, ABCSparseSeries) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, remove_na_arraylike) from pandas.core import algorithms, base, generic, nanops, ops from pandas.core.accessor import CachedAccessor -from pandas.core.arrays import ExtensionArray, SparseArray, period_array +from pandas.core.arrays import ExtensionArray, SparseArray from pandas.core.arrays.categorical import Categorical, CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor import pandas.core.common as com @@ -49,6 +41,7 @@ from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexing import check_bool_indexer, maybe_convert_indices from pandas.core.internals import SingleBlockManager +from pandas.core.internals.construction import sanitize_array from pandas.core.strings import StringMethods from pandas.core.tools.datetimes import to_datetime @@ -256,8 +249,8 @@ def __init__(self, data=None, index=None, dtype=None, name=None, elif copy: data = data.copy() else: - data = _sanitize_array(data, index, dtype, copy, - raise_cast_failure=True) + data = sanitize_array(data, index, dtype, copy, + raise_cast_failure=True) data = SingleBlockManager(data, index, fastpath=True) @@ -4262,207 +4255,3 @@ def to_period(self, freq=None, copy=True): # Add arithmetic! ops.add_flex_arithmetic_methods(Series) ops.add_special_arithmetic_methods(Series) - - -# ----------------------------------------------------------------------------- -# Supplementary functions - - -def _sanitize_index(data, index, copy=False): - """ - Sanitize an index type to return an ndarray of the underlying, pass - through a non-Index. 
- """ - - if index is None: - return data - - if len(data) != len(index): - raise ValueError('Length of values does not match length of ' 'index') - - if isinstance(data, ABCIndexClass) and not copy: - pass - elif isinstance(data, (PeriodIndex, DatetimeIndex)): - data = data._values - if copy: - data = data.copy() - - elif isinstance(data, np.ndarray): - - # coerce datetimelike types - if data.dtype.kind in ['M', 'm']: - data = _sanitize_array(data, index, copy=copy) - - return data - - -def _sanitize_array(data, index, dtype=None, copy=False, - raise_cast_failure=False): - """ - Sanitize input data to an ndarray, copy if specified, coerce to the - dtype if specified. - """ - - if dtype is not None: - dtype = pandas_dtype(dtype) - - if isinstance(data, ma.MaskedArray): - mask = ma.getmaskarray(data) - if mask.any(): - data, fill_value = maybe_upcast(data, copy=True) - data[mask] = fill_value - else: - data = data.copy() - - def _try_cast(arr, take_fast_path): - - # perf shortcut as this is the most common case - if take_fast_path: - if maybe_castable(arr) and not copy and dtype is None: - return arr - - try: - # gh-15832: Check if we are requesting a numeric dype and - # that we can convert the data to the requested dtype. - if is_integer_dtype(dtype): - subarr = maybe_cast_to_integer_array(arr, dtype) - - subarr = maybe_cast_to_datetime(arr, dtype) - # Take care in creating object arrays (but iterators are not - # supported): - if is_object_dtype(dtype) and (is_list_like(subarr) and - not (is_iterator(subarr) or - isinstance(subarr, np.ndarray))): - subarr = construct_1d_object_array_from_listlike(subarr) - elif not is_extension_type(subarr): - subarr = construct_1d_ndarray_preserving_na(subarr, dtype, - copy=copy) - except (ValueError, TypeError): - if is_categorical_dtype(dtype): - # We *do* allow casting to categorical, since we know - # that Categorical is the only array type for 'category'. 
- subarr = Categorical(arr, dtype.categories, - ordered=dtype.ordered) - elif is_extension_array_dtype(dtype): - # create an extension array from its dtype - array_type = dtype.construct_array_type()._from_sequence - subarr = array_type(arr, dtype=dtype, copy=copy) - elif dtype is not None and raise_cast_failure: - raise - else: - subarr = np.array(arr, dtype=object, copy=copy) - return subarr - - # GH #846 - if isinstance(data, (np.ndarray, Index, Series)): - - if dtype is not None: - subarr = np.array(data, copy=False) - - # possibility of nan -> garbage - if is_float_dtype(data.dtype) and is_integer_dtype(dtype): - if not isna(data).any(): - subarr = _try_cast(data, True) - elif copy: - subarr = data.copy() - else: - subarr = _try_cast(data, True) - elif isinstance(data, Index): - # don't coerce Index types - # e.g. indexes can have different conversions (so don't fast path - # them) - # GH 6140 - subarr = _sanitize_index(data, index, copy=copy) - else: - - # we will try to copy be-definition here - subarr = _try_cast(data, True) - - elif isinstance(data, ExtensionArray): - subarr = data - - if dtype is not None and not data.dtype.is_dtype(dtype): - subarr = data.astype(dtype) - - if copy: - subarr = data.copy() - return subarr - - elif isinstance(data, (list, tuple)) and len(data) > 0: - if dtype is not None: - try: - subarr = _try_cast(data, False) - except Exception: - if raise_cast_failure: # pragma: no cover - raise - subarr = np.array(data, dtype=object, copy=copy) - subarr = lib.maybe_convert_objects(subarr) - - else: - subarr = maybe_convert_platform(data) - - subarr = maybe_cast_to_datetime(subarr, dtype) - - elif isinstance(data, range): - # GH 16804 - start, stop, step = get_range_parameters(data) - arr = np.arange(start, stop, step, dtype='int64') - subarr = _try_cast(arr, False) - else: - subarr = _try_cast(data, False) - - # scalar like, GH - if getattr(subarr, 'ndim', 0) == 0: - if isinstance(data, list): # pragma: no cover - subarr = 
np.array(data, dtype=object) - elif index is not None: - value = data - - # figure out the dtype from the value (upcast if necessary) - if dtype is None: - dtype, value = infer_dtype_from_scalar(value) - else: - # need to possibly convert the value here - value = maybe_cast_to_datetime(value, dtype) - - subarr = construct_1d_arraylike_from_scalar( - value, len(index), dtype) - - else: - return subarr.item() - - # the result that we want - elif subarr.ndim == 1: - if index is not None: - - # a 1-element ndarray - if len(subarr) != len(index) and len(subarr) == 1: - subarr = construct_1d_arraylike_from_scalar( - subarr[0], len(index), subarr.dtype) - - elif subarr.ndim > 1: - if isinstance(data, np.ndarray): - raise Exception('Data must be 1-dimensional') - else: - subarr = com.asarray_tuplesafe(data, dtype=dtype) - - # This is to prevent mixed-type Series getting all casted to - # NumPy string type, e.g. NaN --> '-1#IND'. - if issubclass(subarr.dtype.type, compat.string_types): - # GH 16605 - # If not empty convert the data to dtype - # GH 19853: If data is a scalar, subarr has already the result - if not is_scalar(data): - if not np.all(isna(data)): - data = np.array(data, dtype=dtype, copy=False) - subarr = np.array(data, dtype=object, copy=copy) - - if is_object_dtype(subarr.dtype) and dtype != 'object': - inferred = lib.infer_dtype(subarr) - if inferred == 'period': - try: - subarr = period_array(subarr) - except tslibs.period.IncompatibleFrequency: - pass - - return subarr diff --git a/pandas/core/sparse/frame.py b/pandas/core/sparse/frame.py index f1c46abfab0b2..8fc6a8d8e923f 100644 --- a/pandas/core/sparse/frame.py +++ b/pandas/core/sparse/frame.py @@ -21,12 +21,13 @@ import pandas.core.algorithms as algos from pandas.core.arrays.sparse import SparseArray, SparseDtype import pandas.core.common as com -from pandas.core.frame import DataFrame, _prep_ndarray, extract_index +from pandas.core.frame import DataFrame import pandas.core.generic as generic from 
pandas.core.index import Index, MultiIndex, ensure_index import pandas.core.indexes.base as ibase from pandas.core.internals import ( BlockManager, create_block_manager_from_arrays) +from pandas.core.internals.construction import extract_index, prep_ndarray import pandas.core.ops as ops from pandas.core.series import Series from pandas.core.sparse.series import SparseSeries @@ -194,7 +195,7 @@ def sp_maker(x): def _init_matrix(self, data, index, columns, dtype=None): """ Init self from ndarray or list of lists """ - data = _prep_ndarray(data, copy=False) + data = prep_ndarray(data, copy=False) index, columns = self._prep_index(data, index, columns) data = {idx: data[:, i] for i, idx in enumerate(columns)} return self._init_dict(data, index, columns, dtype)
Why? In implementing #24096 I found it tough to tell all the paths by which a DatetimeIndex get passed to a DataFrame or Series. Collecting all these helper functions is a step towards reducing the number of paths available so these things can be caught in one place. The main thing this PR does is move helper functions from core.series and core.frame. A few ancillary things it does - remove runtime imports (there is only one left in construction.py) - use ABC classes for isinstance checks - nested functions `_try_cast` and `_get_axes` are de-nested - de-privatize functions that are imported elsewhere. A function in construction.py has a leading underscore if and only if it is not imported elsewhere.
https://api.github.com/repos/pandas-dev/pandas/pulls/24100
2018-12-04T21:03:14Z
2018-12-05T12:55:49Z
2018-12-05T12:55:49Z
2018-12-05T14:46:16Z
DOC: Make _field_accessor manage the docstring format
diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index e731dd33f5bb5..239891870cc3b 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -82,7 +82,7 @@ def f(self): return result f.__name__ = name - f.__doc__ = docstring + f.__doc__ = "\n{}\n".format(docstring) return property(f) @@ -1129,12 +1129,12 @@ def date(self): "The name of day in a week (ex: Friday)\n\n.. deprecated:: 0.23.0") dayofyear = _field_accessor('dayofyear', 'doy', - "\nThe ordinal day of the year\n") - quarter = _field_accessor('quarter', 'q', "\nThe quarter of the date\n") + "The ordinal day of the year.") + quarter = _field_accessor('quarter', 'q', "The quarter of the date.") days_in_month = _field_accessor( 'days_in_month', 'dim', - "\nThe number of days in the month\n") + "The number of days in the month.") daysinmonth = days_in_month _is_month_doc = """ Indicates whether the date is the {first_or_last} day of the month. diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 830283d31a929..4afc9f5483c2a 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -59,7 +59,7 @@ def f(self): return result f.__name__ = name - f.__doc__ = docstring + f.__doc__ = "\n{}\n".format(docstring) return property(f) @@ -684,16 +684,16 @@ def to_pytimedelta(self): return tslibs.ints_to_pytimedelta(self.asi8) days = _field_accessor("days", "days", - "\nNumber of days for each element.\n") + "Number of days for each element.") seconds = _field_accessor("seconds", "seconds", - "\nNumber of seconds (>= 0 and less than 1 day) " - "for each element.\n") + "Number of seconds (>= 0 and less than 1 day) " + "for each element.") microseconds = _field_accessor("microseconds", "microseconds", - "\nNumber of microseconds (>= 0 and less " - "than 1 second) for each element.\n") + "Number of microseconds (>= 0 and less " + "than 1 second) for each element.") nanoseconds = 
_field_accessor("nanoseconds", "nanoseconds", - "\nNumber of nanoseconds (>= 0 and less " - "than 1 microsecond) for each element.\n") + "Number of nanoseconds (>= 0 and less " + "than 1 microsecond) for each element.") @property def components(self):
- [x] closes #24068 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24099
2018-12-04T19:45:34Z
2018-12-05T14:28:30Z
2018-12-05T14:28:30Z
2018-12-05T14:28:33Z
DOC: Use official numpydoc extension
diff --git a/ci/deps/travis-36-doc.yaml b/ci/deps/travis-36-doc.yaml index ed0764fab414a..c345af0a2983c 100644 --- a/ci/deps/travis-36-doc.yaml +++ b/ci/deps/travis-36-doc.yaml @@ -21,6 +21,7 @@ dependencies: - notebook - numexpr - numpy=1.13* + - numpydoc - openpyxl - pandoc - pyarrow diff --git a/doc/source/conf.py b/doc/source/conf.py index 56f77f667df88..bbc4320433650 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -18,7 +18,10 @@ import importlib import logging import warnings + from sphinx.ext.autosummary import _import_by_name +from numpydoc.docscrape import NumpyDocString +from numpydoc.docscrape_sphinx import SphinxDocString logger = logging.getLogger(__name__) @@ -49,10 +52,6 @@ ]) -# numpydoc is available in the sphinxext directory, and can't be imported -# until sphinxext is available in the Python path -from numpydoc.docscrape import NumpyDocString - # -- General configuration ----------------------------------------------- # Add any Sphinx extension module names here, as strings. 
They can be @@ -64,7 +63,7 @@ 'sphinx.ext.doctest', 'sphinx.ext.extlinks', 'sphinx.ext.todo', - 'numpydoc', + 'numpydoc', # handle NumPy documentation formatted docstrings 'IPython.sphinxext.ipython_directive', 'IPython.sphinxext.ipython_console_highlighting', 'matplotlib.sphinxext.plot_directive', @@ -102,12 +101,6 @@ if any(re.match(r"\s*api\s*", l) for l in index_rst_lines): autosummary_generate = True -# numpydoc -# for now use old parameter listing (styling + **kwargs problem) -numpydoc_use_blockquotes = True -# use member listing for attributes -numpydoc_attributes_as_param_list = False - # matplotlib plot directive plot_include_source = True plot_formats = [("png", 90)] @@ -420,6 +413,62 @@ ] +def sphinxdocstring_str(self, indent=0, func_role="obj"): + # Pandas displays Attributes section in style like Methods section + + # Function is copy of `SphinxDocString.__str__` + ns = { + 'signature': self._str_signature(), + 'index': self._str_index(), + 'summary': self._str_summary(), + 'extended_summary': self._str_extended_summary(), + 'parameters': self._str_param_list('Parameters'), + 'returns': self._str_returns('Returns'), + 'yields': self._str_returns('Yields'), + 'other_parameters': self._str_param_list('Other Parameters'), + 'raises': self._str_param_list('Raises'), + 'warns': self._str_param_list('Warns'), + 'warnings': self._str_warnings(), + 'see_also': self._str_see_also(func_role), + 'notes': self._str_section('Notes'), + 'references': self._str_references(), + 'examples': self._str_examples(), + # Replaced `self._str_param_list('Attributes', fake_autosummary=True)` + # with `self._str_member_list('Attributes')` + 'attributes': self._str_member_list('Attributes'), + 'methods': self._str_member_list('Methods'), + } + ns = {k: '\n'.join(v) for k, v in ns.items()} + + rendered = self.template.render(**ns) + return '\n'.join(self._str_indent(rendered.split('\n'), indent)) + + +SphinxDocString.__str__ = sphinxdocstring_str + + +# Fix "WARNING: Inline 
strong start-string without end-string." +# PR #155 "Escape the * in *args and **kwargs" from numpydoc +# Can be removed after PR merges in v0.9.0 +def decorate_process_param(func): + def _escape_args_and_kwargs(name): + if name[:2] == '**': + return r'\*\*' + name[2:] + elif name[:1] == '*': + return r'\*' + name[1:] + else: + return name + + def func_wrapper(self, param, desc, fake_autosummary): + param = _escape_args_and_kwargs(param.strip()) + return func(self, param, desc, fake_autosummary) + + return func_wrapper + + +func = SphinxDocString._process_param +SphinxDocString._process_param = decorate_process_param(func) + # Add custom Documenter to handle attributes/methods of an AccessorProperty # eg pandas.Series.str and pandas.Series.dt (see GH9322) diff --git a/doc/sphinxext/numpydoc/LICENSE.txt b/doc/sphinxext/numpydoc/LICENSE.txt deleted file mode 100644 index b15c699dcecaa..0000000000000 --- a/doc/sphinxext/numpydoc/LICENSE.txt +++ /dev/null @@ -1,94 +0,0 @@ -------------------------------------------------------------------------------- - The files - - numpydoc.py - - docscrape.py - - docscrape_sphinx.py - - phantom_import.py - have the following license: - -Copyright (C) 2008 Stefan van der Walt <stefan@mentat.za.net>, Pauli Virtanen <pav@iki.fi> - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - -THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, -INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING -IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------------------- - The files - - compiler_unparse.py - - comment_eater.py - - traitsdoc.py - have the following license: - -This software is OSI Certified Open Source Software. -OSI Certified is a certification mark of the Open Source Initiative. - -Copyright (c) 2006, Enthought, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Enthought, Inc. nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- - The file - - plot_directive.py - originates from Matplotlib (http://matplotlib.sf.net/) which has - the following license: - -Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. - -1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation. - -2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee. - -3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3. - -4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. 
JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS. - -5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. - -6. This License Agreement will automatically terminate upon a material breach of its terms and conditions. - -7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party. - -8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. - diff --git a/doc/sphinxext/numpydoc/README.rst b/doc/sphinxext/numpydoc/README.rst deleted file mode 100755 index f91811ef9add6..0000000000000 --- a/doc/sphinxext/numpydoc/README.rst +++ /dev/null @@ -1,51 +0,0 @@ -===================================== -numpydoc -- Numpy's Sphinx extensions -===================================== - -Numpy's documentation uses several custom extensions to Sphinx. These -are shipped in this ``numpydoc`` package, in case you want to make use -of them in third-party projects. - -The following extensions are available: - - - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add - the code description directives ``np:function``, ``np-c:function``, etc. - that support the Numpy docstring syntax. 
- - - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes. - - - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::`` - directive. Note that this implementation may still undergo severe - changes or eventually be deprecated. - - -numpydoc -======== - -Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings -following the Numpy/Scipy format to a form palatable to Sphinx. - -Options -------- - -The following options can be set in conf.py: - -- numpydoc_use_plots: bool - - Whether to produce ``plot::`` directives for Examples sections that - contain ``import matplotlib``. - -- numpydoc_show_class_members: bool - - Whether to show all members of a class in the Methods and Attributes - sections automatically. - -- numpydoc_class_members_toctree: bool - - Whether to create a Sphinx table of contents for the lists of class - methods and attributes. If a table of contents is made, Sphinx expects - each entry to have a separate page. - -- numpydoc_edit_link: bool (DEPRECATED -- edit your HTML template instead) - - Whether to insert an edit link after docstrings. diff --git a/doc/sphinxext/numpydoc/__init__.py b/doc/sphinxext/numpydoc/__init__.py deleted file mode 100644 index 30dba8fcf9132..0000000000000 --- a/doc/sphinxext/numpydoc/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import division, absolute_import, print_function - -__version__ = '0.8.0.dev0' - - -def setup(app, *args, **kwargs): - from .numpydoc import setup - return setup(app, *args, **kwargs) diff --git a/doc/sphinxext/numpydoc/docscrape.py b/doc/sphinxext/numpydoc/docscrape.py deleted file mode 100644 index 598b4438ffabc..0000000000000 --- a/doc/sphinxext/numpydoc/docscrape.py +++ /dev/null @@ -1,624 +0,0 @@ -"""Extract reference documentation from the NumPy source tree. 
- -""" -from __future__ import division, absolute_import, print_function - -import inspect -import textwrap -import re -import pydoc -from warnings import warn -import collections -import copy -import sys - - -def strip_blank_lines(l): - "Remove leading and trailing blank lines from a list of lines" - while l and not l[0].strip(): - del l[0] - while l and not l[-1].strip(): - del l[-1] - return l - - -class Reader(object): - """A line-based string reader. - - """ - def __init__(self, data): - """ - Parameters - ---------- - data : str - String with lines separated by '\n'. - - """ - if isinstance(data, list): - self._str = data - else: - self._str = data.split('\n') # store string as list of lines - - self.reset() - - def __getitem__(self, n): - return self._str[n] - - def reset(self): - self._l = 0 # current line nr - - def read(self): - if not self.eof(): - out = self[self._l] - self._l += 1 - return out - else: - return '' - - def seek_next_non_empty_line(self): - for l in self[self._l:]: - if l.strip(): - break - else: - self._l += 1 - - def eof(self): - return self._l >= len(self._str) - - def read_to_condition(self, condition_func): - start = self._l - for line in self[start:]: - if condition_func(line): - return self[start:self._l] - self._l += 1 - if self.eof(): - return self[start:self._l+1] - return [] - - def read_to_next_empty_line(self): - self.seek_next_non_empty_line() - - def is_empty(line): - return not line.strip() - - return self.read_to_condition(is_empty) - - def read_to_next_unindented_line(self): - def is_unindented(line): - return (line.strip() and (len(line.lstrip()) == len(line))) - return self.read_to_condition(is_unindented) - - def peek(self, n=0): - if self._l + n < len(self._str): - return self[self._l + n] - else: - return '' - - def is_empty(self): - return not ''.join(self._str).strip() - - -class ParseError(Exception): - def __str__(self): - message = self.args[0] - if hasattr(self, 'docstring'): - message = "%s in %r" % (message, 
self.docstring) - return message - - -class NumpyDocString(collections.Mapping): - """Parses a numpydoc string to an abstract representation - - Instances define a mapping from section title to structured data. - - """ - - sections = { - 'Signature': '', - 'Summary': [''], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Yields': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Attributes': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'Warnings': [], - 'References': '', - 'Examples': '', - 'index': {} - } - - def __init__(self, docstring, config={}): - orig_docstring = docstring - docstring = textwrap.dedent(docstring).split('\n') - - self._doc = Reader(docstring) - self._parsed_data = copy.deepcopy(self.sections) - - try: - self._parse() - except ParseError as e: - e.docstring = orig_docstring - raise - - def __getitem__(self, key): - return self._parsed_data[key] - - def __setitem__(self, key, val): - if key not in self._parsed_data: - self._error_location("Unknown section %s" % key, error=False) - else: - self._parsed_data[key] = val - - def __iter__(self): - return iter(self._parsed_data) - - def __len__(self): - return len(self._parsed_data) - - def _is_at_section(self): - self._doc.seek_next_non_empty_line() - - if self._doc.eof(): - return False - - l1 = self._doc.peek().strip() # e.g. Parameters - - if l1.startswith('.. 
index::'): - return True - - l2 = self._doc.peek(1).strip() # ---------- or ========== - return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) - - def _strip(self, doc): - i = 0 - j = 0 - for i, line in enumerate(doc): - if line.strip(): - break - - for j, line in enumerate(doc[::-1]): - if line.strip(): - break - - return doc[i:len(doc)-j] - - def _read_to_next_section(self): - section = self._doc.read_to_next_empty_line() - - while not self._is_at_section() and not self._doc.eof(): - if not self._doc.peek(-1).strip(): # previous line was empty - section += [''] - - section += self._doc.read_to_next_empty_line() - - return section - - def _read_sections(self): - while not self._doc.eof(): - data = self._read_to_next_section() - name = data[0].strip() - - if name.startswith('..'): # index section - yield name, data[1:] - elif len(data) < 2: - yield StopIteration - else: - yield name, self._strip(data[2:]) - - def _parse_param_list(self, content): - r = Reader(content) - params = [] - while not r.eof(): - header = r.read().strip() - if ' : ' in header: - arg_name, arg_type = header.split(' : ')[:2] - else: - arg_name, arg_type = header, '' - - desc = r.read_to_next_unindented_line() - desc = dedent_lines(desc) - desc = strip_blank_lines(desc) - - params.append((arg_name, arg_type, desc)) - - return params - - _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):" - r"`(?P<name>(?:~\w+\.)?[a-zA-Z0-9_.-]+)`|" - r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X) - - def _parse_see_also(self, content): - """ - func_name : Descriptive text - continued text - another_func_name : Descriptive text - func_name1, func_name2, :meth:`func_name`, func_name3 - - """ - items = [] - - def parse_item_name(text): - """Match ':role:`name`' or 'name'""" - m = self._name_rgx.match(text) - if m: - g = m.groups() - if g[1] is None: - return g[3], None - else: - return g[2], g[1] - raise ParseError("%s is not a item name" % text) - - def push_item(name, rest): - if not name: - return - name, role 
= parse_item_name(name) - items.append((name, list(rest), role)) - del rest[:] - - current_func = None - rest = [] - - for line in content: - if not line.strip(): - continue - - m = self._name_rgx.match(line) - if m and line[m.end():].strip().startswith(':'): - push_item(current_func, rest) - current_func, line = line[:m.end()], line[m.end():] - rest = [line.split(':', 1)[1].strip()] - if not rest[0]: - rest = [] - elif not line.startswith(' '): - push_item(current_func, rest) - current_func = None - if ',' in line: - for func in line.split(','): - if func.strip(): - push_item(func, []) - elif line.strip(): - current_func = line - elif current_func is not None: - rest.append(line.strip()) - push_item(current_func, rest) - return items - - def _parse_index(self, section, content): - """ - .. index: default - :refguide: something, else, and more - - """ - def strip_each_in(lst): - return [s.strip() for s in lst] - - out = {} - section = section.split('::') - if len(section) > 1: - out['default'] = strip_each_in(section[1].split(','))[0] - for line in content: - line = line.split(':') - if len(line) > 2: - out[line[1]] = strip_each_in(line[2].split(',')) - return out - - def _parse_summary(self): - """Grab signature (if given) and summary""" - if self._is_at_section(): - return - - # If several signatures present, take the last one - while True: - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str - if not self._is_at_section(): - continue - break - - if summary is not None: - self['Summary'] = summary - - if not self._is_at_section(): - self['Extended Summary'] = self._read_to_next_section() - - def _parse(self): - self._doc.reset() - self._parse_summary() - - sections = list(self._read_sections()) - section_names = set([section for section, content in sections]) - - has_returns = 'Returns' in section_names 
- has_yields = 'Yields' in section_names - # We could do more tests, but we are not. Arbitrarily. - if has_returns and has_yields: - msg = 'Docstring contains both a Returns and Yields section.' - raise ValueError(msg) - - for (section, content) in sections: - if not section.startswith('..'): - section = (s.capitalize() for s in section.split(' ')) - section = ' '.join(section) - if self.get(section): - self._error_location("The section %s appears twice" - % section) - - if section in ('Parameters', 'Returns', 'Yields', 'Raises', - 'Warns', 'Other Parameters', 'Attributes', - 'Methods'): - self[section] = self._parse_param_list(content) - elif section.startswith('.. index::'): - self['index'] = self._parse_index(section, content) - elif section == 'See Also': - self['See Also'] = self._parse_see_also(content) - else: - self[section] = content - - def _error_location(self, msg, error=True): - if hasattr(self, '_obj'): - # we know where the docs came from: - try: - filename = inspect.getsourcefile(self._obj) - except TypeError: - filename = None - msg = msg + (" in the docstring of %s in %s." 
- % (self._obj, filename)) - if error: - raise ValueError(msg) - else: - warn(msg) - - # string conversion routines - - def _str_header(self, name, symbol='-'): - return [name, len(name)*symbol] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - if self['Signature']: - return [self['Signature'].replace('*', '\*')] + [''] - else: - return [''] - - def _str_summary(self): - if self['Summary']: - return self['Summary'] + [''] - else: - return [] - - def _str_extended_summary(self): - if self['Extended Summary']: - return self['Extended Summary'] + [''] - else: - return [] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_header(name) - for param, param_type, desc in self[name]: - if param_type: - out += ['%s : %s' % (param, param_type)] - else: - out += [param] - if desc and ''.join(desc).strip(): - out += self._str_indent(desc) - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += self[name] - out += [''] - return out - - def _str_see_also(self, func_role): - if not self['See Also']: - return [] - out = [] - out += self._str_header("See Also") - last_had_desc = True - for func, desc, role in self['See Also']: - if role: - link = ':%s:`%s`' % (role, func) - elif func_role: - link = ':%s:`%s`' % (func_role, func) - else: - link = "`%s`_" % func - if desc or last_had_desc: - out += [''] - out += [link] - else: - out[-1] += ", %s" % link - if desc: - out += self._str_indent([' '.join(desc)]) - last_had_desc = True - else: - last_had_desc = False - out += [''] - return out - - def _str_index(self): - idx = self['index'] - out = [] - out += ['.. 
index:: %s' % idx.get('default', '')] - for section, references in idx.items(): - if section == 'default': - continue - out += [' :%s: %s' % (section, ', '.join(references))] - return out - - def __str__(self, func_role=''): - out = [] - out += self._str_signature() - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Yields', - 'Other Parameters', 'Raises', 'Warns'): - out += self._str_param_list(param_list) - out += self._str_section('Warnings') - out += self._str_see_also(func_role) - for s in ('Notes', 'References', 'Examples'): - out += self._str_section(s) - for param_list in ('Attributes', 'Methods'): - out += self._str_param_list(param_list) - out += self._str_index() - return '\n'.join(out) - - -def indent(str, indent=4): - indent_str = ' '*indent - if str is None: - return indent_str - lines = str.split('\n') - return '\n'.join(indent_str + l for l in lines) - - -def dedent_lines(lines): - """Deindent a list of lines maximally""" - return textwrap.dedent("\n".join(lines)).split("\n") - - -def header(text, style='-'): - return text + '\n' + style*len(text) + '\n' - - -class FunctionDoc(NumpyDocString): - def __init__(self, func, role='func', doc=None, config={}): - self._f = func - self._role = role # e.g. 
"func" or "meth" - - if doc is None: - if func is None: - raise ValueError("No function or docstring given") - doc = inspect.getdoc(func) or '' - NumpyDocString.__init__(self, doc) - - if not self['Signature'] and func is not None: - func, func_name = self.get_func() - try: - try: - signature = str(inspect.signature(func)) - except (AttributeError, ValueError): - # try to read signature, backward compat for older Python - if sys.version_info[0] >= 3: - argspec = inspect.getfullargspec(func) - else: - argspec = inspect.getargspec(func) - signature = inspect.formatargspec(*argspec) - signature = '%s%s' % (func_name, signature.replace('*', '\*')) - except TypeError: - signature = '%s()' % func_name - self['Signature'] = signature - - def get_func(self): - func_name = getattr(self._f, '__name__', self.__class__.__name__) - if inspect.isclass(self._f): - func = getattr(self._f, '__call__', self._f.__init__) - else: - func = self._f - return func, func_name - - def __str__(self): - out = '' - - func, func_name = self.get_func() - signature = self['Signature'].replace('*', '\*') - - roles = {'func': 'function', - 'meth': 'method'} - - if self._role: - if self._role not in roles: - print("Warning: invalid role %s" % self._role) - out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''), - func_name) - - out += super(FunctionDoc, self).__str__(func_role=self._role) - return out - - -class ClassDoc(NumpyDocString): - - extra_public_methods = ['__call__'] - - def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, - config={}): - if not inspect.isclass(cls) and cls is not None: - raise ValueError("Expected a class or None, but got %r" % cls) - self._cls = cls - - self.show_inherited_members = config.get( - 'show_inherited_class_members', True) - - if modulename and not modulename.endswith('.'): - modulename += '.' 
- self._mod = modulename - - if doc is None: - if cls is None: - raise ValueError("No class or documentation string given") - doc = pydoc.getdoc(cls) - - NumpyDocString.__init__(self, doc) - - if config.get('show_class_members', True): - def splitlines_x(s): - if not s: - return [] - else: - return s.splitlines() - - for field, items in [('Methods', self.methods), - ('Attributes', self.properties)]: - if not self[field]: - doc_list = [] - for name in sorted(items): - try: - doc_item = pydoc.getdoc(getattr(self._cls, name)) - doc_list.append((name, '', splitlines_x(doc_item))) - except AttributeError: - pass # method doesn't exist - self[field] = doc_list - - @property - def methods(self): - if self._cls is None: - return [] - return [name for name, func in inspect.getmembers(self._cls) - if ((not name.startswith('_') - or name in self.extra_public_methods) - and isinstance(func, collections.Callable) - and self._is_show_member(name))] - - @property - def properties(self): - if self._cls is None: - return [] - return [name for name, func in inspect.getmembers(self._cls) - if (not name.startswith('_') and - (func is None or isinstance(func, property) or - inspect.isdatadescriptor(func)) - and self._is_show_member(name))] - - def _is_show_member(self, name): - if self.show_inherited_members: - return True # show all class members - if name not in self._cls.__dict__: - return False # class member is inherited, we do not show it - return True diff --git a/doc/sphinxext/numpydoc/docscrape_sphinx.py b/doc/sphinxext/numpydoc/docscrape_sphinx.py deleted file mode 100644 index 19c355eba1898..0000000000000 --- a/doc/sphinxext/numpydoc/docscrape_sphinx.py +++ /dev/null @@ -1,429 +0,0 @@ -from __future__ import division, absolute_import, print_function - -import sys -import re -import inspect -import textwrap -import pydoc -import collections -import os - -from jinja2 import FileSystemLoader -from jinja2.sandbox import SandboxedEnvironment -import sphinx -from sphinx.jinja2glue 
import BuiltinTemplateLoader - -from .docscrape import NumpyDocString, FunctionDoc, ClassDoc - -if sys.version_info[0] >= 3: - sixu = lambda s: s -else: - sixu = lambda s: unicode(s, 'unicode_escape') - - -IMPORT_MATPLOTLIB_RE = r'\b(import +matplotlib|from +matplotlib +import)\b' - - -class SphinxDocString(NumpyDocString): - def __init__(self, docstring, config={}): - NumpyDocString.__init__(self, docstring, config=config) - self.load_config(config) - - def load_config(self, config): - self.use_plots = config.get('use_plots', False) - self.use_blockquotes = config.get('use_blockquotes', False) - self.class_members_toctree = config.get('class_members_toctree', True) - self.attributes_as_param_list = config.get('attributes_as_param_list', True) - self.template = config.get('template', None) - if self.template is None: - template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')] - template_loader = FileSystemLoader(template_dirs) - template_env = SandboxedEnvironment(loader=template_loader) - self.template = template_env.get_template('numpydoc_docstring.rst') - - # string conversion routines - def _str_header(self, name, symbol='`'): - return ['.. 
rubric:: ' + name, ''] - - def _str_field_list(self, name): - return [':' + name + ':'] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - return [''] - if self['Signature']: - return ['``%s``' % self['Signature']] + [''] - else: - return [''] - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Extended Summary'] + [''] - - def _str_returns(self, name='Returns'): - if self.use_blockquotes: - typed_fmt = '**%s** : %s' - untyped_fmt = '**%s**' - else: - typed_fmt = '%s : %s' - untyped_fmt = '%s' - - out = [] - if self[name]: - out += self._str_field_list(name) - out += [''] - for param, param_type, desc in self[name]: - if param_type: - out += self._str_indent([typed_fmt % (param.strip(), - param_type)]) - else: - out += self._str_indent([untyped_fmt % param.strip()]) - if desc and self.use_blockquotes: - out += [''] - elif not desc: - desc = ['..'] - out += self._str_indent(desc, 8) - out += [''] - return out - - def _process_param(self, param, desc, fake_autosummary): - """Determine how to display a parameter - - Emulates autosummary behavior if fake_autosummary - - Parameters - ---------- - param : str - The name of the parameter - desc : list of str - The parameter description as given in the docstring. This is - ignored when autosummary logic applies. - fake_autosummary : bool - If True, autosummary-style behaviour will apply for params - that are attributes of the class and have a docstring. - - Returns - ------- - display_param : str - The marked up parameter name for display. This may include a link - to the corresponding attribute's own documentation. - desc : list of str - A list of description lines. This may be identical to the input - ``desc``, if ``autosum is None`` or ``param`` is not a class - attribute, or it will be a summary of the class attribute's - docstring. 
- - Notes - ----- - This does not have the autosummary functionality to display a method's - signature, and hence is not used to format methods. It may be - complicated to incorporate autosummary's signature mangling, as it - relies on Sphinx's plugin mechanism. - """ - param = param.strip() - display_param = ('**%s**' if self.use_blockquotes else '%s') % param - - if not fake_autosummary: - return display_param, desc - - param_obj = getattr(self._obj, param, None) - if not (callable(param_obj) - or isinstance(param_obj, property) - or inspect.isgetsetdescriptor(param_obj)): - param_obj = None - obj_doc = pydoc.getdoc(param_obj) - - if not (param_obj and obj_doc): - return display_param, desc - - prefix = getattr(self, '_name', '') - if prefix: - autosum_prefix = '~%s.' % prefix - link_prefix = '%s.' % prefix - else: - autosum_prefix = '' - link_prefix = '' - - # Referenced object has a docstring - display_param = ':obj:`%s <%s%s>`' % (param, - link_prefix, - param) - if obj_doc: - # Overwrite desc. Take summary logic of autosummary - desc = re.split('\n\s*\n', obj_doc.strip(), 1)[0] - # XXX: Should this have DOTALL? - # It does not in autosummary - m = re.search(r"^([A-Z].*?\.)(?:\s|$)", - ' '.join(desc.split())) - if m: - desc = m.group(1).strip() - else: - desc = desc.partition('\n')[0] - desc = desc.split('\n') - return display_param, desc - - def _str_param_list(self, name, fake_autosummary=False): - """Generate RST for a listing of parameters or similar - - Parameter names are displayed as bold text, and descriptions - are in blockquotes. Descriptions may therefore contain block - markup as well. - - Parameters - ---------- - name : str - Section name (e.g. Parameters) - fake_autosummary : bool - When True, the parameter names may correspond to attributes of the - object beign documented, usually ``property`` instances on a class. - In this case, names will be linked to fuller descriptions. 
- - Returns - ------- - rst : list of str - """ - out = [] - if self[name]: - out += self._str_field_list(name) - out += [''] - for param, param_type, desc in self[name]: - display_param, desc = self._process_param(param, desc, - fake_autosummary) - - if param_type: - out += self._str_indent(['%s : %s' % (display_param, - param_type)]) - else: - out += self._str_indent([display_param]) - if desc and self.use_blockquotes: - out += [''] - elif not desc: - # empty definition - desc = ['..'] - out += self._str_indent(desc, 8) - out += [''] - - return out - - @property - def _obj(self): - if hasattr(self, '_cls'): - return self._cls - elif hasattr(self, '_f'): - return self._f - return None - - def _str_member_list(self, name): - """ - Generate a member listing, autosummary:: table where possible, - and a table where not. - - """ - out = [] - if self[name]: - out += ['.. rubric:: %s' % name, ''] - prefix = getattr(self, '_name', '') - - if prefix: - prefix = '~%s.' % prefix - - autosum = [] - others = [] - for param, param_type, desc in self[name]: - param = param.strip() - - # Check if the referenced member can have a docstring or not - param_obj = getattr(self._obj, param, None) - if not (callable(param_obj) - or isinstance(param_obj, property) - or inspect.isdatadescriptor(param_obj)): - param_obj = None - - if param_obj and pydoc.getdoc(param_obj): - # Referenced object has a docstring - autosum += [" %s%s" % (prefix, param)] - else: - others.append((param, param_type, desc)) - - if autosum: - out += ['.. 
autosummary::'] - if self.class_members_toctree: - out += [' :toctree:'] - out += [''] + autosum - - if others: - maxlen_0 = max(3, max([len(x[0]) + 4 for x in others])) - hdr = sixu("=") * maxlen_0 + sixu(" ") + sixu("=") * 10 - fmt = sixu('%%%ds %%s ') % (maxlen_0,) - out += ['', '', hdr] - for param, param_type, desc in others: - desc = sixu(" ").join(x.strip() for x in desc).strip() - if param_type: - desc = "(%s) %s" % (param_type, desc) - out += [fmt % ("**" + param.strip() + "**", desc)] - out += [hdr] - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - content = textwrap.dedent("\n".join(self[name])).split("\n") - out += content - out += [''] - return out - - def _str_see_also(self, func_role): - out = [] - if self['See Also']: - see_also = super(SphinxDocString, self)._str_see_also(func_role) - out = ['.. seealso::', ''] - out += self._str_indent(see_also[2:]) - return out - - def _str_warnings(self): - out = [] - if self['Warnings']: - out = ['.. warning::', ''] - out += self._str_indent(self['Warnings']) - out += [''] - return out - - def _str_index(self): - idx = self['index'] - out = [] - if len(idx) == 0: - return out - - out += ['.. index:: %s' % idx.get('default', '')] - for section, references in idx.items(): - if section == 'default': - continue - elif section == 'refguide': - out += [' single: %s' % (', '.join(references))] - else: - out += [' %s: %s' % (section, ','.join(references))] - out += [''] - return out - - def _str_references(self): - out = [] - if self['References']: - out += self._str_header('References') - if isinstance(self['References'], str): - self['References'] = [self['References']] - out.extend(self['References']) - out += [''] - # Latex collects all references to a separate bibliography, - # so we need to insert links to it - if sphinx.__version__ >= "0.6": - out += ['.. only:: latex', ''] - else: - out += ['.. 
latexonly::', ''] - items = [] - for line in self['References']: - m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) - if m: - items.append(m.group(1)) - out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] - return out - - def _str_examples(self): - examples_str = "\n".join(self['Examples']) - - if (self.use_plots and re.search(IMPORT_MATPLOTLIB_RE, examples_str) - and 'plot::' not in examples_str): - out = [] - out += self._str_header('Examples') - out += ['.. plot::', ''] - out += self._str_indent(self['Examples']) - out += [''] - return out - else: - return self._str_section('Examples') - - def __str__(self, indent=0, func_role="obj"): - ns = { - 'signature': self._str_signature(), - 'index': self._str_index(), - 'summary': self._str_summary(), - 'extended_summary': self._str_extended_summary(), - 'parameters': self._str_param_list('Parameters'), - 'returns': self._str_returns('Returns'), - 'yields': self._str_returns('Yields'), - 'other_parameters': self._str_param_list('Other Parameters'), - 'raises': self._str_param_list('Raises'), - 'warns': self._str_param_list('Warns'), - 'warnings': self._str_warnings(), - 'see_also': self._str_see_also(func_role), - 'notes': self._str_section('Notes'), - 'references': self._str_references(), - 'examples': self._str_examples(), - 'attributes': - self._str_param_list('Attributes', fake_autosummary=True) - if self.attributes_as_param_list - else self._str_member_list('Attributes'), - 'methods': self._str_member_list('Methods'), - } - ns = dict((k, '\n'.join(v)) for k, v in ns.items()) - - rendered = self.template.render(**ns) - return '\n'.join(self._str_indent(rendered.split('\n'), indent)) - - -class SphinxFunctionDoc(SphinxDocString, FunctionDoc): - def __init__(self, obj, doc=None, config={}): - self.load_config(config) - FunctionDoc.__init__(self, obj, doc=doc, config=config) - - -class SphinxClassDoc(SphinxDocString, ClassDoc): - def __init__(self, obj, doc=None, func_doc=None, config={}): - 
self.load_config(config) - ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) - - -class SphinxObjDoc(SphinxDocString): - def __init__(self, obj, doc=None, config={}): - self._f = obj - self.load_config(config) - SphinxDocString.__init__(self, doc, config=config) - - -def get_doc_object(obj, what=None, doc=None, config={}, builder=None): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif isinstance(obj, collections.Callable): - what = 'function' - else: - what = 'object' - - template_dirs = [os.path.join(os.path.dirname(__file__), 'templates')] - if builder is not None: - template_loader = BuiltinTemplateLoader() - template_loader.init(builder, dirs=template_dirs) - else: - template_loader = FileSystemLoader(template_dirs) - template_env = SandboxedEnvironment(loader=template_loader) - config['template'] = template_env.get_template('numpydoc_docstring.rst') - - if what == 'class': - return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, - config=config) - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, doc=doc, config=config) - else: - if doc is None: - doc = pydoc.getdoc(obj) - return SphinxObjDoc(obj, doc, config=config) diff --git a/doc/sphinxext/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py deleted file mode 100644 index dc20b3f828eb2..0000000000000 --- a/doc/sphinxext/numpydoc/numpydoc.py +++ /dev/null @@ -1,288 +0,0 @@ -""" -======== -numpydoc -======== - -Sphinx extension that handles docstrings in the Numpy standard format. [1] - -It will: - -- Convert Parameters etc. sections to field lists. -- Convert See Also section to a See also entry. -- Renumber references. -- Extract the signature from the docstring, if it can't be determined - otherwise. - -.. 
[1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt - -""" -from __future__ import division, absolute_import, print_function - -import sys -import re -import pydoc -import sphinx -import inspect -import collections - -if sphinx.__version__ < '1.0.1': - raise RuntimeError("Sphinx 1.0.1 or newer is required") - -from .docscrape_sphinx import get_doc_object, SphinxDocString -from . import __version__ - -if sys.version_info[0] >= 3: - sixu = lambda s: s -else: - sixu = lambda s: unicode(s, 'unicode_escape') - - -def rename_references(app, what, name, obj, options, lines, - reference_offset=[0]): - # replace reference numbers so that there are no duplicates - references = set() - for line in lines: - line = line.strip() - m = re.match(sixu('^.. \\[(%s)\\]') % app.config.numpydoc_citation_re, - line, re.I) - if m: - references.add(m.group(1)) - - if references: - for r in references: - if r.isdigit(): - new_r = sixu("R%d") % (reference_offset[0] + int(r)) - else: - new_r = sixu("%s%d") % (r, reference_offset[0]) - - for i, line in enumerate(lines): - lines[i] = lines[i].replace(sixu('[%s]_') % r, - sixu('[%s]_') % new_r) - lines[i] = lines[i].replace(sixu('.. [%s]') % r, - sixu('.. [%s]') % new_r) - - reference_offset[0] += len(references) - - -DEDUPLICATION_TAG = ' !! processed by numpydoc !!' 
- - -def mangle_docstrings(app, what, name, obj, options, lines): - if DEDUPLICATION_TAG in lines: - return - - cfg = {'use_plots': app.config.numpydoc_use_plots, - 'use_blockquotes': app.config.numpydoc_use_blockquotes, - 'show_class_members': app.config.numpydoc_show_class_members, - 'show_inherited_class_members': - app.config.numpydoc_show_inherited_class_members, - 'class_members_toctree': app.config.numpydoc_class_members_toctree, - 'attributes_as_param_list': - app.config.numpydoc_attributes_as_param_list} - - u_NL = sixu('\n') - if what == 'module': - # Strip top title - pattern = '^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*' - title_re = re.compile(sixu(pattern), re.I | re.S) - lines[:] = title_re.sub(sixu(''), u_NL.join(lines)).split(u_NL) - else: - doc = get_doc_object(obj, what, u_NL.join(lines), config=cfg, - builder=app.builder) - if sys.version_info[0] >= 3: - doc = str(doc) - else: - doc = unicode(doc) - lines[:] = doc.split(u_NL) - - if (app.config.numpydoc_edit_link and hasattr(obj, '__name__') and - obj.__name__): - if hasattr(obj, '__module__'): - v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) - else: - v = dict(full_name=obj.__name__) - lines += [sixu(''), sixu('.. 
htmlonly::'), sixu('')] - lines += [sixu(' %s') % x for x in - (app.config.numpydoc_edit_link % v).split("\n")] - - # call function to replace reference numbers so that there are no - # duplicates - rename_references(app, what, name, obj, options, lines) - - lines += ['..', DEDUPLICATION_TAG] - - -def mangle_signature(app, what, name, obj, options, sig, retann): - # Do not try to inspect classes that don't define `__init__` - if (inspect.isclass(obj) and - (not hasattr(obj, '__init__') or - 'initializes x; see ' in pydoc.getdoc(obj.__init__))): - return '', '' - - if not (isinstance(obj, collections.Callable) or - hasattr(obj, '__argspec_is_invalid_')): - return - - if not hasattr(obj, '__doc__'): - return - doc = SphinxDocString(pydoc.getdoc(obj)) - sig = doc['Signature'] or getattr(obj, '__text_signature__', None) - if sig: - sig = re.sub(sixu("^[^(]*"), sixu(""), sig) - return sig, sixu('') - - -def setup(app, get_doc_object_=get_doc_object): - if not hasattr(app, 'add_config_value'): - return # probably called by nose, better bail out - - global get_doc_object - get_doc_object = get_doc_object_ - - app.connect('autodoc-process-docstring', mangle_docstrings) - app.connect('autodoc-process-signature', mangle_signature) - app.add_config_value('numpydoc_edit_link', None, False) - app.add_config_value('numpydoc_use_plots', None, False) - app.add_config_value('numpydoc_use_blockquotes', None, False) - app.add_config_value('numpydoc_show_class_members', True, True) - app.add_config_value('numpydoc_show_inherited_class_members', True, True) - app.add_config_value('numpydoc_class_members_toctree', True, True) - app.add_config_value('numpydoc_citation_re', '[a-z0-9_.-]+', True) - app.add_config_value('numpydoc_attributes_as_param_list', True, True) - - # Extra mangling domains - app.add_domain(NumpyPythonDomain) - app.add_domain(NumpyCDomain) - - app.setup_extension('sphinx.ext.autosummary') - - metadata = {'version': __version__, - 'parallel_read_safe': True} - return 
metadata - -# ------------------------------------------------------------------------------ -# Docstring-mangling domains -# ------------------------------------------------------------------------------ - -from docutils.statemachine import ViewList -from sphinx.domains.c import CDomain -from sphinx.domains.python import PythonDomain - - -class ManglingDomainBase(object): - directive_mangling_map = {} - - def __init__(self, *a, **kw): - super(ManglingDomainBase, self).__init__(*a, **kw) - self.wrap_mangling_directives() - - def wrap_mangling_directives(self): - for name, objtype in list(self.directive_mangling_map.items()): - self.directives[name] = wrap_mangling_directive( - self.directives[name], objtype) - - -class NumpyPythonDomain(ManglingDomainBase, PythonDomain): - name = 'np' - directive_mangling_map = { - 'function': 'function', - 'class': 'class', - 'exception': 'class', - 'method': 'function', - 'classmethod': 'function', - 'staticmethod': 'function', - 'attribute': 'attribute', - } - indices = [] - - -class NumpyCDomain(ManglingDomainBase, CDomain): - name = 'np-c' - directive_mangling_map = { - 'function': 'function', - 'member': 'attribute', - 'macro': 'function', - 'type': 'class', - 'var': 'object', - } - - -def match_items(lines, content_old): - """Create items for mangled lines. - - This function tries to match the lines in ``lines`` with the items (source - file references and line numbers) in ``content_old``. The - ``mangle_docstrings`` function changes the actual docstrings, but doesn't - keep track of where each line came from. The manging does many operations - on the original lines, which are hard to track afterwards. - - Many of the line changes come from deleting or inserting blank lines. This - function tries to match lines by ignoring blank lines. 
All other changes - (such as inserting figures or changes in the references) are completely - ignored, so the generated line numbers will be off if ``mangle_docstrings`` - does anything non-trivial. - - This is a best-effort function and the real fix would be to make - ``mangle_docstrings`` actually keep track of the ``items`` together with - the ``lines``. - - Examples - -------- - >>> lines = ['', 'A', '', 'B', ' ', '', 'C', 'D'] - >>> lines_old = ['a', '', '', 'b', '', 'c'] - >>> items_old = [('file1.py', 0), ('file1.py', 1), ('file1.py', 2), - ... ('file2.py', 0), ('file2.py', 1), ('file2.py', 2)] - >>> content_old = ViewList(lines_old, items=items_old) - >>> match_items(lines, content_old) # doctest: +NORMALIZE_WHITESPACE - [('file1.py', 0), ('file1.py', 0), ('file2.py', 0), ('file2.py', 0), - ('file2.py', 2), ('file2.py', 2), ('file2.py', 2), ('file2.py', 2)] - >>> # first 2 ``lines`` are matched to 'a', second 2 to 'b', rest to 'c' - >>> # actual content is completely ignored. - - Notes - ----- - The algorithm tries to match any line in ``lines`` with one in - ``lines_old``. It skips over all empty lines in ``lines_old`` and assigns - this line number to all lines in ``lines``, unless a non-empty line is - found in ``lines`` in which case it goes to the next line in ``lines_old``. 
- - """ - items_new = [] - lines_old = content_old.data - items_old = content_old.items - j = 0 - for i, line in enumerate(lines): - # go to next non-empty line in old: - # line.strip() checks whether the string is all whitespace - while j < len(lines_old) - 1 and not lines_old[j].strip(): - j += 1 - items_new.append(items_old[j]) - if line.strip() and j < len(lines_old) - 1: - j += 1 - assert(len(items_new) == len(lines)) - return items_new - - -def wrap_mangling_directive(base_directive, objtype): - class directive(base_directive): - def run(self): - env = self.state.document.settings.env - - name = None - if self.arguments: - m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) - name = m.group(2).strip() - - if not name: - name = self.arguments[0] - - lines = list(self.content) - mangle_docstrings(env.app, objtype, name, None, None, lines) - if self.content: - items = match_items(lines, self.content) - self.content = ViewList(lines, items=items, - parent=self.content.parent) - - return base_directive.run(self) - - return directive diff --git a/doc/sphinxext/numpydoc/templates/numpydoc_docstring.rst b/doc/sphinxext/numpydoc/templates/numpydoc_docstring.rst deleted file mode 100644 index 1900db53cee47..0000000000000 --- a/doc/sphinxext/numpydoc/templates/numpydoc_docstring.rst +++ /dev/null @@ -1,16 +0,0 @@ -{{index}} -{{summary}} -{{extended_summary}} -{{parameters}} -{{returns}} -{{yields}} -{{other_parameters}} -{{raises}} -{{warns}} -{{warnings}} -{{see_also}} -{{notes}} -{{references}} -{{examples}} -{{attributes}} -{{methods}} diff --git a/doc/sphinxext/numpydoc/tests/test_docscrape.py b/doc/sphinxext/numpydoc/tests/test_docscrape.py deleted file mode 100644 index 2fb4eb5ab277e..0000000000000 --- a/doc/sphinxext/numpydoc/tests/test_docscrape.py +++ /dev/null @@ -1,1204 +0,0 @@ -# -*- encoding:utf-8 -*- -from __future__ import division, absolute_import, print_function - -import re -import sys -import textwrap -import warnings - -import jinja2 - -from 
numpydoc.docscrape import ( - NumpyDocString, - FunctionDoc, - ClassDoc, - ParseError -) -from numpydoc.docscrape_sphinx import (SphinxDocString, SphinxClassDoc, - SphinxFunctionDoc) -from nose.tools import (assert_equal, assert_raises, assert_list_equal, - assert_true) - -assert_list_equal.__self__.maxDiff = None - -if sys.version_info[0] >= 3: - sixu = lambda s: s -else: - sixu = lambda s: unicode(s, 'unicode_escape') - - -doc_txt = '''\ - numpy.multivariate_normal(mean, cov, shape=None, spam=None) - - Draw values from a multivariate normal distribution with specified - mean and covariance. - - The multivariate normal or Gaussian distribution is a generalisation - of the one-dimensional normal distribution to higher dimensions. - - Parameters - ---------- - mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - cov : (N, N) ndarray - Covariance matrix of the distribution. - shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - - Returns - ------- - out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - list of str - This is not a real return value. It exists to test - anonymous return values. - no_description - - Other Parameters - ---------------- - spam : parrot - A parrot off its mortal coil. - - Raises - ------ - RuntimeError - Some error - - Warns - ----- - RuntimeWarning - Some warning - - Warnings - -------- - Certain warnings apply. 
- - Notes - ----- - Instead of specifying the full covariance matrix, popular - approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - - This geometrical property can be seen in two dimensions by plotting - generated data-points: - - >>> mean = [0,0] - >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - - >>> x,y = multivariate_normal(mean,cov,5000).T - >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - - Note that the covariance matrix must be symmetric and non-negative - definite. - - References - ---------- - .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 - .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - - See Also - -------- - some, other, funcs - otherfunc : relationship - - Examples - -------- - >>> mean = (1,2) - >>> cov = [[1,0],[1,0]] - >>> x = multivariate_normal(mean,cov,(3,3)) - >>> print x.shape - (3, 3, 2) - - The following is probably true, given that 0.6 is roughly twice the - standard deviation: - - >>> print list( (x[0,0,:] - mean) < 0.6 ) - [True, True] - - .. index:: random - :refguide: random;distributions, random;gauss - - ''' -doc = NumpyDocString(doc_txt) - -doc_yields_txt = """ -Test generator - -Yields ------- -a : int - The number of apples. -b : int - The number of bananas. -int - The number of unknowns. 
-""" -doc_yields = NumpyDocString(doc_yields_txt) - - -def test_signature(): - assert doc['Signature'].startswith('numpy.multivariate_normal(') - assert doc['Signature'].endswith('spam=None)') - - -def test_summary(): - assert doc['Summary'][0].startswith('Draw values') - assert doc['Summary'][-1].endswith('covariance.') - - -def test_extended_summary(): - assert doc['Extended Summary'][0].startswith('The multivariate normal') - - -def test_parameters(): - assert_equal(len(doc['Parameters']), 3) - assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) - - arg, arg_type, desc = doc['Parameters'][1] - assert_equal(arg_type, '(N, N) ndarray') - assert desc[0].startswith('Covariance matrix') - assert doc['Parameters'][0][-1][-1] == ' (1+2+3)/3' - - -def test_other_parameters(): - assert_equal(len(doc['Other Parameters']), 1) - assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam']) - arg, arg_type, desc = doc['Other Parameters'][0] - assert_equal(arg_type, 'parrot') - assert desc[0].startswith('A parrot off its mortal coil') - - -def test_returns(): - assert_equal(len(doc['Returns']), 3) - arg, arg_type, desc = doc['Returns'][0] - assert_equal(arg, 'out') - assert_equal(arg_type, 'ndarray') - assert desc[0].startswith('The drawn samples') - assert desc[-1].endswith('distribution.') - - arg, arg_type, desc = doc['Returns'][1] - assert_equal(arg, 'list of str') - assert_equal(arg_type, '') - assert desc[0].startswith('This is not a real') - assert desc[-1].endswith('anonymous return values.') - - arg, arg_type, desc = doc['Returns'][2] - assert_equal(arg, 'no_description') - assert_equal(arg_type, '') - assert not ''.join(desc).strip() - - -def test_yields(): - section = doc_yields['Yields'] - assert_equal(len(section), 3) - truth = [('a', 'int', 'apples.'), - ('b', 'int', 'bananas.'), - ('int', '', 'unknowns.')] - for (arg, arg_type, desc), (arg_, arg_type_, end) in zip(section, truth): - assert_equal(arg, arg_) - assert_equal(arg_type, 
arg_type_) - assert desc[0].startswith('The number of') - assert desc[0].endswith(end) - - -def test_returnyield(): - doc_text = """ -Test having returns and yields. - -Returns -------- -int - The number of apples. - -Yields ------- -a : int - The number of apples. -b : int - The number of bananas. - -""" - assert_raises(ValueError, NumpyDocString, doc_text) - - -def test_section_twice(): - doc_text = """ -Test having a section Notes twice - -Notes ------ -See the next note for more information - -Notes ------ -That should break... -""" - assert_raises(ValueError, NumpyDocString, doc_text) - - # if we have a numpydoc object, we know where the error came from - class Dummy(object): - """ - Dummy class. - - Notes - ----- - First note. - - Notes - ----- - Second note. - - """ - def spam(self, a, b): - """Spam\n\nSpam spam.""" - pass - - def ham(self, c, d): - """Cheese\n\nNo cheese.""" - pass - - def dummy_func(arg): - """ - Dummy function. - - Notes - ----- - First note. - - Notes - ----- - Second note. 
- """ - - try: - SphinxClassDoc(Dummy) - except ValueError as e: - # python 3 version or python 2 version - assert_true("test_section_twice.<locals>.Dummy" in str(e) - or 'test_docscrape.Dummy' in str(e)) - - try: - SphinxFunctionDoc(dummy_func) - except ValueError as e: - # python 3 version or python 2 version - assert_true("test_section_twice.<locals>.dummy_func" in str(e) - or 'function dummy_func' in str(e)) - - -def test_notes(): - assert doc['Notes'][0].startswith('Instead') - assert doc['Notes'][-1].endswith('definite.') - assert_equal(len(doc['Notes']), 17) - - -def test_references(): - assert doc['References'][0].startswith('..') - assert doc['References'][-1].endswith('2001.') - - -def test_examples(): - assert doc['Examples'][0].startswith('>>>') - assert doc['Examples'][-1].endswith('True]') - - -def test_index(): - assert_equal(doc['index']['default'], 'random') - assert_equal(len(doc['index']), 2) - assert_equal(len(doc['index']['refguide']), 2) - - -def _strip_blank_lines(s): - "Remove leading, trailing and multiple blank lines" - s = re.sub(r'^\s*\n', '', s) - s = re.sub(r'\n\s*$', '', s) - s = re.sub(r'\n\s*\n', r'\n\n', s) - return s - - -def line_by_line_compare(a, b): - a = textwrap.dedent(a) - b = textwrap.dedent(b) - a = [l.rstrip() for l in _strip_blank_lines(a).split('\n')] - b = [l.rstrip() for l in _strip_blank_lines(b).split('\n')] - assert_list_equal(a, b) - - -def test_str(): - # doc_txt has the order of Notes and See Also sections flipped. - # This should be handled automatically, and so, one thing this test does - # is to make sure that See Also precedes Notes in the output. - line_by_line_compare(str(doc), -"""numpy.multivariate_normal(mean, cov, shape=None, spam=None) - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. 
- -Parameters ----------- -mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 -cov : (N, N) ndarray - Covariance matrix of the distribution. -shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -Returns -------- -out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. -list of str - This is not a real return value. It exists to test - anonymous return values. -no_description - -Other Parameters ----------------- -spam : parrot - A parrot off its mortal coil. - -Raises ------- -RuntimeError - Some error - -Warns ------ -RuntimeWarning - Some warning - -Warnings --------- -Certain warnings apply. - -See Also --------- - -`some`_, `other`_, `funcs`_ - -`otherfunc`_ - relationship - -Notes ------ -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -References ----------- -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. 
- -Examples --------- ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] - -.. index:: random - :refguide: random;distributions, random;gauss""") - - -def test_yield_str(): - line_by_line_compare(str(doc_yields), -"""Test generator - -Yields ------- -a : int - The number of apples. -b : int - The number of bananas. -int - The number of unknowns. - -.. index:: """) - - -def test_sphinx_str(): - sphinx_doc = SphinxDocString(doc_txt) - line_by_line_compare(str(sphinx_doc), -""" -.. index:: random - single: random;distributions, random;gauss - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. - -:Parameters: - - mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - cov : (N, N) ndarray - Covariance matrix of the distribution. - - shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -:Returns: - - out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - - list of str - This is not a real return value. It exists to test - anonymous return values. - - no_description - .. - -:Other Parameters: - - spam : parrot - A parrot off its mortal coil. - -:Raises: - - RuntimeError - Some error - -:Warns: - - RuntimeWarning - Some warning - -.. warning:: - - Certain warnings apply. - -.. 
seealso:: - - :obj:`some`, :obj:`other`, :obj:`funcs` - - :obj:`otherfunc` - relationship - -.. rubric:: Notes - -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -.. rubric:: References - -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -.. only:: latex - - [1]_, [2]_ - -.. rubric:: Examples - ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] -""") - - -def test_sphinx_yields_str(): - sphinx_doc = SphinxDocString(doc_yields_txt) - line_by_line_compare(str(sphinx_doc), -"""Test generator - -:Yields: - - a : int - The number of apples. - - b : int - The number of bananas. - - int - The number of unknowns. -""") - - -doc2 = NumpyDocString(""" - Returns array of indices of the maximum values of along the given axis. - - Parameters - ---------- - a : {array_like} - Array to look in. 
- axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis""") - - -def test_parameters_without_extended_description(): - assert_equal(len(doc2['Parameters']), 2) - - -doc3 = NumpyDocString(""" - my_signature(*params, **kwds) - - Return this and that. - """) - - -def test_escape_stars(): - signature = str(doc3).split('\n')[0] - assert_equal(signature, 'my_signature(\*params, \*\*kwds)') - - def my_func(a, b, **kwargs): - pass - - fdoc = FunctionDoc(func=my_func) - assert_equal(fdoc['Signature'], 'my_func(a, b, \*\*kwargs)') - - -doc4 = NumpyDocString( - """a.conj() - - Return an array with all complex-valued elements conjugated.""") - - -def test_empty_extended_summary(): - assert_equal(doc4['Extended Summary'], []) - - -doc5 = NumpyDocString( - """ - a.something() - - Raises - ------ - LinAlgException - If array is singular. - - Warns - ----- - SomeWarning - If needed - """) - - -def test_raises(): - assert_equal(len(doc5['Raises']), 1) - name,_,desc = doc5['Raises'][0] - assert_equal(name,'LinAlgException') - assert_equal(desc,['If array is singular.']) - - -def test_warns(): - assert_equal(len(doc5['Warns']), 1) - name,_,desc = doc5['Warns'][0] - assert_equal(name,'SomeWarning') - assert_equal(desc,['If needed']) - - -def test_see_also(): - doc6 = NumpyDocString( - """ - z(x,theta) - - See Also - -------- - func_a, func_b, func_c - func_d : some equivalent func - foo.func_e : some other func over - multiple lines - func_f, func_g, :meth:`func_h`, func_j, - func_k - :obj:`baz.obj_q` - :obj:`~baz.obj_r` - :class:`class_j`: fubar - foobar - """) - - assert len(doc6['See Also']) == 13 - for func, desc, role in doc6['See Also']: - if func in ('func_a', 'func_b', 'func_c', 'func_f', - 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q', - '~baz.obj_r'): - assert(not desc) - else: - assert(desc) - - if func == 'func_h': - assert role == 'meth' - elif func == 'baz.obj_q' or func == '~baz.obj_r': - assert role == 'obj' 
- elif func == 'class_j': - assert role == 'class' - else: - assert role is None - - if func == 'func_d': - assert desc == ['some equivalent func'] - elif func == 'foo.func_e': - assert desc == ['some other func over', 'multiple lines'] - elif func == 'class_j': - assert desc == ['fubar', 'foobar'] - - -def test_see_also_parse_error(): - text = ( - """ - z(x,theta) - - See Also - -------- - :func:`~foo` - """) - with assert_raises(ParseError) as err: - NumpyDocString(text) - assert_equal( - str(r":func:`~foo` is not a item name in '\n z(x,theta)\n\n See Also\n --------\n :func:`~foo`\n '"), - str(err.exception) - ) - -def test_see_also_print(): - class Dummy(object): - """ - See Also - -------- - func_a, func_b - func_c : some relationship - goes here - func_d - """ - pass - - obj = Dummy() - s = str(FunctionDoc(obj, role='func')) - assert(':func:`func_a`, :func:`func_b`' in s) - assert(' some relationship' in s) - assert(':func:`func_d`' in s) - - -def test_unknown_section(): - doc_text = """ -Test having an unknown section - -Mope ----- -This should be ignored and warned about -""" - - class BadSection(object): - """Class with bad section. - - Nope - ---- - This class has a nope section. - """ - pass - - with warnings.catch_warnings(record=True) as w: - NumpyDocString(doc_text) - assert len(w) == 1 - assert "Unknown section Mope" == str(w[0].message) - - with warnings.catch_warnings(record=True) as w: - SphinxClassDoc(BadSection) - assert len(w) == 1 - assert_true('test_docscrape.test_unknown_section.<locals>.BadSection' - in str(w[0].message) - or 'test_docscrape.BadSection' in str(w[0].message)) - - -doc7 = NumpyDocString(""" - - Doc starts on second line. 
- - """) - - -def test_empty_first_line(): - assert doc7['Summary'][0].startswith('Doc starts') - - -def test_no_summary(): - str(SphinxDocString(""" - Parameters - ----------""")) - - -def test_unicode(): - doc = SphinxDocString(""" - öäöäöäöäöåååå - - öäöäöäööäååå - - Parameters - ---------- - ååå : äää - ööö - - Returns - ------- - ååå : ööö - äää - - """) - assert isinstance(doc['Summary'][0], str) - assert doc['Summary'][0] == 'öäöäöäöäöåååå' - - -def test_plot_examples(): - cfg = dict(use_plots=True) - - doc = SphinxDocString(""" - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3],[4,5,6]) - >>> plt.show() - """, config=cfg) - assert 'plot::' in str(doc), str(doc) - - doc = SphinxDocString(""" - Examples - -------- - >>> from matplotlib import pyplot as plt - >>> plt.plot([1,2,3],[4,5,6]) - >>> plt.show() - """, config=cfg) - assert 'plot::' in str(doc), str(doc) - - doc = SphinxDocString(""" - Examples - -------- - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3],[4,5,6]) - plt.show() - """, config=cfg) - assert str(doc).count('plot::') == 1, str(doc) - - -def test_use_blockquotes(): - cfg = dict(use_blockquotes=True) - doc = SphinxDocString(""" - Parameters - ---------- - abc : def - ghi - jkl - mno - - Returns - ------- - ABC : DEF - GHI - JKL - MNO - """, config=cfg) - line_by_line_compare(str(doc), ''' - :Parameters: - - **abc** : def - - ghi - - **jkl** - - mno - - :Returns: - - **ABC** : DEF - - GHI - - **JKL** - - MNO - ''') - - -def test_class_members(): - - class Dummy(object): - """ - Dummy class. 
- - """ - def spam(self, a, b): - """Spam\n\nSpam spam.""" - pass - def ham(self, c, d): - """Cheese\n\nNo cheese.""" - pass - @property - def spammity(self): - """Spammity index""" - return 0.95 - - class Ignorable(object): - """local class, to be ignored""" - pass - - for cls in (ClassDoc, SphinxClassDoc): - doc = cls(Dummy, config=dict(show_class_members=False)) - assert 'Methods' not in str(doc), (cls, str(doc)) - assert 'spam' not in str(doc), (cls, str(doc)) - assert 'ham' not in str(doc), (cls, str(doc)) - assert 'spammity' not in str(doc), (cls, str(doc)) - assert 'Spammity index' not in str(doc), (cls, str(doc)) - - doc = cls(Dummy, config=dict(show_class_members=True)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - assert 'spammity' in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. autosummary::' in str(doc), str(doc) - else: - assert 'Spammity index' in str(doc), str(doc) - - class SubDummy(Dummy): - """ - Subclass of Dummy class. - - """ - def ham(self, c, d): - """Cheese\n\nNo cheese.\nOverloaded Dummy.ham""" - pass - - def bar(self, a, b): - """Bar\n\nNo bar""" - pass - - for cls in (ClassDoc, SphinxClassDoc): - doc = cls(SubDummy, config=dict(show_class_members=True, - show_inherited_class_members=False)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' not in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - assert 'bar' in str(doc), (cls, str(doc)) - assert 'spammity' not in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. 
autosummary::' in str(doc), str(doc) - else: - assert 'Spammity index' not in str(doc), str(doc) - - doc = cls(SubDummy, config=dict(show_class_members=True, - show_inherited_class_members=True)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - assert 'bar' in str(doc), (cls, str(doc)) - assert 'spammity' in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. autosummary::' in str(doc), str(doc) - else: - assert 'Spammity index' in str(doc), str(doc) - - -def test_duplicate_signature(): - # Duplicate function signatures occur e.g. in ufuncs, when the - # automatic mechanism adds one, and a more detailed comes from the - # docstring itself. - - doc = NumpyDocString( - """ - z(x1, x2) - - z(a, theta) - """) - - assert doc['Signature'].strip() == 'z(a, theta)' - - -class_doc_txt = """ - Foo - - Parameters - ---------- - f : callable ``f(t, y, *f_args)`` - Aaa. - jac : callable ``jac(t, y, *jac_args)`` - - Bbb. - - Attributes - ---------- - t : float - Current time. - y : ndarray - Current variable values. - - * hello - * world - an_attribute : float - The docstring is printed instead - no_docstring : str - But a description - no_docstring2 : str - multiline_sentence - midword_period - no_period - - Methods - ------- - a - b - c - - Examples - -------- - For usage examples, see `ode`. -""" - - -def test_class_members_doc(): - doc = ClassDoc(None, class_doc_txt) - line_by_line_compare(str(doc), - """ - Foo - - Parameters - ---------- - f : callable ``f(t, y, *f_args)`` - Aaa. - jac : callable ``jac(t, y, *jac_args)`` - Bbb. - - Examples - -------- - For usage examples, see `ode`. - - Attributes - ---------- - t : float - Current time. - y : ndarray - Current variable values. 
- - * hello - * world - an_attribute : float - The docstring is printed instead - no_docstring : str - But a description - no_docstring2 : str - multiline_sentence - midword_period - no_period - - Methods - ------- - a - b - c - - .. index:: - - """) - - -def test_class_members_doc_sphinx(): - class Foo: - @property - def an_attribute(self): - """Test attribute""" - return None - - @property - def no_docstring(self): - return None - - @property - def no_docstring2(self): - return None - - @property - def multiline_sentence(self): - """This is a - sentence. It spans multiple lines.""" - return None - - @property - def midword_period(self): - """The sentence for numpy.org.""" - return None - - @property - def no_period(self): - """This does not have a period - so we truncate its summary to the first linebreak - - Apparently. - """ - return None - - doc = SphinxClassDoc(Foo, class_doc_txt) - line_by_line_compare(str(doc), - """ - Foo - - :Parameters: - - f : callable ``f(t, y, *f_args)`` - Aaa. - - jac : callable ``jac(t, y, *jac_args)`` - Bbb. - - .. rubric:: Examples - - For usage examples, see `ode`. - - :Attributes: - - t : float - Current time. - - y : ndarray - Current variable values. - - * hello - * world - - :obj:`an_attribute <an_attribute>` : float - Test attribute - - no_docstring : str - But a description - - no_docstring2 : str - .. - - :obj:`multiline_sentence <multiline_sentence>` - This is a sentence. - - :obj:`midword_period <midword_period>` - The sentence for numpy.org. - - :obj:`no_period <no_period>` - This does not have a period - - .. rubric:: Methods - - ===== ========== - **a** - **b** - **c** - ===== ========== - - """) - - -def test_templated_sections(): - doc = SphinxClassDoc(None, class_doc_txt, - config={'template': jinja2.Template('{{examples}}\n{{parameters}}')}) - line_by_line_compare(str(doc), - """ - .. rubric:: Examples - - For usage examples, see `ode`. - - :Parameters: - - f : callable ``f(t, y, *f_args)`` - Aaa. 
- - jac : callable ``jac(t, y, *jac_args)`` - Bbb. - - """) - - -if __name__ == "__main__": - import nose - nose.run() diff --git a/environment.yml b/environment.yml index e31511e5b8afe..a2342d37dd6a4 100644 --- a/environment.yml +++ b/environment.yml @@ -22,6 +22,7 @@ dependencies: - pytest>=4.0 - sphinx - sphinxcontrib-spelling + - numpydoc # optional - beautifulsoup4>=4.2.1 diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 41e3f4581587e..61bf73cbc280f 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -531,7 +531,7 @@ def _get_op_name(op, special): for missing data in one of the inputs. With reverse version, `{reverse}`. Among flexible wrappers (`add`, `sub`, `mul`, `div`, `mod`, `pow`) to -arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**. +arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`. Parameters ---------- diff --git a/requirements-dev.txt b/requirements-dev.txt index facadf384f770..dfaf280d65f1c 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -13,6 +13,7 @@ moto pytest>=4.0 sphinx sphinxcontrib-spelling +numpydoc beautifulsoup4>=4.2.1 blosc bottleneck>=1.2.0
- Replace custom numpydoc in `doc/sphinxext/numpydoc` with official numpydoc release - Remove `numpydoc_use_blockquotes` parameter
https://api.github.com/repos/pandas-dev/pandas/pulls/24098
2018-12-04T19:23:02Z
2018-12-15T16:39:44Z
2018-12-15T16:39:44Z
2018-12-17T13:55:32Z
BUG: fix mutation of DTI backing Series/DataFrame
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 198e832ca4603..9c2d4cd5729d2 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -2923,7 +2923,9 @@ def _try_coerce_result(self, result): # allow passing of > 1dim if its trivial if result.ndim > 1: result = result.reshape(np.prod(result.shape)) - result = self.values._shallow_copy(result) + + # GH#24096 new values invalidates a frequency + result = self.values._shallow_copy(result, freq=None) return result diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index 5859dc9e858b7..910690a986c1c 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -196,6 +196,14 @@ def init_dict(data, index, columns, dtype=None): arrays.loc[missing] = [v] * missing.sum() else: + + for key in data: + if (isinstance(data[key], ABCDatetimeIndex) and + data[key].tz is not None): + # GH#24096 need copy to be deep for datetime64tz case + # TODO: See if we can avoid these copies + data[key] = data[key].copy(deep=True) + keys = com.dict_keys_to_ordered_list(data) columns = data_names = Index(keys) arrays = [data[k] for k in keys] diff --git a/pandas/core/series.py b/pandas/core/series.py index 6b4c9927ef0f1..6f5ab43ff6756 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -21,7 +21,7 @@ is_extension_array_dtype, is_extension_type, is_hashable, is_integer, is_iterator, is_list_like, is_scalar, is_string_like, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCSeries, ABCSparseArray, ABCSparseSeries) + ABCDataFrame, ABCDatetimeIndex, ABCSeries, ABCSparseArray, ABCSparseSeries) from pandas.core.dtypes.missing import ( isna, na_value_for_dtype, notna, remove_na_arraylike) @@ -182,6 +182,11 @@ def __init__(self, data=None, index=None, dtype=None, name=None, else: # need to copy to avoid aliasing issues data = data._values.copy() + if (isinstance(data, 
ABCDatetimeIndex) and + data.tz is not None): + # GH#24096 need copy to be deep for datetime64tz case + # TODO: See if we can avoid these copies + data = data._values.copy(deep=True) copy = False elif isinstance(data, np.ndarray): diff --git a/pandas/tests/frame/test_block_internals.py b/pandas/tests/frame/test_block_internals.py index 224e56777f6b4..647077a0428f3 100644 --- a/pandas/tests/frame/test_block_internals.py +++ b/pandas/tests/frame/test_block_internals.py @@ -28,6 +28,22 @@ class TestDataFrameBlockInternals(): + def test_setitem_invalidates_datetime_index_freq(self): + # GH#24096 altering a datetime64tz column inplace invalidates the + # `freq` attribute on the underlying DatetimeIndex + + dti = date_range('20130101', periods=3, tz='US/Eastern') + ts = dti[1] + + df = DataFrame({'B': dti}) + assert df['B']._values.freq == 'D' + + df.iloc[1, 0] = pd.NaT + assert df['B']._values.freq is None + + # check that the DatetimeIndex was not altered in place + assert dti.freq == 'D' + assert dti[1] == ts def test_cast_internals(self, float_frame): casted = DataFrame(float_frame._data, dtype=int) diff --git a/pandas/tests/series/test_block_internals.py b/pandas/tests/series/test_block_internals.py new file mode 100644 index 0000000000000..ccfb169cc2f8d --- /dev/null +++ b/pandas/tests/series/test_block_internals.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +import pandas as pd + +# Segregated collection of methods that require the BlockManager internal data +# structure + + +class TestSeriesBlockInternals(object): + + def test_setitem_invalidates_datetime_index_freq(self): + # GH#24096 altering a datetime64tz Series inplace invalidates the + # `freq` attribute on the underlying DatetimeIndex + + dti = pd.date_range('20130101', periods=3, tz='US/Eastern') + ts = dti[1] + ser = pd.Series(dti) + assert ser._values is not dti + assert ser._values._data.base is not dti._data.base + assert dti.freq == 'D' + ser.iloc[1] = pd.NaT + assert ser._values.freq is None + + # 
check that the DatetimeIndex was not altered in place + assert ser._values is not dti + assert ser._values._data.base is not dti._data.base + assert dti[1] == ts + assert dti.freq == 'D' + + def test_dt64tz_setitem_does_not_mutate_dti(self): + # GH#21907, GH#24096 + dti = pd.date_range('2016-01-01', periods=10, tz='US/Pacific') + ts = dti[0] + ser = pd.Series(dti) + assert ser._values is not dti + assert ser._values._data.base is not dti._data.base + assert ser._data.blocks[0].values is not dti + assert ser._data.blocks[0].values._data.base is not dti._data.base + + ser[::3] = pd.NaT + assert ser[0] is pd.NaT + assert dti[0] == ts
Fixes (at least some) verify_integrity bugs discussed in #24074. - [x] closes #21907 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24096
2018-12-04T17:51:54Z
2018-12-05T20:19:47Z
2018-12-05T20:19:47Z
2018-12-05T20:29:38Z
CI: remove failing line
diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 1ab88dc9f9e6d..7dcd7b284d66d 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -1,10 +1,11 @@ -import warnings from importlib import import_module import numpy as np + import pandas as pd from pandas.util import testing as tm + for imp in ['pandas.util', 'pandas.tools.hashing']: try: hashing = import_module(imp) @@ -73,10 +74,6 @@ def setup(self): self.uniques = tm.makeStringIndex(1000).values self.all = self.uniques.repeat(10) - def time_match_string(self): - with warnings.catch_warnings(record=True): - pd.match(self.all, self.uniques) - class Hashing(object): diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index dcecaf60ed578..3c0dd646aa502 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -1,10 +1,10 @@ import string -import warnings import numpy as np + +from pandas import ( + DataFrame, MultiIndex, NaT, Series, date_range, isnull, period_range) import pandas.util.testing as tm -from pandas import (DataFrame, Series, MultiIndex, date_range, period_range, - isnull, NaT) class GetNumericData(object): @@ -61,9 +61,6 @@ def time_reindex_axis1(self): def time_reindex_both_axes(self): self.df.reindex(index=self.idx, columns=self.idx) - def time_reindex_both_axes_ix(self): - self.df.ix[self.idx, self.idx] - def time_reindex_upcast(self): self.df2.reindex(np.random.permutation(range(1200))) diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index dbd79185ec006..ee5ae69555d16 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -1,11 +1,13 @@ -import warnings -from string import ascii_letters -from itertools import product from functools import partial +from itertools import product +from string import ascii_letters +import warnings import numpy as np -from pandas import (DataFrame, Series, 
MultiIndex, date_range, period_range, - TimeGrouper, Categorical, Timestamp) + +from pandas import ( + Categorical, DataFrame, MultiIndex, Series, TimeGrouper, Timestamp, + date_range, period_range) import pandas.util.testing as tm @@ -210,7 +212,7 @@ def time_multi_int_nunique(self, df): class AggFunctions(object): - def setup_cache(): + def setup_cache(self): N = 10**5 fac1 = np.array(['A', 'B', 'C'], dtype='O') fac2 = np.array(['one', 'two'], dtype='O') diff --git a/pandas/core/sparse/scipy_sparse.py b/pandas/core/sparse/scipy_sparse.py index ab4fdeb05f8f1..2d0ce2d5e5951 100644 --- a/pandas/core/sparse/scipy_sparse.py +++ b/pandas/core/sparse/scipy_sparse.py @@ -58,15 +58,7 @@ def _get_label_to_i_dict(labels, sort_labels=False): return (d) def _get_index_subset_to_coord_dict(index, subset, sort_labels=False): - def robust_get_level_values(i): - # if index has labels (that are not None) use those, - # else use the level location - try: - return index.get_level_values(index.names[i]) - except KeyError: - return index.get_level_values(i) - - ilabels = list(zip(*[robust_get_level_values(i) for i in subset])) + ilabels = list(zip(*[index._get_level_values(i) for i in subset])) labels_to_i = _get_label_to_i_dict(ilabels, sort_labels=sort_labels) labels_to_i = Series(labels_to_i) diff --git a/pandas/io/formats/html.py b/pandas/io/formats/html.py index bf92ce7ee0f67..6425e655959bd 100644 --- a/pandas/io/formats/html.py +++ b/pandas/io/formats/html.py @@ -5,7 +5,6 @@ from __future__ import print_function -from distutils.version import LooseVersion from textwrap import dedent from pandas.compat import OrderedDict, lzip, map, range, u, unichr, zip @@ -161,15 +160,7 @@ def write_result(self, buf): _classes.extend(self.classes) if self.notebook: - div_style = '' - try: - import IPython - if IPython.__version__ < LooseVersion('3.0.0'): - div_style = ' style="max-width:1500px;overflow:auto;"' - except (ImportError, AttributeError): - pass - - 
self.write('<div{style}>'.format(style=div_style)) + self.write('<div>') self.write_style() diff --git a/pandas/tests/io/conftest.py b/pandas/tests/io/conftest.py index b0cdbe2b5bedb..928519d39aed3 100644 --- a/pandas/tests/io/conftest.py +++ b/pandas/tests/io/conftest.py @@ -37,6 +37,11 @@ def s3_resource(tips_file, jsonl_file): """ pytest.importorskip('s3fs') boto3 = pytest.importorskip('boto3') + # GH-24092. See if boto.plugin skips the test or fails. + try: + pytest.importorskip("boto.plugin") + except AttributeError: + raise pytest.skip("moto/moto error") moto = pytest.importorskip('moto') test_s3_files = [ diff --git a/pandas/tests/io/json/test_pandas.py b/pandas/tests/io/json/test_pandas.py index d047970ce2f08..ed954c76294b6 100644 --- a/pandas/tests/io/json/test_pandas.py +++ b/pandas/tests/io/json/test_pandas.py @@ -846,6 +846,7 @@ def test_misc_example(self): assert_frame_equal(result, expected) @network + @pytest.mark.single def test_round_trip_exception_(self): # GH 3867 csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv' @@ -856,6 +857,7 @@ def test_round_trip_exception_(self): index=df.index, columns=df.columns), df) @network + @pytest.mark.single def test_url(self): url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5' # noqa result = read_json(url, convert_dates=True) diff --git a/pandas/tests/sparse/frame/test_to_from_scipy.py b/pandas/tests/sparse/frame/test_to_from_scipy.py index 1a10ff83d3097..e5c50e9574f90 100644 --- a/pandas/tests/sparse/frame/test_to_from_scipy.py +++ b/pandas/tests/sparse/frame/test_to_from_scipy.py @@ -1,5 +1,6 @@ import pytest import numpy as np +import pandas as pd from pandas.util import testing as tm from pandas import SparseDataFrame, SparseSeries from pandas.core.sparse.api import SparseDtype @@ -168,3 +169,16 @@ def test_from_scipy_fillna(spmatrix): expected[col].fill_value = -1 tm.assert_sp_frame_equal(sdf, expected) + + +def test_index_names_multiple_nones(): + # 
https://github.com/pandas-dev/pandas/pull/24092 + sparse = pytest.importorskip("scipy.sparse") + + s = (pd.Series(1, index=pd.MultiIndex.from_product([['A', 'B'], [0, 1]])) + .to_sparse()) + result, _, _ = s.to_coo() + assert isinstance(result, sparse.coo_matrix) + result = result.toarray() + expected = np.ones((2, 2), dtype="int64") + tm.assert_numpy_array_equal(result, expected) diff --git a/setup.cfg b/setup.cfg index 25f713822f127..44df79d1b60d2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -361,5 +361,40 @@ skip= pandas/tests/computation/test_compat.py, pandas/tests/computation/test_eval.py, pandas/types/common.py, - pandas/tests/extension/arrow/test_bool.py - doc/source/conf.py + pandas/tests/extension/arrow/test_bool.py, + doc/source/conf.py, + asv_bench/benchmarks/algorithms.py, + asv_bench/benchmarks/attrs_caching.py, + asv_bench/benchmarks/binary_ops.py, + asv_bench/benchmarks/categoricals.py, + asv_bench/benchmarks/ctors.py, + asv_bench/benchmarks/eval.py, + asv_bench/benchmarks/frame_ctor.py, + asv_bench/benchmarks/frame_methods.py, + asv_bench/benchmarks/gil.py, + asv_bench/benchmarks/groupby.py, + asv_bench/benchmarks/index_object.py, + asv_bench/benchmarks/indexing.py, + asv_bench/benchmarks/inference.py, + asv_bench/benchmarks/io/csv.py, + asv_bench/benchmarks/io/excel.py, + asv_bench/benchmarks/io/hdf.py, + asv_bench/benchmarks/io/json.py, + asv_bench/benchmarks/io/msgpack.py, + asv_bench/benchmarks/io/pickle.py, + asv_bench/benchmarks/io/sql.py, + asv_bench/benchmarks/io/stata.py, + asv_bench/benchmarks/join_merge.py, + asv_bench/benchmarks/multiindex_object.py, + asv_bench/benchmarks/panel_ctor.py, + asv_bench/benchmarks/panel_methods.py, + asv_bench/benchmarks/plotting.py, + asv_bench/benchmarks/reindex.py, + asv_bench/benchmarks/replace.py, + asv_bench/benchmarks/reshape.py, + asv_bench/benchmarks/rolling.py, + asv_bench/benchmarks/series_methods.py, + asv_bench/benchmarks/sparse.py, + asv_bench/benchmarks/stat_ops.py, + 
asv_bench/benchmarks/timeseries.py +
https://api.github.com/repos/pandas-dev/pandas/pulls/24092
2018-12-04T15:33:59Z
2018-12-04T22:05:39Z
2018-12-04T22:05:39Z
2018-12-04T22:08:20Z
TST: Work around statsmodels bug
diff --git a/pandas/tests/plotting/test_datetimelike.py b/pandas/tests/plotting/test_datetimelike.py index 1d6c8dc404d2b..7a28f05514dd5 100644 --- a/pandas/tests/plotting/test_datetimelike.py +++ b/pandas/tests/plotting/test_datetimelike.py @@ -1,5 +1,5 @@ """ Test cases for time series specific (freq conversion, etc) """ - +import sys from datetime import datetime, timedelta, date, time import pickle @@ -1557,7 +1557,10 @@ def _check_plot_works(f, freq=None, series=None, *args, **kwargs): # GH18439 # this is supported only in Python 3 pickle since # pickle in Python2 doesn't support instancemethod pickling - if PY3: + # TODO(statsmodels 0.10.0): Remove the statsmodels check + # https://github.com/pandas-dev/pandas/issues/24088 + # https://github.com/statsmodels/statsmodels/issues/4772 + if PY3 and 'statsmodels' not in sys.modules: with ensure_clean(return_filelike=True) as path: pickle.dump(fig, path) finally:
Closes https://github.com/pandas-dev/pandas/issues/24088
https://api.github.com/repos/pandas-dev/pandas/pulls/24090
2018-12-04T14:25:25Z
2018-12-04T15:19:06Z
2018-12-04T15:19:06Z
2020-04-29T02:34:41Z
DOC: Fix PEP-8 issues in indexing-, missing_data-, options- and release.rst
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 49289862a3acd..add1a4e587240 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -134,9 +134,10 @@ indexing functionality: .. ipython:: python dates = pd.date_range('1/1/2000', periods=8) - df = pd.DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D']) + df = pd.DataFrame(np.random.randn(8, 4), + index=dates, columns=['A', 'B', 'C', 'D']) df - panel = pd.Panel({'one' : df, 'two' : df - df.mean()}) + panel = pd.Panel({'one': df, 'two': df - df.mean()}) panel .. note:: @@ -174,14 +175,14 @@ columns. .. ipython:: python df[['A', 'B']] - df.loc[:,['B', 'A']] = df[['A', 'B']] + df.loc[:, ['B', 'A']] = df[['A', 'B']] df[['A', 'B']] The correct way to swap column values is by using raw values: .. ipython:: python - df.loc[:,['B', 'A']] = df[['A', 'B']].to_numpy() + df.loc[:, ['B', 'A']] = df[['A', 'B']].to_numpy() df[['A', 'B']] @@ -199,7 +200,7 @@ as an attribute: .. ipython:: python - sa = pd.Series([1,2,3],index=list('abc')) + sa = pd.Series([1, 2, 3], index=list('abc')) dfa = df.copy() .. ipython:: python @@ -239,7 +240,7 @@ You can also assign a ``dict`` to a row of a ``DataFrame``: .. ipython:: python x = pd.DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]}) - x.iloc[1] = dict(x=9, y=99) + x.iloc[1] = {'x': 9, 'y': 99} x You can use attribute access to modify an existing element of a Series or column of a DataFrame, but be careful; @@ -248,10 +249,10 @@ new column. In 0.21.0 and later, this will raise a ``UserWarning``: .. code-block:: ipython - In[1]: df = pd.DataFrame({'one': [1., 2., 3.]}) - In[2]: df.two = [4, 5, 6] + In [1]: df = pd.DataFrame({'one': [1., 2., 3.]}) + In [2]: df.two = [4, 5, 6] UserWarning: Pandas doesn't allow Series to be assigned into nonexistent columns - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute_access - In[3]: df + In [3]: df Out[3]: one 0 1.0 @@ -308,7 +309,9 @@ Selection By Label .. 
ipython:: python - dfl = pd.DataFrame(np.random.randn(5,4), columns=list('ABCD'), index=pd.date_range('20130101',periods=5)) + dfl = pd.DataFrame(np.random.randn(5, 4), + columns=list('ABCD'), + index=pd.date_range('20130101', periods=5)) dfl .. code-block:: ipython @@ -345,7 +348,7 @@ The ``.loc`` attribute is the primary access method. The following are valid inp .. ipython:: python - s1 = pd.Series(np.random.randn(6),index=list('abcdef')) + s1 = pd.Series(np.random.randn(6), index=list('abcdef')) s1 s1.loc['c':] s1.loc['b'] @@ -361,7 +364,7 @@ With a DataFrame: .. ipython:: python - df1 = pd.DataFrame(np.random.randn(6,4), + df1 = pd.DataFrame(np.random.randn(6, 4), index=list('abcdef'), columns=list('ABCD')) df1 @@ -404,7 +407,7 @@ are returned: .. ipython:: python - s = pd.Series(list('abcde'), index=[0,3,2,5,4]) + s = pd.Series(list('abcde'), index=[0, 3, 2, 5, 4]) s.loc[3:5] If at least one of the two is absent, but the index is sorted, and can be @@ -444,7 +447,7 @@ The ``.iloc`` attribute is the primary access method. The following are valid in .. ipython:: python - s1 = pd.Series(np.random.randn(5), index=list(range(0,10,2))) + s1 = pd.Series(np.random.randn(5), index=list(range(0, 10, 2))) s1 s1.iloc[:3] s1.iloc[3] @@ -460,9 +463,9 @@ With a DataFrame: .. ipython:: python - df1 = pd.DataFrame(np.random.randn(6,4), - index=list(range(0,12,2)), - columns=list(range(0,8,2))) + df1 = pd.DataFrame(np.random.randn(6, 4), + index=list(range(0, 12, 2)), + columns=list(range(0, 8, 2))) df1 Select via integer slicing: @@ -516,7 +519,7 @@ an empty axis (e.g. an empty DataFrame being returned). .. ipython:: python - dfl = pd.DataFrame(np.random.randn(5,2), columns=list('AB')) + dfl = pd.DataFrame(np.random.randn(5, 2), columns=list('AB')) dfl dfl.iloc[:, 2:3] dfl.iloc[:, 1:3] @@ -818,7 +821,7 @@ In the ``Series`` case this is effectively an appending operation. .. ipython:: python - se = pd.Series([1,2,3]) + se = pd.Series([1, 2, 3]) se se[5] = 5. 
se @@ -827,10 +830,10 @@ A ``DataFrame`` can be enlarged on either axis via ``.loc``. .. ipython:: python - dfi = pd.DataFrame(np.arange(6).reshape(3,2), - columns=['A','B']) + dfi = pd.DataFrame(np.arange(6).reshape(3, 2), + columns=['A', 'B']) dfi - dfi.loc[:,'C'] = dfi.loc[:,'A'] + dfi.loc[:, 'C'] = dfi.loc[:, 'A'] dfi This is like an ``append`` operation on the ``DataFrame``. @@ -870,7 +873,7 @@ You can also set using these same indexers. .. ipython:: python - df.at[dates[-1]+1, 0] = 7 + df.at[dates[-1] + 1, 0] = 7 df Boolean indexing @@ -908,9 +911,9 @@ more complex criteria: .. ipython:: python - df2 = pd.DataFrame({'a' : ['one', 'one', 'two', 'three', 'two', 'one', 'six'], - 'b' : ['x', 'y', 'y', 'x', 'y', 'x', 'x'], - 'c' : np.random.randn(7)}) + df2 = pd.DataFrame({'a': ['one', 'one', 'two', 'three', 'two', 'one', 'six'], + 'b': ['x', 'y', 'y', 'x', 'y', 'x', 'x'], + 'c': np.random.randn(7)}) # only want 'two' or 'three' criterion = df2['a'].map(lambda x: x.startswith('t')) @@ -928,7 +931,7 @@ and :ref:`Advanced Indexing <advanced>` you may select along more than one axis .. ipython:: python - df2.loc[criterion & (df2['b'] == 'x'),'b':'c'] + df2.loc[criterion & (df2['b'] == 'x'), 'b':'c'] .. _indexing.basics.indexing_isin: @@ -1032,7 +1035,8 @@ The code below is equivalent to ``df.where(df < 0)``. :suppress: dates = pd.date_range('1/1/2000', periods=8) - df = pd.DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D']) + df = pd.DataFrame(np.random.randn(8, 4), + index=dates, columns=['A', 'B', 'C', 'D']) .. ipython:: python @@ -1065,7 +1069,7 @@ without creating a copy: .. ipython:: python df_orig = df.copy() - df_orig.where(df > 0, -df, inplace=True); + df_orig.where(df > 0, -df, inplace=True) df_orig .. note:: @@ -1086,7 +1090,7 @@ partial setting via ``.loc`` (but on the contents rather than the axis labels). .. 
ipython:: python df2 = df.copy() - df2[ df2[1:4] > 0] = 3 + df2[df2[1:4] > 0] = 3 df2 Where can also accept ``axis`` and ``level`` parameters to align the input when @@ -1095,14 +1099,14 @@ performing the ``where``. .. ipython:: python df2 = df.copy() - df2.where(df2>0,df2['A'],axis='index') + df2.where(df2 > 0, df2['A'], axis='index') This is equivalent to (but faster than) the following. .. ipython:: python df2 = df.copy() - df.apply(lambda x, y: x.where(x>0,y), y=df['A']) + df.apply(lambda x, y: x.where(x > 0, y), y=df['A']) .. versionadded:: 0.18.1 @@ -1163,25 +1167,12 @@ with the name ``a``. If instead you don't want to or cannot name your index, you can use the name ``index`` in your query expression: -.. ipython:: python - :suppress: - - old_index = index - del index - .. ipython:: python df = pd.DataFrame(np.random.randint(n, size=(n, 2)), columns=list('bc')) df df.query('index < b < c') -.. ipython:: python - :suppress: - - index = old_index - del old_index - - .. note:: If the name of your index overlaps with a column name, the column name is @@ -1191,7 +1182,7 @@ If instead you don't want to or cannot name your index, you can use the name df = pd.DataFrame({'a': np.random.randint(5, size=5)}) df.index.name = 'a' - df.query('a > 2') # uses the column 'a', not the index + df.query('a > 2') # uses the column 'a', not the index You can still use the index in a query expression by using the special identifier 'index': @@ -1293,15 +1284,6 @@ The ``in`` and ``not in`` operators ``not in`` comparison operators, providing a succinct syntax for calling the ``isin`` method of a ``Series`` or ``DataFrame``. -.. ipython:: python - :suppress: - - try: - old_d = d - del d - except NameError: - pass - .. ipython:: python # get all rows where columns "a" and "b" have overlapping values @@ -1325,7 +1307,8 @@ You can combine this with other expressions for very succinct queries: .. 
ipython:: python - # rows where cols a and b have overlapping values and col c's values are less than col d's + # rows where cols a and b have overlapping values + # and col c's values are less than col d's df.query('a in b and c < d') # pure Python @@ -1401,15 +1384,6 @@ Of course, expressions can be arbitrarily complex too: shorter == longer -.. ipython:: python - :suppress: - - try: - d = old_d - del old_d - except NameError: - pass - Performance of :meth:`~pandas.DataFrame.query` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1433,7 +1407,8 @@ floating point values generated using ``numpy.random.randn()``. .. ipython:: python :suppress: - df = pd.DataFrame(np.random.randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D']) + df = pd.DataFrame(np.random.randn(8, 4), + index=dates, columns=['A', 'B', 'C', 'D']) df2 = df.copy() @@ -1500,8 +1475,8 @@ default value. .. ipython:: python - s = pd.Series([1,2,3], index=['a','b','c']) - s.get('a') # equivalent to s['a'] + s = pd.Series([1, 2, 3], index=['a', 'b', 'c']) + s.get('a') # equivalent to s['a'] s.get('x', default=-1) The :meth:`~pandas.DataFrame.lookup` Method @@ -1513,8 +1488,8 @@ NumPy array. For instance: .. ipython:: python - dflookup = pd.DataFrame(np.random.rand(20,4), columns = ['A','B','C','D']) - dflookup.lookup(list(range(0,10,2)), ['B','C','A','B','D']) + dflookup = pd.DataFrame(np.random.rand(20, 4), columns = ['A', 'B', 'C', 'D']) + dflookup.lookup(list(range(0, 10, 2)), ['B', 'C', 'A', 'B', 'D']) .. _indexing.class: @@ -1641,7 +1616,9 @@ Missing values idx1 idx1.fillna(2) - idx2 = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]) + idx2 = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), + pd.NaT, + pd.Timestamp('2011-01-03')]) idx2 idx2.fillna(pd.Timestamp('2011-01-02')) @@ -1664,10 +1641,10 @@ To create a new, re-indexed DataFrame: .. 
ipython:: python :suppress: - data = pd.DataFrame({'a' : ['bar', 'bar', 'foo', 'foo'], - 'b' : ['one', 'two', 'one', 'two'], - 'c' : ['z', 'y', 'x', 'w'], - 'd' : [1., 2., 3, 4]}) + data = pd.DataFrame({'a': ['bar', 'bar', 'foo', 'foo'], + 'b': ['one', 'two', 'one', 'two'], + 'c': ['z', 'y', 'x', 'w'], + 'd': [1., 2., 3, 4]}) .. ipython:: python @@ -1746,8 +1723,8 @@ When setting values in a pandas object, care must be taken to avoid what is call list('efgh'), list('ijkl'), list('mnop')], - columns=pd.MultiIndex.from_product([['one','two'], - ['first','second']])) + columns=pd.MultiIndex.from_product([['one', 'two'], + ['first', 'second']])) dfmi Compare these two access methods: @@ -1758,7 +1735,7 @@ Compare these two access methods: .. ipython:: python - dfmi.loc[:,('one','second')] + dfmi.loc[:, ('one', 'second')] These both yield the same results, so which should you use? It is instructive to understand the order of operations on these and why method 2 (``.loc``) is much preferred over method 1 (chained ``[]``). @@ -1783,6 +1760,11 @@ But it turns out that assigning to the product of chained indexing has inherently unpredictable results. To see this, think about how the Python interpreter executes this code: +.. ipython:: python + :suppress: + + value = None + .. code-block:: python dfmi.loc[:, ('one', 'second')] = value @@ -1820,7 +1802,8 @@ that you've done this: def do_something(df): foo = df[['bar', 'baz']] # Is foo a view? A copy? Nobody knows! # ... many lines here ... - foo['quux'] = value # We don't know whether this will modify df or not! + # We don't know whether this will modify df or not! + foo['quux'] = value return foo Yikes! @@ -1850,9 +1833,9 @@ chained indexing expression, you can set the :ref:`option <options>` .. 
ipython:: python :okwarning: - dfb = pd.DataFrame({'a' : ['one', 'one', 'two', - 'three', 'two', 'one', 'six'], - 'c' : np.arange(7)}) + dfb = pd.DataFrame({'a': ['one', 'one', 'two', + 'three', 'two', 'one', 'six'], + 'c': np.arange(7)}) # This will show the SettingWithCopyWarning # but the frame values will be set @@ -1880,8 +1863,8 @@ This is the correct access method: .. ipython:: python - dfc = pd.DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]}) - dfc.loc[0,'A'] = 11 + dfc = pd.DataFrame({'A': ['aaa', 'bbb', 'ccc'], 'B': [1, 2, 3]}) + dfc.loc[0, 'A'] = 11 dfc This *can* work at times, but it is not guaranteed to, and therefore should be avoided: diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index ebe577feb706c..6a089decde3f5 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -81,7 +81,7 @@ Series and DataFrame objects: .. ipython:: python - None == None + None == None # noqa: E711 np.nan == np.nan So as compared to above, a scalar equality comparison versus a ``None/np.nan`` doesn't provide useful information. @@ -102,7 +102,7 @@ pandas objects provide compatibility between ``NaT`` and ``NaN``. df2 = df.copy() df2['timestamp'] = pd.Timestamp('20120101') df2 - df2.loc[['a','c','h'],['one','timestamp']] = np.nan + df2.loc[['a', 'c', 'h'], ['one', 'timestamp']] = np.nan df2 df2.get_dtype_counts() @@ -187,7 +187,7 @@ The sum of an empty or all-NA Series or column of a DataFrame is 0. .. ipython:: python pd.Series([np.nan]).sum() - + pd.Series([]).sum() The product of an empty or all-NA Series or column of a DataFrame is 1. @@ -195,7 +195,7 @@ The product of an empty or all-NA Series or column of a DataFrame is 1. .. ipython:: python pd.Series([np.nan]).prod() - + pd.Series([]).prod() @@ -287,10 +287,10 @@ use case of this is to fill a DataFrame with the mean of that column. .. 
ipython:: python - dff = pd.DataFrame(np.random.randn(10,3), columns=list('ABC')) - dff.iloc[3:5,0] = np.nan - dff.iloc[4:6,1] = np.nan - dff.iloc[5:8,2] = np.nan + dff = pd.DataFrame(np.random.randn(10, 3), columns=list('ABC')) + dff.iloc[3:5, 0] = np.nan + dff.iloc[4:6, 1] = np.nan + dff.iloc[5:8, 2] = np.nan dff dff.fillna(dff.mean()) @@ -473,7 +473,8 @@ filled since the last valid observation: .. ipython:: python - ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, np.nan, 13, np.nan, np.nan]) + ser = pd.Series([np.nan, np.nan, 5, np.nan, np.nan, + np.nan, 13, np.nan, np.nan]) # fill all consecutive values in a forward direction ser.interpolate() diff --git a/doc/source/options.rst b/doc/source/options.rst index 31359c337fdb8..e91be3e6ae730 100644 --- a/doc/source/options.rst +++ b/doc/source/options.rst @@ -38,9 +38,9 @@ and so passing in a substring will work - as long as it is unambiguous: .. ipython:: python pd.get_option("display.max_rows") - pd.set_option("display.max_rows",101) + pd.set_option("display.max_rows", 101) pd.get_option("display.max_rows") - pd.set_option("max_r",102) + pd.set_option("max_r", 102) pd.get_option("display.max_rows") @@ -93,7 +93,7 @@ All options also have a default value, and you can use ``reset_option`` to do ju .. ipython:: python pd.get_option("display.max_rows") - pd.set_option("display.max_rows",999) + pd.set_option("display.max_rows", 999) pd.get_option("display.max_rows") pd.reset_option("display.max_rows") pd.get_option("display.max_rows") @@ -113,9 +113,9 @@ are restored automatically when you exit the `with` block: .. 
ipython:: python - with pd.option_context("display.max_rows",10,"display.max_columns", 5): - print(pd.get_option("display.max_rows")) - print(pd.get_option("display.max_columns")) + with pd.option_context("display.max_rows", 10, "display.max_columns", 5): + print(pd.get_option("display.max_rows")) + print(pd.get_option("display.max_columns")) print(pd.get_option("display.max_rows")) print(pd.get_option("display.max_columns")) @@ -150,7 +150,7 @@ lines are replaced by an ellipsis. .. ipython:: python - df = pd.DataFrame(np.random.randn(7,2)) + df = pd.DataFrame(np.random.randn(7, 2)) pd.set_option('max_rows', 7) df pd.set_option('max_rows', 5) @@ -162,7 +162,7 @@ dataframes to stretch across pages, wrapped over the full column vs row-wise. .. ipython:: python - df = pd.DataFrame(np.random.randn(5,10)) + df = pd.DataFrame(np.random.randn(5, 10)) pd.set_option('expand_frame_repr', True) df pd.set_option('expand_frame_repr', False) @@ -174,7 +174,7 @@ dataframes to stretch across pages, wrapped over the full column vs row-wise. .. ipython:: python - df = pd.DataFrame(np.random.randn(10,10)) + df = pd.DataFrame(np.random.randn(10, 10)) pd.set_option('max_rows', 5) pd.set_option('large_repr', 'truncate') df @@ -190,7 +190,7 @@ of this length or longer will be truncated with an ellipsis. df = pd.DataFrame(np.array([['foo', 'bar', 'bim', 'uncomfortably long string'], ['horse', 'cow', 'banana', 'apple']])) - pd.set_option('max_colwidth',40) + pd.set_option('max_colwidth', 40) df pd.set_option('max_colwidth', 6) df @@ -201,7 +201,7 @@ will be given. .. ipython:: python - df = pd.DataFrame(np.random.randn(10,10)) + df = pd.DataFrame(np.random.randn(10, 10)) pd.set_option('max_info_columns', 11) df.info() pd.set_option('max_info_columns', 5) @@ -215,7 +215,7 @@ can specify the option ``df.info(null_counts=True)`` to override on showing a pa .. 
ipython:: python - df = pd.DataFrame(np.random.choice([0,1,np.nan], size=(10,10))) + df = pd.DataFrame(np.random.choice([0, 1, np.nan], size=(10, 10))) df pd.set_option('max_info_rows', 11) df.info() @@ -228,10 +228,10 @@ This is only a suggestion. .. ipython:: python - df = pd.DataFrame(np.random.randn(5,5)) - pd.set_option('precision',7) + df = pd.DataFrame(np.random.randn(5, 5)) + pd.set_option('precision', 7) df - pd.set_option('precision',4) + pd.set_option('precision', 4) df ``display.chop_threshold`` sets at what level pandas rounds to zero when @@ -240,7 +240,7 @@ precision at which the number is stored. .. ipython:: python - df = pd.DataFrame(np.random.randn(6,6)) + df = pd.DataFrame(np.random.randn(6, 6)) pd.set_option('chop_threshold', 0) df pd.set_option('chop_threshold', .5) @@ -252,7 +252,9 @@ The options are 'right', and 'left'. .. ipython:: python - df = pd.DataFrame(np.array([np.random.randn(6), np.random.randint(1,9,6)*.1, np.zeros(6)]).T, + df = pd.DataFrame(np.array([np.random.randn(6), + np.random.randint(1, 9, 6) * .1, + np.zeros(6)]).T, columns=['A', 'B', 'C'], dtype='float') pd.set_option('colheader_justify', 'right') df @@ -454,14 +456,14 @@ For instance: pd.set_eng_float_format(accuracy=3, use_eng_prefix=True) s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e']) - s/1.e3 - s/1.e6 + s / 1.e3 + s / 1.e6 .. ipython:: python :suppress: :okwarning: - pd.reset_option('^display\.') + pd.reset_option("^display") To round floats on a case-by-case basis, you can also use :meth:`~pandas.Series.round` and :meth:`~pandas.DataFrame.round`. @@ -483,7 +485,7 @@ If a DataFrame or Series contains these characters, the default output mode may .. ipython:: python df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']}) - df; + df .. image:: _static/option_unicode01.png @@ -494,7 +496,7 @@ times than the standard ``len`` function. .. ipython:: python pd.set_option('display.unicode.east_asian_width', True) - df; + df .. 
image:: _static/option_unicode02.png @@ -506,7 +508,7 @@ By default, an "Ambiguous" character's width, such as "¡" (inverted exclamation .. ipython:: python df = pd.DataFrame({'a': ['xxx', u'¡¡'], 'b': ['yyy', u'¡¡']}) - df; + df .. image:: _static/option_unicode03.png @@ -518,7 +520,7 @@ However, setting this option incorrectly for your terminal will cause these char .. ipython:: python pd.set_option('display.unicode.ambiguous_as_wide', True) - df; + df .. image:: _static/option_unicode04.png diff --git a/doc/source/release.rst b/doc/source/release.rst index 67a30984ff0a7..abbba9d6ff8ec 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -2,11 +2,6 @@ {{ header }} -.. ipython:: python - :suppress: - - import pandas.util.testing as tm - ************* Release Notes ************* @@ -2851,7 +2846,7 @@ API Changes In [5]: arr / arr2 Out[5]: array([0, 0, 1, 4]) - In [6]: pd.Series(arr) / pd.Series(arr2) # no future import required + In [6]: pd.Series(arr) / pd.Series(arr2) # no future import required Out[6]: 0 0.200000 1 0.666667 @@ -3662,12 +3657,12 @@ Improvements to existing features .. ipython:: python - p = pd.Panel(np.random.randn(3,4,4),items=['ItemA','ItemB','ItemC'], - major_axis=pd.date_range('20010102',periods=4), - minor_axis=['A','B','C','D']) + p = pd.Panel(np.random.randn(3, 4, 4), items=['ItemA', 'ItemB', 'ItemC'], + major_axis=pd.date_range('20010102', periods=4), + minor_axis=['A', 'B', 'C', 'D']) p p.reindex(items=['ItemA']).squeeze() - p.reindex(items=['ItemA'],minor=['B']).squeeze() + p.reindex(items=['ItemA'], minor=['B']).squeeze() - Improvement to Yahoo API access in ``pd.io.data.Options`` (:issue:`2758`) - added option `display.max_seq_items` to control the number of elements printed per sequence pprinting it. (:issue:`2979`) @@ -3681,10 +3676,10 @@ Improvements to existing features .. 
ipython:: python idx = pd.date_range("2001-10-1", periods=5, freq='M') - ts = pd.Series(np.random.rand(len(idx)),index=idx) + ts = pd.Series(np.random.rand(len(idx)), index=idx) ts['2001'] - df = pd.DataFrame(dict(A = ts)) + df = pd.DataFrame({'A': ts}) df['2001'] - added option `display.mpl_style` providing a sleeker visual style for plots. Based on https://gist.github.com/huyng/816622 (:issue:`3075`). diff --git a/setup.cfg b/setup.cfg index 30b4d13bd0a66..fd258e7334ff0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -77,10 +77,6 @@ exclude = doc/source/contributing_docstring.rst doc/source/enhancingperf.rst doc/source/groupby.rst - doc/source/indexing.rst - doc/source/missing_data.rst - doc/source/options.rst - doc/source/release.rst [yapf]
https://api.github.com/repos/pandas-dev/pandas/pulls/24089
2018-12-04T14:15:58Z
2018-12-12T13:29:13Z
2018-12-12T13:29:13Z
2018-12-12T13:29:30Z
DOC: Use a standard header for all rst files
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index e04a8253e0bef..53618e008a403 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -1,24 +1,6 @@ .. _10min: -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import os - import numpy as np - - import pandas as pd - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 - - # portions of this were borrowed from the - # Pandas cheatsheet - # created during the PyData Workshop-Sprint 2012 - # Hannah Chen, Henry Chow, Eric Cox, Robert Mauriello - +{{ header }} ******************** 10 Minutes to pandas @@ -773,6 +755,7 @@ CSV .. ipython:: python :suppress: + import os os.remove('foo.csv') HDF5 diff --git a/doc/source/advanced.rst b/doc/source/advanced.rst index 0cc2cea774bbd..822bc7407a0c7 100644 --- a/doc/source/advanced.rst +++ b/doc/source/advanced.rst @@ -1,15 +1,6 @@ .. _advanced: -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 +{{ header }} ****************************** MultiIndex / Advanced Indexing diff --git a/doc/source/basics.rst b/doc/source/basics.rst index 25e2c8cd1ff9a..dfb8764b5c8e8 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -1,16 +1,7 @@ -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 - .. _basics: +{{ header }} + ============================== Essential Basic Functionality ============================== diff --git a/doc/source/categorical.rst b/doc/source/categorical.rst index 31f2430e4be88..e57fd0ac0f96d 100644 --- a/doc/source/categorical.rst +++ b/doc/source/categorical.rst @@ -1,16 +1,6 @@ .. _categorical: -.. currentmodule:: pandas - -.. 
ipython:: python - :suppress: - - import numpy as np - import pandas as pd - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 - +{{ header }} **************** Categorical Data diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst index 704b0c4d80537..a0143d717105c 100644 --- a/doc/source/comparison_with_r.rst +++ b/doc/source/comparison_with_r.rst @@ -1,12 +1,6 @@ -.. currentmodule:: pandas .. _compare_with_r: -.. ipython:: python - :suppress: - - import pandas as pd - import numpy as np - pd.options.display.max_rows = 15 +{{ header }} Comparison with R / R libraries ******************************* diff --git a/doc/source/comparison_with_sas.rst b/doc/source/comparison_with_sas.rst index c4d121c10538c..d24647df81808 100644 --- a/doc/source/comparison_with_sas.rst +++ b/doc/source/comparison_with_sas.rst @@ -1,6 +1,7 @@ -.. currentmodule:: pandas .. _compare_with_sas: +{{ header }} + Comparison with SAS ******************** For potential users coming from `SAS <https://en.wikipedia.org/wiki/SAS_(software)>`__ diff --git a/doc/source/comparison_with_sql.rst b/doc/source/comparison_with_sql.rst index 021f37eb5c66f..366fdd546f58b 100644 --- a/doc/source/comparison_with_sql.rst +++ b/doc/source/comparison_with_sql.rst @@ -1,6 +1,7 @@ -.. currentmodule:: pandas .. _compare_with_sql: +{{ header }} + Comparison with SQL ******************** Since many potential pandas users have some familiarity with diff --git a/doc/source/comparison_with_stata.rst b/doc/source/comparison_with_stata.rst index e039843b22065..bf2b03176ecd8 100644 --- a/doc/source/comparison_with_stata.rst +++ b/doc/source/comparison_with_stata.rst @@ -1,6 +1,7 @@ -.. currentmodule:: pandas .. 
_compare_with_stata: +{{ header }} + Comparison with Stata ********************* For potential users coming from `Stata <https://en.wikipedia.org/wiki/Stata>`__ @@ -675,5 +676,3 @@ If out of core processing is needed, one possibility is the `dask.dataframe <http://dask.pydata.org/en/latest/dataframe.html>`_ library, which provides a subset of pandas functionality for an on-disk ``DataFrame``. - - diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 251dce5141ea5..e72662be7730b 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -1,21 +1,7 @@ -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import numpy as np - import matplotlib.pyplot as plt - - import pandas as pd - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 - - plt.close('all') - .. _computation: +{{ header }} + Computational tools =================== diff --git a/doc/source/conf.py b/doc/source/conf.py index d88b5e9757423..56f77f667df88 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -301,25 +301,25 @@ } -common_imports = """\ +header = """\ .. currentmodule:: pandas .. ipython:: python :suppress: import numpy as np - from pandas import * import pandas as pd + randn = np.random.randn + np.random.seed(123456) np.set_printoptions(precision=4, suppress=True) - options.display.max_rows = 15 - from pandas.compat import StringIO + pd.options.display.max_rows = 15 """ html_context = { 'redirects': {old: new for old, new in moved_api_pages}, - 'common_imports': common_imports, + 'header': header } # If false, no module index is generated. diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index c55452cf27309..2ce3649666346 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -1,5 +1,7 @@ .. 
_contributing: +{{ header }} + ********************** Contributing to pandas ********************** @@ -672,6 +674,7 @@ Otherwise, you need to do it manually: import warnings + def old_func(): """Summary of the function. @@ -681,6 +684,7 @@ Otherwise, you need to do it manually: warnings.warn('Use new_func instead.', FutureWarning, stacklevel=2) new_func() + def new_func(): pass @@ -816,7 +820,6 @@ We would name this file ``test_cool_feature.py`` and put in an appropriate place import pytest import numpy as np import pandas as pd - from pandas.util import testing as tm @pytest.mark.parametrize('dtype', ['int8', 'int16', 'int32', 'int64']) @@ -938,8 +941,10 @@ If your change involves checking that a warning is actually emitted, use .. code-block:: python - df = pd.DataFrame() + import pandas.util.testing as tm + + df = pd.DataFrame() with tm.assert_produces_warning(FutureWarning): df.some_operation() diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index 16d756acaca51..1b2e856e979a8 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -1,25 +1,6 @@ .. _cookbook: -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import datetime - import functools - import glob - import itertools - import os - - import numpy as np - import pandas as pd - from pandas.compat import StringIO - - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 - +{{ header }} ******** Cookbook @@ -186,6 +167,8 @@ One could hard code: .. ipython:: python + import functools + CritList = [Crit1, Crit2, Crit3] AllCrit = functools.reduce(lambda x, y: x & y, CritList) @@ -409,6 +392,8 @@ To take the cross section of the 1st level and 1st axis the index: .. 
ipython:: python + import itertools + index = list(itertools.product(['Ada', 'Quinn', 'Violet'], ['Comp', 'Math', 'Sci'])) headr = list(itertools.product(['Exams', 'Labs'], ['I', 'II'])) @@ -1022,6 +1007,9 @@ You can use the same approach to read all files matching a pattern. Here is an .. ipython:: python + import glob + import os + files = glob.glob('file_*.csv') result = pd.concat([pd.read_csv(f) for f in files], ignore_index=True) @@ -1081,6 +1069,8 @@ Option 1: pass rows explicitly to skip rows .. ipython:: python + from pandas.compat import StringIO + pd.read_csv(StringIO(data), sep=';', skiprows=[11, 12], index_col=0, parse_dates=True, header=10) @@ -1327,6 +1317,8 @@ The :ref:`Timedeltas <timedeltas.timedeltas>` docs. .. ipython:: python + import datetime + s = pd.Series(pd.date_range('2012-1-1', periods=3, freq='D')) s - s.max() diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 968b30d7e9e2b..8bdb0005de53c 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -1,21 +1,7 @@ -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import numpy as np - np.set_printoptions(precision=4, suppress=True) - import pandas as pd - pd.set_option('display.precision', 4, 'display.max_columns', 8) - pd.options.display.max_rows = 15 - - import matplotlib - # matplotlib.style.use('default') - import matplotlib.pyplot as plt - plt.close('all') - .. _dsintro: +{{ header }} + ************************ Intro to Data Structures ************************ diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 1c873d604cfe0..c0546d653d532 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -1,19 +1,6 @@ .. _enhancingperf: -.. currentmodule:: pandas - -.. 
ipython:: python - :suppress: - - import numpy as np - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - import pandas as pd - pd.options.display.max_rows=15 - - import os - import csv - +{{ header }} ********************* Enhancing Performance diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index c62b836ed1f33..853e9e4bdf574 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -1,20 +1,11 @@ -.. currentmodule:: pandas .. _gotchas: +{{ header }} + ******************************** Frequently Asked Questions (FAQ) ******************************** -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 - .. _df-memory-usage: DataFrame memory usage diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index de188846cce76..76481b8cc765a 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -1,19 +1,6 @@ -.. currentmodule:: pandas .. _groupby: -.. ipython:: python - :suppress: - - import numpy as np - import matplotlib.pyplot as plt - - import pandas as pd - - plt.close('all') - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 +{{ header }} ***************************** Group By: split-apply-combine diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 6ad9c573249a3..49289862a3acd 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -1,15 +1,6 @@ .. _indexing: -.. currentmodule:: pandas - -.. 
ipython:: python - :suppress: - - import numpy as np - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - import pandas as pd - pd.options.display.max_rows=15 +{{ header }} *************************** Indexing and Selecting Data diff --git a/doc/source/install.rst b/doc/source/install.rst index 4a71dbcec17e6..d939b14fd064a 100644 --- a/doc/source/install.rst +++ b/doc/source/install.rst @@ -1,6 +1,6 @@ .. _install: -.. currentmodule:: pandas +{{ header }} ============ Installation @@ -207,7 +207,6 @@ installed), make sure you have `pytest :: - >>> import pandas as pd >>> pd.test() running: pytest --skip-slow --skip-network C:\Users\TP\Anaconda3\envs\py36\lib\site-packages\pandas ============================= test session starts ============================= diff --git a/doc/source/internals.rst b/doc/source/internals.rst index c39dafa88db92..9c434928c214e 100644 --- a/doc/source/internals.rst +++ b/doc/source/internals.rst @@ -1,16 +1,6 @@ .. _internals: -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 +{{ header }} ********* Internals diff --git a/doc/source/io.rst b/doc/source/io.rst index c6e7bccdd8aad..fd83f1a24edab 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1,22 +1,10 @@ .. _io: -.. currentmodule:: pandas +{{ header }} .. ipython:: python :suppress: - import csv - import os - - import matplotlib.pyplot as plt - import numpy as np - import pandas as pd - from pandas.compat import StringIO, BytesIO - - - np.set_printoptions(precision=4, suppress=True) - plt.close('all') - pd.options.display.max_rows = 15 clipdf = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6], 'C': ['p', 'q', 'r']}, index=['x', 'y', 'z']) @@ -143,6 +131,7 @@ usecols : list-like or callable, default ``None`` .. 
ipython:: python + from pandas.compat import StringIO, BytesIO data = ('col1,col2,col3\n' 'a,b,1\n' 'a,b,2\n' @@ -452,6 +441,8 @@ worth trying. .. ipython:: python :suppress: + import os + os.remove('foo.csv') .. _io.categorical: @@ -1308,6 +1299,7 @@ We can get around this using ``dialect``: .. ipython:: python :okwarning: + import csv dia = csv.excel() dia.quoting = csv.QUOTE_NONE pd.read_csv(StringIO(data), dialect=dia) diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 8a25d991c149b..af767d7687749 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -1,18 +1,10 @@ -.. currentmodule:: pandas .. _merging: +{{ header }} + .. ipython:: python :suppress: - import numpy as np - np.random.seed(123456) - import pandas as pd - pd.options.display.max_rows=15 - randn = np.random.randn - np.set_printoptions(precision=4, suppress=True) - - import matplotlib.pyplot as plt - plt.close('all') import pandas.util._doctools as doctools p = doctools.TablePlotter() diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 7b6d338ee5b6a..ebe577feb706c 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -1,17 +1,7 @@ -.. currentmodule:: pandas - -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - pd.options.display.max_rows=15 - import matplotlib - # matplotlib.style.use('default') - import matplotlib.pyplot as plt - .. _missing_data: +{{ header }} + ************************* Working with missing data ************************* diff --git a/doc/source/r_interface.rst b/doc/source/r_interface.rst index f40f9199aaf66..4ec43a64cf98a 100644 --- a/doc/source/r_interface.rst +++ b/doc/source/r_interface.rst @@ -1,10 +1,6 @@ .. _rpy: -.. 
ipython:: python - :suppress: - - import pandas as pd - pd.options.display.max_rows = 15 +{{ header }} ****************** diff --git a/doc/source/release.rst b/doc/source/release.rst index af6fc23e12b78..67a30984ff0a7 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -1,18 +1,10 @@ .. _release: -.. currentmodule:: pandas +{{ header }} .. ipython:: python :suppress: - import pandas as pd - import numpy as np - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - import matplotlib.pyplot as plt - plt.close('all') - - pd.options.display.max_rows=15 import pandas.util.testing as tm ************* diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index 19857db1743e8..059e6eb2138f3 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -1,15 +1,6 @@ -.. currentmodule:: pandas .. _reshaping: -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.random.seed(123456) - pd.options.display.max_rows = 15 - np.set_printoptions(precision=4, suppress=True) +{{ header }} ************************** Reshaping and Pivot Tables diff --git a/doc/source/sparse.rst b/doc/source/sparse.rst index 5a4a211a5e6b4..9316dad762453 100644 --- a/doc/source/sparse.rst +++ b/doc/source/sparse.rst @@ -1,15 +1,6 @@ -.. currentmodule:: pandas .. _sparse: -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 +{{ header }} ********************** Sparse data structures diff --git a/doc/source/text.rst b/doc/source/text.rst index d677cc38c9888..2361be765d889 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -1,14 +1,6 @@ -.. currentmodule:: pandas .. _text: -.. 
ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 +{{ header }} ====================== Working with Text Data diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 8dab39aafbf67..b32603cb78795 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -1,15 +1,6 @@ -.. currentmodule:: pandas .. _timedeltas: -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 +{{ header }} .. _timedeltas.timedeltas: diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index bca7b6a601dd2..0d6fc735f3025 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -1,15 +1,6 @@ -.. currentmodule:: pandas .. _timeseries: -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 +{{ header }} ******************************** Time Series / Date functionality diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index 050d754d0ac8b..ce3c335b431ad 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -1,16 +1,6 @@ -.. currentmodule:: pandas .. _visualization: -.. ipython:: python - :suppress: - - import numpy as np - import pandas as pd - - np.random.seed(123456) - np.set_printoptions(precision=4, suppress=True) - pd.options.display.max_rows = 15 - +{{ header }} ************* Visualization @@ -21,6 +11,7 @@ We use the standard convention for referencing the matplotlib API: .. ipython:: python import matplotlib.pyplot as plt + plt.close('all') We provide the basics in pandas to easily create decent looking plots. 
See the :ref:`ecosystem <ecosystem.visualization>` section for visualization diff --git a/doc/source/whatsnew/v0.10.0.rst b/doc/source/whatsnew/v0.10.0.rst index 27f20111dbf96..9e45efca02eed 100644 --- a/doc/source/whatsnew/v0.10.0.rst +++ b/doc/source/whatsnew/v0.10.0.rst @@ -3,7 +3,13 @@ v0.10.0 (December 17, 2012) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.9.1 and includes many new features and enhancements along with a large number of bug fixes. There are also a number of diff --git a/doc/source/whatsnew/v0.10.1.rst b/doc/source/whatsnew/v0.10.1.rst index a627454561759..98d8214b08943 100644 --- a/doc/source/whatsnew/v0.10.1.rst +++ b/doc/source/whatsnew/v0.10.1.rst @@ -3,7 +3,13 @@ v0.10.1 (January 22, 2013) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor release from 0.10.0 and includes new features, enhancements, and bug fixes. In particular, there is substantial new HDFStore functionality diff --git a/doc/source/whatsnew/v0.11.0.rst b/doc/source/whatsnew/v0.11.0.rst index 051d735e539aa..971caddcbd47f 100644 --- a/doc/source/whatsnew/v0.11.0.rst +++ b/doc/source/whatsnew/v0.11.0.rst @@ -3,7 +3,13 @@ v0.11.0 (April 22, 2013) ------------------------ -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.10.1 and includes many new features and enhancements along with a large number of bug fixes. The methods of Selecting diff --git a/doc/source/whatsnew/v0.12.0.rst b/doc/source/whatsnew/v0.12.0.rst index a462359b6e3c0..f2816915955cf 100644 --- a/doc/source/whatsnew/v0.12.0.rst +++ b/doc/source/whatsnew/v0.12.0.rst @@ -3,7 +3,13 @@ v0.12.0 (July 24, 2013) ------------------------ -{{ common_imports }} +{{ header }} + +.. 
ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.11.0 and includes several new features and enhancements along with a large number of bug fixes. diff --git a/doc/source/whatsnew/v0.13.0.rst b/doc/source/whatsnew/v0.13.0.rst index 037347afb1d59..b11aed9284ab7 100644 --- a/doc/source/whatsnew/v0.13.0.rst +++ b/doc/source/whatsnew/v0.13.0.rst @@ -3,7 +3,13 @@ v0.13.0 (January 3, 2014) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.12.0 and includes a number of API changes, several new features and enhancements along with a large number of bug fixes. diff --git a/doc/source/whatsnew/v0.13.1.rst b/doc/source/whatsnew/v0.13.1.rst index 6a1b578cc08fb..63708e2565f4b 100644 --- a/doc/source/whatsnew/v0.13.1.rst +++ b/doc/source/whatsnew/v0.13.1.rst @@ -3,7 +3,13 @@ v0.13.1 (February 3, 2014) -------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor release from 0.13.0 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all diff --git a/doc/source/whatsnew/v0.14.0.rst b/doc/source/whatsnew/v0.14.0.rst index 9606bbac2a1b3..6ef2a61228ad2 100644 --- a/doc/source/whatsnew/v0.14.0.rst +++ b/doc/source/whatsnew/v0.14.0.rst @@ -3,7 +3,13 @@ v0.14.0 (May 31 , 2014) ----------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.13.1 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. 
We recommend that all diff --git a/doc/source/whatsnew/v0.14.1.rst b/doc/source/whatsnew/v0.14.1.rst index 3b0ff5650d90d..3c2a5c60209db 100644 --- a/doc/source/whatsnew/v0.14.1.rst +++ b/doc/source/whatsnew/v0.14.1.rst @@ -3,7 +3,13 @@ v0.14.1 (July 11, 2014) ----------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor release from 0.14.0 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all diff --git a/doc/source/whatsnew/v0.15.0.rst b/doc/source/whatsnew/v0.15.0.rst index 00eda927a9c73..e7a7d8a7e3b06 100644 --- a/doc/source/whatsnew/v0.15.0.rst +++ b/doc/source/whatsnew/v0.15.0.rst @@ -3,7 +3,13 @@ v0.15.0 (October 18, 2014) -------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.14.1 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all diff --git a/doc/source/whatsnew/v0.15.1.rst b/doc/source/whatsnew/v0.15.1.rst index 88127d4e1b8d8..be7cf04bcdd68 100644 --- a/doc/source/whatsnew/v0.15.1.rst +++ b/doc/source/whatsnew/v0.15.1.rst @@ -3,7 +3,13 @@ v0.15.1 (November 9, 2014) -------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release from 0.15.0 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. 
We recommend that all diff --git a/doc/source/whatsnew/v0.15.2.rst b/doc/source/whatsnew/v0.15.2.rst index dd988cde88145..437dd3f8d3df6 100644 --- a/doc/source/whatsnew/v0.15.2.rst +++ b/doc/source/whatsnew/v0.15.2.rst @@ -3,7 +3,13 @@ v0.15.2 (December 12, 2014) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor release from 0.15.1 and includes a large number of bug fixes along with several new features, enhancements, and performance improvements. diff --git a/doc/source/whatsnew/v0.16.0.rst b/doc/source/whatsnew/v0.16.0.rst index d394b43a7ec88..8d2d589c44e1b 100644 --- a/doc/source/whatsnew/v0.16.0.rst +++ b/doc/source/whatsnew/v0.16.0.rst @@ -3,7 +3,13 @@ v0.16.0 (March 22, 2015) ------------------------ -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.15.2 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all diff --git a/doc/source/whatsnew/v0.16.1.rst b/doc/source/whatsnew/v0.16.1.rst index aae96a5d63c14..5d98d3715a933 100644 --- a/doc/source/whatsnew/v0.16.1.rst +++ b/doc/source/whatsnew/v0.16.1.rst @@ -3,7 +3,13 @@ v0.16.1 (May 11, 2015) ---------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release from 0.16.0 and includes a a large number of bug fixes along several new features, enhancements, and performance improvements. diff --git a/doc/source/whatsnew/v0.16.2.rst b/doc/source/whatsnew/v0.16.2.rst index acae3a55d5f78..932f70d3e0e19 100644 --- a/doc/source/whatsnew/v0.16.2.rst +++ b/doc/source/whatsnew/v0.16.2.rst @@ -3,7 +3,13 @@ v0.16.2 (June 12, 2015) ----------------------- -{{ common_imports }} +{{ header }} + +.. 
ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release from 0.16.1 and includes a a large number of bug fixes along some new features (:meth:`~DataFrame.pipe` method), enhancements, and performance improvements. diff --git a/doc/source/whatsnew/v0.17.0.rst b/doc/source/whatsnew/v0.17.0.rst index abde8d953f4df..51b86899e1bc5 100644 --- a/doc/source/whatsnew/v0.17.0.rst +++ b/doc/source/whatsnew/v0.17.0.rst @@ -3,7 +3,13 @@ v0.17.0 (October 9, 2015) ------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.16.2 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all diff --git a/doc/source/whatsnew/v0.17.1.rst b/doc/source/whatsnew/v0.17.1.rst index 44554a88fba04..77dda4cfb1f44 100644 --- a/doc/source/whatsnew/v0.17.1.rst +++ b/doc/source/whatsnew/v0.17.1.rst @@ -3,7 +3,13 @@ v0.17.1 (November 21, 2015) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + .. note:: diff --git a/doc/source/whatsnew/v0.18.0.rst b/doc/source/whatsnew/v0.18.0.rst index 5cd4163b1a7a5..da7d409fb8922 100644 --- a/doc/source/whatsnew/v0.18.0.rst +++ b/doc/source/whatsnew/v0.18.0.rst @@ -3,7 +3,13 @@ v0.18.0 (March 13, 2016) ------------------------ -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.17.1 and includes a small number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. 
We recommend that all diff --git a/doc/source/whatsnew/v0.18.1.rst b/doc/source/whatsnew/v0.18.1.rst index 1dc01d7f1f745..65f0285ad32c9 100644 --- a/doc/source/whatsnew/v0.18.1.rst +++ b/doc/source/whatsnew/v0.18.1.rst @@ -3,7 +3,13 @@ v0.18.1 (May 3, 2016) --------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release from 0.18.0 and includes a large number of bug fixes along with several new features, enhancements, and performance improvements. diff --git a/doc/source/whatsnew/v0.19.0.rst b/doc/source/whatsnew/v0.19.0.rst index 467319a4527d1..1e4e7a6c80fa4 100644 --- a/doc/source/whatsnew/v0.19.0.rst +++ b/doc/source/whatsnew/v0.19.0.rst @@ -3,7 +3,13 @@ v0.19.0 (October 2, 2016) ------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.18.1 and includes number of API changes, several new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all diff --git a/doc/source/whatsnew/v0.19.1.rst b/doc/source/whatsnew/v0.19.1.rst index 0c909fa4195d7..12f3e985565e0 100644 --- a/doc/source/whatsnew/v0.19.1.rst +++ b/doc/source/whatsnew/v0.19.1.rst @@ -3,7 +3,13 @@ v0.19.1 (November 3, 2016) -------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release from 0.19.0 and includes some small regression fixes, bug fixes and performance improvements. diff --git a/doc/source/whatsnew/v0.19.2.rst b/doc/source/whatsnew/v0.19.2.rst index 1cded6d2c94e2..14310ceb45b4a 100644 --- a/doc/source/whatsnew/v0.19.2.rst +++ b/doc/source/whatsnew/v0.19.2.rst @@ -3,7 +3,13 @@ v0.19.2 (December 24, 2016) --------------------------- -{{ common_imports }} +{{ header }} + +.. 
ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release in the 0.19.x series and includes some small regression fixes, bug fixes and performance improvements. diff --git a/doc/source/whatsnew/v0.20.0.rst b/doc/source/whatsnew/v0.20.0.rst index 2df686a79e837..d5a2422e456ee 100644 --- a/doc/source/whatsnew/v0.20.0.rst +++ b/doc/source/whatsnew/v0.20.0.rst @@ -3,7 +3,13 @@ v0.20.1 (May 5, 2017) --------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.19.2 and includes a number of API changes, deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all diff --git a/doc/source/whatsnew/v0.20.2.rst b/doc/source/whatsnew/v0.20.2.rst index 784cd09edff30..b2592579eb03f 100644 --- a/doc/source/whatsnew/v0.20.2.rst +++ b/doc/source/whatsnew/v0.20.2.rst @@ -3,7 +3,13 @@ v0.20.2 (June 4, 2017) ---------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release in the 0.20.x series and includes some small regression fixes, bug fixes and performance improvements. diff --git a/doc/source/whatsnew/v0.20.3.rst b/doc/source/whatsnew/v0.20.3.rst index 47bfcc761b088..99f7ff2e36d25 100644 --- a/doc/source/whatsnew/v0.20.3.rst +++ b/doc/source/whatsnew/v0.20.3.rst @@ -3,7 +3,13 @@ v0.20.3 (July 7, 2017) ----------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release in the 0.20.x series and includes some small regression fixes and bug fixes. We recommend that all users upgrade to this version. 
diff --git a/doc/source/whatsnew/v0.21.0.rst b/doc/source/whatsnew/v0.21.0.rst index c9a90f3ada7e5..73bdedb3d3194 100644 --- a/doc/source/whatsnew/v0.21.0.rst +++ b/doc/source/whatsnew/v0.21.0.rst @@ -3,7 +3,13 @@ v0.21.0 (October 27, 2017) -------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.20.3 and includes a number of API changes, deprecations, new features, enhancements, and performance improvements along with a large number of bug fixes. We recommend that all diff --git a/doc/source/whatsnew/v0.21.1.rst b/doc/source/whatsnew/v0.21.1.rst index bf13d5d67ed63..c8897ca86e8cf 100644 --- a/doc/source/whatsnew/v0.21.1.rst +++ b/doc/source/whatsnew/v0.21.1.rst @@ -3,7 +3,13 @@ v0.21.1 (December 12, 2017) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release in the 0.21.x series and includes some small regression fixes, bug fixes and performance improvements. diff --git a/doc/source/whatsnew/v0.22.0.rst b/doc/source/whatsnew/v0.22.0.rst index f05b84a9d8902..1fb87c9f5433f 100644 --- a/doc/source/whatsnew/v0.22.0.rst +++ b/doc/source/whatsnew/v0.22.0.rst @@ -3,7 +3,13 @@ v0.22.0 (December 29, 2017) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.21.1 and includes a single, API-breaking change. We recommend that all users upgrade to this version after carefully reading the diff --git a/doc/source/whatsnew/v0.23.0.rst b/doc/source/whatsnew/v0.23.0.rst index f84517a3e3b9c..d8eb337fd27d2 100644 --- a/doc/source/whatsnew/v0.23.0.rst +++ b/doc/source/whatsnew/v0.23.0.rst @@ -3,7 +3,13 @@ What's new in 0.23.0 (May 15, 2018) ----------------------------------- -{{ common_imports }} +{{ header }} + +.. 
ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.22.0 and includes a number of API changes, deprecations, new features, enhancements, and performance improvements along diff --git a/doc/source/whatsnew/v0.23.1.rst b/doc/source/whatsnew/v0.23.1.rst index e8e0060c48337..f8bfced171a7c 100644 --- a/doc/source/whatsnew/v0.23.1.rst +++ b/doc/source/whatsnew/v0.23.1.rst @@ -3,7 +3,13 @@ What's New in 0.23.1 (June 12, 2018) ------------------------------------ -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes and bug fixes. We recommend that all users upgrade to this version. diff --git a/doc/source/whatsnew/v0.23.2.rst b/doc/source/whatsnew/v0.23.2.rst index 573a30f17846b..9523724827722 100644 --- a/doc/source/whatsnew/v0.23.2.rst +++ b/doc/source/whatsnew/v0.23.2.rst @@ -3,7 +3,13 @@ What's New in 0.23.2 (July 5, 2018) ----------------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes and bug fixes. We recommend that all users upgrade to this version. diff --git a/doc/source/whatsnew/v0.23.3.rst b/doc/source/whatsnew/v0.23.3.rst index 29758e54b437b..3b1a0cfa5f273 100644 --- a/doc/source/whatsnew/v0.23.3.rst +++ b/doc/source/whatsnew/v0.23.3.rst @@ -3,7 +3,7 @@ What's New in 0.23.3 (July 7, 2018) ----------------------------------- -{{ common_imports }} +{{ header }} This release fixes a build issue with the sdist for Python 3.7 (:issue:`21785`) There are no other changes. 
diff --git a/doc/source/whatsnew/v0.23.4.rst b/doc/source/whatsnew/v0.23.4.rst index c8f08d0bb7091..75fb18a648d0a 100644 --- a/doc/source/whatsnew/v0.23.4.rst +++ b/doc/source/whatsnew/v0.23.4.rst @@ -3,7 +3,13 @@ What's New in 0.23.4 (August 3, 2018) ------------------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor bug-fix release in the 0.23.x series and includes some small regression fixes and bug fixes. We recommend that all users upgrade to this version. diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 37810e95ccd1b..4c78fcb76f4c6 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -8,7 +8,13 @@ What's New in 0.24.0 (Month XX, 2018) Starting January 1, 2019, pandas feature releases will support Python 3 only. See :ref:`install.dropping-27` for more. -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + These are the changes in pandas 0.24.0. See :ref:`release` for a full changelog including other versions of pandas. diff --git a/doc/source/whatsnew/v0.4.x.rst b/doc/source/whatsnew/v0.4.x.rst index e54614849c93b..ebf5286a41b6e 100644 --- a/doc/source/whatsnew/v0.4.x.rst +++ b/doc/source/whatsnew/v0.4.x.rst @@ -3,7 +3,7 @@ v.0.4.3 through v0.4.1 (September 25 - October 9, 2011) ------------------------------------------------------- -{{ common_imports }} +{{ header }} New Features ~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.5.0.rst b/doc/source/whatsnew/v0.5.0.rst index c6d17cb1e1290..4e635a5fe6859 100644 --- a/doc/source/whatsnew/v0.5.0.rst +++ b/doc/source/whatsnew/v0.5.0.rst @@ -4,7 +4,13 @@ v.0.5.0 (October 24, 2011) -------------------------- -{{ common_imports }} +{{ header }} + +.. 
ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + New Features ~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.6.0.rst b/doc/source/whatsnew/v0.6.0.rst index de45b3b383129..ba2c6aec40f50 100644 --- a/doc/source/whatsnew/v0.6.0.rst +++ b/doc/source/whatsnew/v0.6.0.rst @@ -3,7 +3,13 @@ v.0.6.0 (November 25, 2011) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + New Features ~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.7.0.rst b/doc/source/whatsnew/v0.7.0.rst index e278bc0738108..7049e836e2034 100644 --- a/doc/source/whatsnew/v0.7.0.rst +++ b/doc/source/whatsnew/v0.7.0.rst @@ -3,7 +3,13 @@ v.0.7.0 (February 9, 2012) -------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + New features ~~~~~~~~~~~~ diff --git a/doc/source/whatsnew/v0.7.1.rst b/doc/source/whatsnew/v0.7.1.rst index f1a133797fd59..db14c5af71923 100644 --- a/doc/source/whatsnew/v0.7.1.rst +++ b/doc/source/whatsnew/v0.7.1.rst @@ -3,7 +3,13 @@ v.0.7.1 (February 29, 2012) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This release includes a few new features and addresses over a dozen bugs in 0.7.0. diff --git a/doc/source/whatsnew/v0.7.2.rst b/doc/source/whatsnew/v0.7.2.rst index b870db956f4f1..4898a209fb33b 100644 --- a/doc/source/whatsnew/v0.7.2.rst +++ b/doc/source/whatsnew/v0.7.2.rst @@ -3,7 +3,13 @@ v.0.7.2 (March 16, 2012) --------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This release targets bugs in 0.7.1, and adds a few minor features. 
diff --git a/doc/source/whatsnew/v0.7.3.rst b/doc/source/whatsnew/v0.7.3.rst index 30e22f105656c..6f7927499db78 100644 --- a/doc/source/whatsnew/v0.7.3.rst +++ b/doc/source/whatsnew/v0.7.3.rst @@ -3,7 +3,13 @@ v.0.7.3 (April 12, 2012) ------------------------ -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a minor release from 0.7.2 and fixes many minor bugs and adds a number of nice new features. There are also a couple of API changes to note; these diff --git a/doc/source/whatsnew/v0.8.0.rst b/doc/source/whatsnew/v0.8.0.rst index eedaaa3dfa8bd..3457774f98811 100644 --- a/doc/source/whatsnew/v0.8.0.rst +++ b/doc/source/whatsnew/v0.8.0.rst @@ -3,7 +3,13 @@ v0.8.0 (June 29, 2012) ------------------------ -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a major release from 0.7.3 and includes extensive work on the time series handling and processing infrastructure as well as a great deal of new diff --git a/doc/source/whatsnew/v0.8.1.rst b/doc/source/whatsnew/v0.8.1.rst index 468b99341163c..20338cf79215c 100644 --- a/doc/source/whatsnew/v0.8.1.rst +++ b/doc/source/whatsnew/v0.8.1.rst @@ -3,7 +3,13 @@ v0.8.1 (July 22, 2012) ---------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This release includes a few new features, performance enhancements, and over 30 bug fixes from 0.8.0. New features include notably NA friendly string diff --git a/doc/source/whatsnew/v0.9.0.rst b/doc/source/whatsnew/v0.9.0.rst index ee4e8c338c984..eebc044255bbc 100644 --- a/doc/source/whatsnew/v0.9.0.rst +++ b/doc/source/whatsnew/v0.9.0.rst @@ -1,6 +1,12 @@ .. _whatsnew_0900: -{{ common_imports }} +{{ header }} + +.. 
ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + v0.9.0 (October 7, 2012) ------------------------ diff --git a/doc/source/whatsnew/v0.9.1.rst b/doc/source/whatsnew/v0.9.1.rst index fe3de9be95a74..6620f644cf527 100644 --- a/doc/source/whatsnew/v0.9.1.rst +++ b/doc/source/whatsnew/v0.9.1.rst @@ -3,7 +3,13 @@ v0.9.1 (November 14, 2012) -------------------------- -{{ common_imports }} +{{ header }} + +.. ipython:: python + :suppress: + + from pandas import * # noqa F401, F403 + This is a bug fix release from 0.9.0 and includes several new features and enhancements along with a large number of bug fixes. The new features include diff --git a/setup.cfg b/setup.cfg index 44df79d1b60d2..bfffe56e088eb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -31,6 +31,9 @@ exclude = env # exclude asv benchmark environments from linting [flake8-rst] +bootstrap = + import pandas as pd + import numpy as np ignore = E402, # module level import not at top of file W503, # line break before binary operator exclude = @@ -70,6 +73,7 @@ exclude = doc/source/basics.rst doc/source/categorical.rst doc/source/contributing_docstring.rst + doc/source/contributing.rst doc/source/dsintro.rst doc/source/enhancingperf.rst doc/source/extending.rst @@ -79,6 +83,9 @@ exclude = doc/source/missing_data.rst doc/source/options.rst doc/source/release.rst + doc/source/comparison_with_sas.rst + doc/source/comparison_with_sql.rst + doc/source/comparison_with_stata.rst doc/source/reshaping.rst doc/source/visualization.rst @@ -397,4 +404,3 @@ skip= asv_bench/benchmarks/sparse.py, asv_bench/benchmarks/stat_ops.py, asv_bench/benchmarks/timeseries.py - diff --git a/tmp.xlsx b/tmp.xlsx new file mode 100644 index 0000000000000..199e9049bfa96 Binary files /dev/null and b/tmp.xlsx differ
- [x] closes #23952 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24086
2018-12-04T08:50:54Z
2018-12-09T17:53:01Z
2018-12-09T17:53:01Z
2018-12-10T12:28:43Z
PERF: consolidate imports inside parse_time_string
diff --git a/asv_bench/benchmarks/period.py b/asv_bench/benchmarks/period.py index fc34a47fee3e1..1af1ba1fb7b0b 100644 --- a/asv_bench/benchmarks/period.py +++ b/asv_bench/benchmarks/period.py @@ -43,6 +43,7 @@ class PeriodIndexConstructor(object): def setup(self, freq): self.rng = date_range('1985', periods=1000) self.rng2 = date_range('1985', periods=1000).to_pydatetime() + self.ints = list(range(2000, 3000)) def time_from_date_range(self, freq): PeriodIndex(self.rng, freq=freq) @@ -50,6 +51,9 @@ def time_from_date_range(self, freq): def time_from_pydatetime(self, freq): PeriodIndex(self.rng2, freq=freq) + def time_from_ints(self, freq): + PeriodIndex(self.ints, freq=freq) + class DataFramePeriodColumn(object): diff --git a/pandas/_libs/tslibs/parsing.pyx b/pandas/_libs/tslibs/parsing.pyx index 71bb8f79642dc..9a01bf378e549 100644 --- a/pandas/_libs/tslibs/parsing.pyx +++ b/pandas/_libs/tslibs/parsing.pyx @@ -118,12 +118,12 @@ def parse_time_string(arg, freq=None, dayfirst=None, yearfirst=None): if getattr(freq, "_typ", None) == "dateoffset": freq = freq.rule_code - if dayfirst is None: + if dayfirst is None or yearfirst is None: from pandas.core.config import get_option - dayfirst = get_option("display.date_dayfirst") - if yearfirst is None: - from pandas.core.config import get_option - yearfirst = get_option("display.date_yearfirst") + if dayfirst is None: + dayfirst = get_option("display.date_dayfirst") + if yearfirst is None: + yearfirst = get_option("display.date_yearfirst") res = parse_datetime_string_with_reso(arg, freq=freq, dayfirst=dayfirst,
This PR adds an asv benchmark for the creation of `Period` objects directly from integers, and an associated speedup for the same case. When calling `Period._from_ordinal()`, the runtime is unfortunately dominated by a pair of import statements needed to pull in the global config settings `display.date_dayfirst` and `display.date_yearfirst`. Thus, simply consolidating them leads to a significant speedup: ``` Benchmarks that have improved: before after ratio [08395af4] [696b40f1] <parse_time_string~1> <parse_time_string> - 176±3ms 98.7±3ms 0.56 period.PeriodIndexConstructor.time_from_ints('D') ``` - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24084
2018-12-04T00:38:56Z
2018-12-05T12:17:29Z
2018-12-05T12:17:29Z
2018-12-05T12:17:32Z
PERF: Define PeriodArray._values_for_argsort
diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index e258e474f4154..9aa83892d3b64 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -828,6 +828,9 @@ def _check_timedeltalike_freq_compat(self, other): .format(cls=type(self).__name__, freqstr=self.freqstr)) + def _values_for_argsort(self): + return self._data + PeriodArray._add_comparison_ops() PeriodArray._add_datetimelike_methods()
This PR speeds up `.groupby()` and `.set_index()` operations involving a `PeriodArray` by 25-64x: ``` asv compare upstream/master HEAD -s --sort ratio Benchmarks that have improved: before after ratio [08395af4] [696b40f1] <period_array_argsort~1> <parse_time_string> - 4.77±0.1s 191±3ms 0.04 period.DataFramePeriodColumn.time_set_index - 2.23±0.2s 35.6±2ms 0.02 groupby.Datelike.time_sum('period_range') ``` The underlying issue was that `pd.core.algorithms.factorize()` calls `argsort()` on the input arrays. Calling this resulted in raw `Period` objects being sorted via equality comparisons that also generated `Offset` objects. Assuming all elements of the array have the same frequency, we can simply sort the underlying `ordinals` and achieve the same result far faster. - [ ] closes #xxxx - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24083
2018-12-04T00:24:46Z
2018-12-04T02:33:44Z
2018-12-04T02:33:44Z
2018-12-04T02:33:47Z
remove calls to DataFrame.consolidate in benchmarks
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 527a2f129cf37..dcecaf60ed578 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -13,8 +13,7 @@ def setup(self): self.df = DataFrame(np.random.randn(10000, 25)) self.df['foo'] = 'bar' self.df['bar'] = 'baz' - with warnings.catch_warnings(record=True): - self.df = self.df.consolidate() + self.df = self.df._consolidate() def time_frame_get_numeric_data(self): self.df._get_numeric_data() diff --git a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 5b28d8a4eec62..84ccc10e8302f 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -23,11 +23,7 @@ def setup(self): self.mdf1['obj1'] = 'bar' self.mdf1['obj2'] = 'bar' self.mdf1['int1'] = 5 - try: - with warnings.catch_warnings(record=True): - self.mdf1.consolidate(inplace=True) - except (AttributeError, TypeError): - pass + self.mdf1 = self.mdf1._consolidate() self.mdf2 = self.mdf1.copy() self.mdf2.index = self.df2.index
The ``DataFrame.consolidate`` method has been removed for 0.24 (in #23377), but there are still some calls to it in the asv benchmarks. I had a failure in #23752 because of this, so hereby a fix. This PR uses the internal ``_consolidate`` method instead.
https://api.github.com/repos/pandas-dev/pandas/pulls/24080
2018-12-03T23:03:31Z
2018-12-04T02:51:03Z
2018-12-04T02:51:03Z
2018-12-09T18:48:42Z
CI/TST: Making ci/run_tests.sh fail if one of the steps fail
diff --git a/ci/run_tests.sh b/ci/run_tests.sh index 77efc60a8cf97..ee46da9f52eab 100755 --- a/ci/run_tests.sh +++ b/ci/run_tests.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -e + if [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" exit 0 @@ -43,10 +45,14 @@ do NUM_JOBS=2 fi - pytest -m "$TYPE_PATTERN$PATTERN" -n $NUM_JOBS -s --strict --durations=10 --junitxml=test-data-$TYPE.xml $TEST_ARGS $COVERAGE pandas + PYTEST_CMD="pytest -m \"$TYPE_PATTERN$PATTERN\" -n $NUM_JOBS -s --strict --durations=10 --junitxml=test-data-$TYPE.xml $TEST_ARGS $COVERAGE pandas" + echo $PYTEST_CMD + # if no tests are found (the case of "single and slow"), pytest exits with code 5, and would make the script fail, if not for the below code + sh -c "$PYTEST_CMD; ret=\$?; [ \$ret = 5 ] && exit 0 || exit \$ret" if [[ "$COVERAGE" && $? == 0 ]]; then echo "uploading coverage for $TYPE tests" - bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME + echo "bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME" + bash <(curl -s https://codecov.io/bash) -Z -c -F $TYPE -f $COVERAGE_FNAME fi done
Looks like when simplifying the running of the tests in the CI (#23924), I missed the `-e` in the bash header. And that makes the `ci/run_tests.sh` exit with status code 0, even if the calls to pytests fail. This left the CI in green, even when tests fail for the last 3 days (sorry about that). I think nothing is broken. This PR fixes the problem. CC: @pandas-dev/pandas-core
https://api.github.com/repos/pandas-dev/pandas/pulls/24075
2018-12-03T18:47:14Z
2018-12-04T11:21:40Z
2018-12-04T11:21:40Z
2018-12-04T11:43:25Z
Implement DatetimeArray._from_sequence
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 33f71bcb2fef2..39b6a977f87fa 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -6,7 +6,6 @@ import numpy as np from pandas._libs import NaT, iNaT, lib -from pandas._libs.tslibs import timezones from pandas._libs.tslibs.period import ( DIFFERENT_FREQ_INDEX, IncompatibleFrequency, Period) from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds @@ -21,8 +20,7 @@ is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_array_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_offsetlike, - is_period_dtype, is_timedelta64_dtype, needs_i8_conversion, pandas_dtype) -from pandas.core.dtypes.dtypes import DatetimeTZDtype + is_period_dtype, is_timedelta64_dtype, needs_i8_conversion) from pandas.core.dtypes.generic import ABCDataFrame, ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna @@ -1127,6 +1125,41 @@ def validate_endpoints(closed): return left_closed, right_closed +def validate_inferred_freq(freq, inferred_freq, freq_infer): + """ + If the user passes a freq and another freq is inferred from passed data, + require that they match. + + Parameters + ---------- + freq : DateOffset or None + inferred_freq : DateOffset or None + freq_infer : bool + + Returns + ------- + freq : DateOffset or None + freq_infer : bool + + Notes + ----- + We assume at this point that `maybe_infer_freq` has been called, so + `freq` is either a DateOffset object or None. 
+ """ + if inferred_freq is not None: + if freq is not None and freq != inferred_freq: + raise ValueError('Inferred frequency {inferred} from passed ' + 'values does not conform to passed frequency ' + '{passed}' + .format(inferred=inferred_freq, + passed=freq.freqstr)) + elif freq is None: + freq = inferred_freq + freq_infer = False + + return freq, freq_infer + + def maybe_infer_freq(freq): """ Comparing a DateOffset to the string "infer" raises, so we need to @@ -1154,78 +1187,6 @@ def maybe_infer_freq(freq): return freq, freq_infer -def validate_tz_from_dtype(dtype, tz): - """ - If the given dtype is a DatetimeTZDtype, extract the implied - tzinfo object from it and check that it does not conflict with the given - tz. - - Parameters - ---------- - dtype : dtype, str - tz : None, tzinfo - - Returns - ------- - tz : consensus tzinfo - - Raises - ------ - ValueError : on tzinfo mismatch - """ - if dtype is not None: - if isinstance(dtype, compat.string_types): - try: - dtype = DatetimeTZDtype.construct_from_string(dtype) - except TypeError: - # Things like `datetime64[ns]`, which is OK for the - # constructors, but also nonsense, which should be validated - # but not by us. We *do* allow non-existent tz errors to - # go through - pass - dtz = getattr(dtype, 'tz', None) - if dtz is not None: - if tz is not None and not timezones.tz_compare(tz, dtz): - raise ValueError("cannot supply both a tz and a dtype" - " with a tz") - tz = dtz - return tz - - -def validate_dtype_freq(dtype, freq): - """ - If both a dtype and a freq are available, ensure they match. If only - dtype is available, extract the implied freq. 
- - Parameters - ---------- - dtype : dtype - freq : DateOffset or None - - Returns - ------- - freq : DateOffset - - Raises - ------ - ValueError : non-period dtype - IncompatibleFrequency : mismatch between dtype and freq - """ - if freq is not None: - freq = frequencies.to_offset(freq) - - if dtype is not None: - dtype = pandas_dtype(dtype) - if not is_period_dtype(dtype): - raise ValueError('dtype must be PeriodDtype') - if freq is None: - freq = dtype.freq - elif freq != dtype.freq: - raise IncompatibleFrequency('specified freq and dtype ' - 'are different') - return freq - - def _ensure_datetimelike_to_i8(other, to_utc=False): """ Helper for coercing an input scalar or array to i8. diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index f453a9b734d17..1ae11c60f3b15 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -14,9 +14,9 @@ from pandas.util._decorators import Appender from pandas.core.dtypes.common import ( - _NS_DTYPE, is_datetime64_dtype, is_datetime64tz_dtype, is_extension_type, - is_float_dtype, is_int64_dtype, is_object_dtype, is_period_dtype, - is_timedelta64_dtype) + _INT64_DTYPE, _NS_DTYPE, is_datetime64_dtype, is_datetime64tz_dtype, + is_extension_type, is_float_dtype, is_int64_dtype, is_object_dtype, + is_period_dtype, is_string_dtype, is_timedelta64_dtype) from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna @@ -209,51 +209,35 @@ def _simple_new(cls, values, freq=None, tz=None): result._tz = timezones.tz_standardize(tz) return result - def __new__(cls, values, freq=None, tz=None, dtype=None): + def __new__(cls, values, freq=None, tz=None, dtype=None, copy=False, + dayfirst=False, yearfirst=False, ambiguous='raise'): + return cls._from_sequence( + values, freq=freq, tz=tz, dtype=dtype, copy=copy, + dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) - if freq is 
None and hasattr(values, "freq"): - # i.e. DatetimeArray, DatetimeIndex - freq = values.freq + @classmethod + def _from_sequence(cls, data, dtype=None, copy=False, + tz=None, freq=None, + dayfirst=False, yearfirst=False, ambiguous='raise'): freq, freq_infer = dtl.maybe_infer_freq(freq) - # if dtype has an embedded tz, capture it - tz = dtl.validate_tz_from_dtype(dtype, tz) - - if not hasattr(values, "dtype"): - if np.ndim(values) == 0: - # i.e. iterator - values = list(values) - values = np.array(values) - - if is_object_dtype(values): - # kludge; dispatch until the DatetimeArray constructor is complete - from pandas import DatetimeIndex - values = DatetimeIndex(values, freq=freq, tz=tz) + subarr, tz, inferred_freq = sequence_to_dt64ns( + data, dtype=dtype, copy=copy, tz=tz, + dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) - if isinstance(values, ABCSeries): - # extract to ndarray or DatetimeIndex - values = values._values + freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, + freq_infer) - if isinstance(values, DatetimeArrayMixin): - # extract nanosecond unix timestamps - if tz is None: - tz = values.tz - values = values.asi8 + result = cls._simple_new(subarr, freq=freq, tz=tz) - if values.dtype == 'i8': - values = values.view('M8[ns]') + if inferred_freq is None and freq is not None: + # this condition precludes `freq_infer` + cls._validate_frequency(result, freq, ambiguous=ambiguous) - assert isinstance(values, np.ndarray), type(values) - assert is_datetime64_dtype(values) # not yet assured nanosecond - values = conversion.ensure_datetime64ns(values, copy=False) - - result = cls._simple_new(values, freq=freq, tz=tz) - if freq_infer: + elif freq_infer: result.freq = to_offset(result.inferred_freq) - # NB: Among other things not yet ported from the DatetimeIndex - # constructor, this does not call _deepcopy_if_needed return result @classmethod @@ -1444,81 +1428,108 @@ def to_julian_date(self): # 
------------------------------------------------------------------- # Constructor Helpers -def maybe_infer_tz(tz, inferred_tz): +def sequence_to_dt64ns(data, dtype=None, copy=False, + tz=None, + dayfirst=False, yearfirst=False, ambiguous='raise'): """ - If a timezone is inferred from data, check that it is compatible with - the user-provided timezone, if any. - Parameters ---------- - tz : tzinfo or None - inferred_tz : tzinfo or None + data : list-like + dtype : dtype, str, or None, default None + copy : bool, default False + tz : tzinfo, str, or None, default None + dayfirst : bool, default False + yearfirst : bool, default False + ambiguous : str, bool, or arraylike, default 'raise' + See pandas._libs.tslibs.conversion.tz_localize_to_utc Returns ------- + result : numpy.ndarray + The sequence converted to a numpy array with dtype ``datetime64[ns]``. tz : tzinfo or None + Either the user-provided tzinfo or one inferred from the data. + inferred_freq : Tick or None + The inferred frequency of the sequence. Raises ------ - TypeError : if both timezones are present but do not match + TypeError : PeriodDType data is passed """ - if tz is None: - tz = inferred_tz - elif inferred_tz is None: - pass - elif not timezones.tz_compare(tz, inferred_tz): - raise TypeError('data is already tz-aware {inferred_tz}, unable to ' - 'set specified tz: {tz}' - .format(inferred_tz=inferred_tz, tz=tz)) - return tz + inferred_freq = None -def maybe_convert_dtype(data, copy): - """ - Convert data based on dtype conventions, issuing deprecation warnings - or errors where appropriate. + if not hasattr(data, "dtype"): + # e.g. list, tuple + if np.ndim(data) == 0: + # i.e. generator + data = list(data) + data = np.asarray(data) + copy = False + elif isinstance(data, ABCSeries): + data = data._values - Parameters - ---------- - data : np.ndarray or pd.Index - copy : bool + if hasattr(data, "freq"): + # i.e. 
DatetimeArray/Index + inferred_freq = data.freq - Returns - ------- - data : np.ndarray or pd.Index - copy : bool + # if dtype has an embedded tz, capture it + tz = validate_tz_from_dtype(dtype, tz) - Raises - ------ - TypeError : PeriodDType data is passed - """ - if is_float_dtype(data): - # Note: we must cast to datetime64[ns] here in order to treat these - # as wall-times instead of UTC timestamps. - data = data.astype(_NS_DTYPE) + # By this point we are assured to have either a numpy array or Index + data, copy = maybe_convert_dtype(data, copy) + + if is_object_dtype(data) or is_string_dtype(data): + # TODO: We do not have tests specific to string-dtypes, + # also complex or categorical or other extension copy = False - # TODO: deprecate this behavior to instead treat symmetrically - # with integer dtypes. See discussion in GH#23675 + if lib.infer_dtype(data) == 'integer': + data = data.astype(np.int64) + else: + # data comes back here as either i8 to denote UTC timestamps + # or M8[ns] to denote wall times + data, inferred_tz = objects_to_datetime64ns( + data, dayfirst=dayfirst, yearfirst=yearfirst) + tz = maybe_infer_tz(tz, inferred_tz) + + if is_datetime64tz_dtype(data): + tz = maybe_infer_tz(tz, data.tz) + result = data._data + + elif is_datetime64_dtype(data): + # tz-naive DatetimeArray/Index or ndarray[datetime64] + data = getattr(data, "_data", data) + if data.dtype != _NS_DTYPE: + data = conversion.ensure_datetime64ns(data) - elif is_timedelta64_dtype(data): - warnings.warn("Passing timedelta64-dtype data is deprecated, will " - "raise a TypeError in a future version", - FutureWarning, stacklevel=3) - data = data.view(_NS_DTYPE) + if tz is not None: + # Convert tz-naive to UTC + tz = timezones.maybe_get_tz(tz) + data = conversion.tz_localize_to_utc(data.view('i8'), tz, + ambiguous=ambiguous) + data = data.view(_NS_DTYPE) - elif is_period_dtype(data): - # Note: without explicitly raising here, PeriondIndex - # test_setops.test_join_does_not_recur fails 
- raise TypeError("Passing PeriodDtype data is invalid. " - "Use `data.to_timestamp()` instead") + assert data.dtype == _NS_DTYPE, data.dtype + result = data - elif is_extension_type(data) and not is_datetime64tz_dtype(data): - # Includes categorical - # TODO: We have no tests for these - data = np.array(data, dtype=np.object_) - copy = False + else: + # must be integer dtype otherwise + # assume this data are epoch timestamps + if data.dtype != _INT64_DTYPE: + data = data.astype(np.int64, copy=False) + result = data.view(_NS_DTYPE) - return data, copy + if copy: + # TODO: should this be deepcopy? + result = result.copy() + + assert isinstance(result, np.ndarray), type(result) + assert result.dtype == 'M8[ns]', result.dtype + + # We have to call this again after possibly inferring a tz above + validate_tz_from_dtype(dtype, tz) + + return result, tz, inferred_freq def objects_to_datetime64ns(data, dayfirst, yearfirst, @@ -1598,6 +1609,54 @@ def objects_to_datetime64ns(data, dayfirst, yearfirst, raise TypeError(result) +def maybe_convert_dtype(data, copy): + """ + Convert data based on dtype conventions, issuing deprecation warnings + or errors where appropriate. + + Parameters + ---------- + data : np.ndarray or pd.Index + copy : bool + + Returns + ------- + data : np.ndarray or pd.Index + copy : bool + + Raises + ------ + TypeError : PeriodDType data is passed + """ + if is_float_dtype(data): + # Note: we must cast to datetime64[ns] here in order to treat these + # as wall-times instead of UTC timestamps. + data = data.astype(_NS_DTYPE) + copy = False + # TODO: deprecate this behavior to instead treat symmetrically + # with integer dtypes. 
See discussion in GH#23675 + + elif is_timedelta64_dtype(data): + warnings.warn("Passing timedelta64-dtype data is deprecated, will " + "raise a TypeError in a future version", + FutureWarning, stacklevel=5) + data = data.view(_NS_DTYPE) + + elif is_period_dtype(data): + # Note: without explicitly raising here, PeriondIndex + # test_setops.test_join_does_not_recur fails + raise TypeError("Passing PeriodDtype data is invalid. " + "Use `data.to_timestamp()` instead") + + elif is_extension_type(data) and not is_datetime64tz_dtype(data): + # Includes categorical + # TODO: We have no tests for these + data = np.array(data, dtype=np.object_) + copy = False + + return data, copy + + def _generate_regular_range(cls, start, end, periods, freq): """ Generate a range of dates with the spans between dates described by @@ -1699,6 +1758,84 @@ def _generate_range_overflow_safe(endpoint, periods, stride, side='start'): return other_end +# ------------------------------------------------------------------- +# Validation and Inference + +def maybe_infer_tz(tz, inferred_tz): + """ + If a timezone is inferred from data, check that it is compatible with + the user-provided timezone, if any. + + Parameters + ---------- + tz : tzinfo or None + inferred_tz : tzinfo or None + + Returns + ------- + tz : tzinfo or None + + Raises + ------ + TypeError : if both timezones are present but do not match + """ + if tz is None: + tz = inferred_tz + elif inferred_tz is None: + pass + elif not timezones.tz_compare(tz, inferred_tz): + raise TypeError('data is already tz-aware {inferred_tz}, unable to ' + 'set specified tz: {tz}' + .format(inferred_tz=inferred_tz, tz=tz)) + return tz + + +def validate_tz_from_dtype(dtype, tz): + """ + If the given dtype is a DatetimeTZDtype, extract the implied + tzinfo object from it and check that it does not conflict with the given + tz. 
+ + Parameters + ---------- + dtype : dtype, str + tz : None, tzinfo + + Returns + ------- + tz : consensus tzinfo + + Raises + ------ + ValueError : on tzinfo mismatch + """ + if dtype is not None: + if isinstance(dtype, compat.string_types): + try: + dtype = DatetimeTZDtype.construct_from_string(dtype) + except TypeError: + # Things like `datetime64[ns]`, which is OK for the + # constructors, but also nonsense, which should be validated + # but not by us. We *do* allow non-existent tz errors to + # go through + pass + dtz = getattr(dtype, 'tz', None) + if dtz is not None: + if tz is not None and not timezones.tz_compare(tz, dtz): + raise ValueError("cannot supply both a tz and a dtype" + " with a tz") + tz = dtz + + if tz is not None and is_datetime64_dtype(dtype): + # We also need to check for the case where the user passed a + # tz-naive dtype (i.e. datetime64[ns]) + if tz is not None and not timezones.tz_compare(tz, dtz): + raise ValueError("cannot supply both a tz and a " + "timezone-naive dtype (i.e. datetime64[ns]") + + return tz + + def _infer_tz_from_endpoints(start, end, tz): """ If a timezone is not explicitly given via `tz`, see if one can diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 4d466ef7281b7..759bdf20eaae9 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -155,7 +155,7 @@ class PeriodArray(dtl.DatetimeLikeArrayMixin, ExtensionArray): # Constructors def __init__(self, values, freq=None, dtype=None, copy=False): - freq = dtl.validate_dtype_freq(dtype, freq) + freq = validate_dtype_freq(dtype, freq) if freq is not None: freq = Period._maybe_convert_freq(freq) @@ -915,6 +915,40 @@ def period_array(data, freq=None, copy=False): return PeriodArray._from_sequence(data, dtype=dtype) +def validate_dtype_freq(dtype, freq): + """ + If both a dtype and a freq are available, ensure they match. If only + dtype is available, extract the implied freq. 
+ + Parameters + ---------- + dtype : dtype + freq : DateOffset or None + + Returns + ------- + freq : DateOffset + + Raises + ------ + ValueError : non-period dtype + IncompatibleFrequency : mismatch between dtype and freq + """ + if freq is not None: + freq = frequencies.to_offset(freq) + + if dtype is not None: + dtype = pandas_dtype(dtype) + if not is_period_dtype(dtype): + raise ValueError('dtype must be PeriodDtype') + if freq is None: + freq = dtype.freq + elif freq != dtype.freq: + raise IncompatibleFrequency('specified freq and dtype ' + 'are different') + return freq + + def dt64arr_to_periodarr(data, freq, tz=None): """ Convert an datetime-like array to values Period ordinals. diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index f803144e0a78f..bc7e49d90a9e3 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -146,27 +146,19 @@ def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE): return result def __new__(cls, values, freq=None, dtype=_TD_DTYPE, copy=False): - return cls._from_sequence(values, freq=freq, dtype=dtype, copy=copy) + return cls._from_sequence(values, dtype=dtype, copy=copy, freq=freq) @classmethod - def _from_sequence(cls, data, freq=None, unit=None, - dtype=_TD_DTYPE, copy=False): + def _from_sequence(cls, data, dtype=_TD_DTYPE, copy=False, + freq=None, unit=None): if dtype != _TD_DTYPE: raise ValueError("Only timedelta64[ns] dtype is valid.") freq, freq_infer = dtl.maybe_infer_freq(freq) data, inferred_freq = sequence_to_td64ns(data, copy=copy, unit=unit) - if inferred_freq is not None: - if freq is not None and freq != inferred_freq: - raise ValueError('Inferred frequency {inferred} from passed ' - 'values does not conform to passed frequency ' - '{passed}' - .format(inferred=inferred_freq, - passed=freq.freqstr)) - elif freq is None: - freq = inferred_freq - freq_infer = False + freq, freq_infer = dtl.validate_inferred_freq(freq, inferred_freq, + freq_infer) result 
= cls._simple_new(data, freq=freq) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index b778b2132cd96..fd4a1527c07b7 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -9,24 +9,19 @@ from pandas._libs import ( Timestamp, index as libindex, join as libjoin, lib, tslib as libts) -from pandas._libs.tslibs import ( - ccalendar, conversion, fields, parsing, timezones) +from pandas._libs.tslibs import ccalendar, fields, parsing, timezones import pandas.compat as compat from pandas.util._decorators import Appender, Substitution, cache_readonly from pandas.core.dtypes.common import ( - _INT64_DTYPE, _NS_DTYPE, ensure_int64, is_datetime64_dtype, - is_datetime64_ns_dtype, is_datetime64tz_dtype, is_dtype_equal, is_float, - is_integer, is_list_like, is_object_dtype, is_period_dtype, is_scalar, - is_string_dtype, is_string_like, pandas_dtype) + _NS_DTYPE, ensure_int64, is_datetime64_ns_dtype, is_dtype_equal, is_float, + is_integer, is_list_like, is_period_dtype, is_scalar, is_string_like, + pandas_dtype) import pandas.core.dtypes.concat as _concat -from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna -from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays.datetimes import ( - DatetimeArrayMixin as DatetimeArray, _to_m8, maybe_convert_dtype, - maybe_infer_tz, objects_to_datetime64ns) + DatetimeArrayMixin as DatetimeArray, _to_m8) from pandas.core.base import _shared_docs import pandas.core.common as com from pandas.core.indexes.base import Index, _index_shared_docs @@ -49,12 +44,17 @@ def _new_DatetimeIndex(cls, d): # so need to localize tz = d.pop('tz', None) - with warnings.catch_warnings(): - # we ignore warnings from passing verify_integrity=False - # TODO: If we knew what was going in to **d, we might be able to - # go through _simple_new instead - warnings.simplefilter("ignore") - result = cls.__new__(cls, verify_integrity=False, **d) + if 
"data" in d and not isinstance(d["data"], DatetimeIndex): + # Avoid need to verify integrity by calling simple_new directly + data = d.pop("data") + result = cls._simple_new(data, **d) + else: + with warnings.catch_warnings(): + # we ignore warnings from passing verify_integrity=False + # TODO: If we knew what was going in to **d, we might be able to + # go through _simple_new instead + warnings.simplefilter("ignore") + result = cls.__new__(cls, verify_integrity=False, **d) if tz is not None: result = result.tz_localize('UTC').tz_convert(tz) @@ -260,81 +260,12 @@ def __new__(cls, data=None, if name is None and hasattr(data, 'name'): name = data.name - freq, freq_infer = dtl.maybe_infer_freq(freq) - if freq is None and hasattr(data, "freq"): - # i.e. DatetimeArray/Index - freq = data.freq - verify_integrity = False - - # if dtype has an embedded tz, capture it - tz = dtl.validate_tz_from_dtype(dtype, tz) - - if not hasattr(data, "dtype"): - # e.g. list, tuple - if np.ndim(data) == 0: - # i.e. 
generator - data = list(data) - data = np.asarray(data) - copy = False - elif isinstance(data, ABCSeries): - data = data._values - - # By this point we are assured to have either a numpy array or Index - data, copy = maybe_convert_dtype(data, copy) - - if is_object_dtype(data) or is_string_dtype(data): - # TODO: We do not have tests specific to string-dtypes, - # also complex or categorical or other extension - copy = False - if lib.infer_dtype(data) == 'integer': - data = data.astype(np.int64) - else: - # data comes back here as either i8 to denote UTC timestamps - # or M8[ns] to denote wall times - data, inferred_tz = objects_to_datetime64ns( - data, dayfirst=dayfirst, yearfirst=yearfirst) - tz = maybe_infer_tz(tz, inferred_tz) - - if is_datetime64tz_dtype(data): - tz = maybe_infer_tz(tz, data.tz) - subarr = data._data - - elif is_datetime64_dtype(data): - # tz-naive DatetimeArray/Index or ndarray[datetime64] - data = getattr(data, "_data", data) - if data.dtype != _NS_DTYPE: - data = conversion.ensure_datetime64ns(data) - - if tz is not None: - # Convert tz-naive to UTC - tz = timezones.maybe_get_tz(tz) - data = conversion.tz_localize_to_utc(data.view('i8'), tz, - ambiguous=ambiguous) - subarr = data.view(_NS_DTYPE) + dtarr = DatetimeArray._from_sequence( + data, dtype=dtype, copy=copy, tz=tz, freq=freq, + dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) - else: - # must be integer dtype otherwise - # assume this data are epoch timestamps - if data.dtype != _INT64_DTYPE: - data = data.astype(np.int64, copy=False) - subarr = data.view(_NS_DTYPE) - - assert isinstance(subarr, np.ndarray), type(subarr) - assert subarr.dtype == 'M8[ns]', subarr.dtype - - subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz) - if dtype is not None: - if not is_dtype_equal(subarr.dtype, dtype): - # dtype must be coerced to DatetimeTZDtype above - if subarr.tz is not None: - raise ValueError("cannot localize from non-UTC data") - - if verify_integrity and 
len(subarr) > 0: - if freq is not None and not freq_infer: - cls._validate_frequency(subarr, freq, ambiguous=ambiguous) - - if freq_infer: - subarr.freq = to_offset(subarr.inferred_freq) + subarr = cls._simple_new(dtarr._data, name=name, + freq=dtarr.freq, tz=dtarr.tz) return subarr._deepcopy_if_needed(ref_to_data, copy) diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 26e51e4f63101..3d69a0a84f7ae 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -19,9 +19,9 @@ from pandas.core import common as com from pandas.core.accessor import delegate_names from pandas.core.algorithms import unique1d -import pandas.core.arrays.datetimelike as dtl from pandas.core.arrays.datetimelike import DatelikeOps -from pandas.core.arrays.period import PeriodArray, period_array +from pandas.core.arrays.period import ( + PeriodArray, period_array, validate_dtype_freq) from pandas.core.base import _shared_docs import pandas.core.indexes.base as ibase from pandas.core.indexes.base import _index_shared_docs, ensure_index @@ -185,7 +185,7 @@ def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, freq, fields) data = PeriodArray(data, freq=freq) else: - freq = dtl.validate_dtype_freq(dtype, freq) + freq = validate_dtype_freq(dtype, freq) # PeriodIndex allow PeriodIndex(period_index, freq=different) # Let's not encourage that kind of behavior in PeriodArray.
Removes dependence of `DatetimeArray.__new__` on `DatetimeIndex`. De-duplicated `DatetimeIndex.__new__`/`DatetimeArray.__new__`. The contents of `DatetimeArray._from_sequence` are basically just moved from `DatetimeIndex.__new__`. This is feasible because #23675 disentangled `to_datetime` from `DatetimeIndex.__new__`. cc @TomAugspurger this is the last thing on my todo list for DTA/TDA. LMK if I can be helpful with the composition transition.
https://api.github.com/repos/pandas-dev/pandas/pulls/24074
2018-12-03T18:04:31Z
2018-12-05T22:44:50Z
2018-12-05T22:44:50Z
2018-12-06T00:13:18Z
TST: use s3_resource fixture consistently
diff --git a/pandas/tests/io/json/test_compression.py b/pandas/tests/io/json/test_compression.py index c50b6f68b8839..46a5e511fe748 100644 --- a/pandas/tests/io/json/test_compression.py +++ b/pandas/tests/io/json/test_compression.py @@ -33,24 +33,19 @@ def test_read_zipped_json(datapath): @td.skip_if_not_us_locale -def test_with_s3_url(compression): - boto3 = pytest.importorskip('boto3') - pytest.importorskip('s3fs') - moto = pytest.importorskip('moto') +def test_with_s3_url(compression, s3_resource): + # Bucket "pandas-test" created in tests/io/conftest.py df = pd.read_json('{"a": [1, 2, 3], "b": [4, 5, 6]}') - with moto.mock_s3(): - conn = boto3.resource("s3", region_name="us-east-1") - bucket = conn.create_bucket(Bucket="pandas-test") - with tm.ensure_clean() as path: - df.to_json(path, compression=compression) - with open(path, 'rb') as f: - bucket.put_object(Key='test-1', Body=f) + with tm.ensure_clean() as path: + df.to_json(path, compression=compression) + with open(path, 'rb') as f: + s3_resource.Bucket("pandas-test").put_object(Key='test-1', Body=f) - roundtripped_df = pd.read_json('s3://pandas-test/test-1', - compression=compression) - assert_frame_equal(df, roundtripped_df) + roundtripped_df = pd.read_json('s3://pandas-test/test-1', + compression=compression) + assert_frame_equal(df, roundtripped_df) def test_lines_with_compression(compression): diff --git a/pandas/tests/io/test_excel.py b/pandas/tests/io/test_excel.py index 564364ea01432..8cc3dee6648a8 100644 --- a/pandas/tests/io/test_excel.py +++ b/pandas/tests/io/test_excel.py @@ -723,25 +723,19 @@ def test_read_from_http_url(self, ext): local_table = self.get_exceldf('test1', ext) tm.assert_frame_equal(url_table, local_table) - @td.skip_if_no("s3fs") @td.skip_if_not_us_locale - def test_read_from_s3_url(self, ext): - moto = pytest.importorskip("moto") - boto3 = pytest.importorskip("boto3") - - with moto.mock_s3(): - conn = boto3.resource("s3", region_name="us-east-1") - 
conn.create_bucket(Bucket="pandas-test") - file_name = os.path.join(self.dirpath, 'test1' + ext) - - with open(file_name, "rb") as f: - conn.Bucket("pandas-test").put_object(Key="test1" + ext, - Body=f) - - url = ('s3://pandas-test/test1' + ext) - url_table = read_excel(url) - local_table = self.get_exceldf('test1', ext) - tm.assert_frame_equal(url_table, local_table) + def test_read_from_s3_url(self, ext, s3_resource): + # Bucket "pandas-test" created in tests/io/conftest.py + file_name = os.path.join(self.dirpath, 'test1' + ext) + + with open(file_name, "rb") as f: + s3_resource.Bucket("pandas-test").put_object(Key="test1" + ext, + Body=f) + + url = ('s3://pandas-test/test1' + ext) + url_table = read_excel(url) + local_table = self.get_exceldf('test1', ext) + tm.assert_frame_equal(url_table, local_table) @pytest.mark.slow # ignore warning from old xlrd
- [x] split off from #23731 - [x] tests modified / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` There are two boto tests that are not using the dedicated `s3_resource` fixture which takes care of all the mocking etc. This PR adapts those tests accordingly, which also allows unified treatment of things like #23754.
https://api.github.com/repos/pandas-dev/pandas/pulls/24073
2018-12-03T18:02:18Z
2018-12-04T02:46:23Z
2018-12-04T02:46:23Z
2018-12-04T07:42:05Z
DOC: Correct/update skipna docstrings for `any` and `all` (#23109)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1e26c3f45f660..9eb3eb37a01cc 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -9894,11 +9894,11 @@ def _add_numeric_operations(cls): axis_descr, name, name2 = _doc_parms(cls) cls.any = _make_logical_function( - cls, 'any', name, name2, axis_descr, - _any_desc, nanops.nanany, _any_examples, _any_see_also) + cls, 'any', name, name2, axis_descr, _any_desc, nanops.nanany, + _any_examples, _any_see_also, empty_value=False) cls.all = _make_logical_function( - cls, 'all', name, name2, axis_descr, _all_doc, - nanops.nanall, _all_examples, _all_see_also) + cls, 'all', name, name2, axis_descr, _all_desc, nanops.nanall, + _all_examples, _all_see_also, empty_value=True) @Substitution(outname='mad', desc="Return the mean absolute deviation of the values " @@ -10219,12 +10219,14 @@ def _doc_parms(cls): original index. * None : reduce all axes, return a scalar. -bool_only : boolean, default None +bool_only : bool, default None Include only boolean columns. If None, will attempt to use everything, then use only boolean data. Not implemented for Series. -skipna : boolean, default True - Exclude NA/null values. If an entire row/column is NA, the result - will be NA. +skipna : bool, default True + Exclude NA/null values. If the entire row/column is NA and skipna is + True, then the result will be %(empty_value)s, as for an empty row/column. + If skipna is False, then NA are treated as True, because these are not + equal to zero. level : int or level name, default None If the axis is a MultiIndex (hierarchical), count along a particular level, collapsing into a %(name1)s. @@ -10234,28 +10236,37 @@ def _doc_parms(cls): Returns ------- -%(outname)s : %(name1)s or %(name2)s (if level specified) +%(name1)s or %(name2)s + If level is specified, then, %(name2)s is returned; otherwise, %(name1)s + is returned. 
%(see_also)s %(examples)s""" -_all_doc = """\ +_all_desc = """\ Return whether all elements are True, potentially over an axis. -Returns True if all elements within a series or along a Dataframe -axis are non-zero, not-empty or not-False.""" +Returns True unless there at least one element within a series or +along a Dataframe axis that is False or equivalent (e.g. zero or +empty).""" _all_examples = """\ Examples -------- -Series +**Series** >>> pd.Series([True, True]).all() True >>> pd.Series([True, False]).all() False +>>> pd.Series([]).all() +True +>>> pd.Series([np.nan]).all() +True +>>> pd.Series([np.nan]).all(skipna=False) +True -DataFrames +**DataFrames** Create a dataframe from a dictionary. @@ -10597,10 +10608,11 @@ def _doc_parms(cls): """ _any_desc = """\ -Return whether any element is True over requested axis. +Return whether any element is True, potentially over an axis. -Unlike :meth:`DataFrame.all`, this performs an *or* operation. If any of the -values along the specified axis is True, this will return True.""" +Returns False unless there at least one element within a series or +along a Dataframe axis that is True or equivalent (e.g. non-zero or +non-empty).""" _any_examples = """\ Examples @@ -10610,8 +10622,16 @@ def _doc_parms(cls): For Series input, the output is a scalar indicating whether any element is True. 
+>>> pd.Series([False, False]).any() +False >>> pd.Series([True, False]).any() True +>>> pd.Series([]).any() +False +>>> pd.Series([np.nan]).any() +False +>>> pd.Series([np.nan]).any(skipna=False) +True **DataFrame** @@ -10897,9 +10917,10 @@ def cum_func(self, axis=None, skipna=True, *args, **kwargs): def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f, - examples, see_also): + examples, see_also, empty_value): @Substitution(outname=name, desc=desc, name1=name1, name2=name2, - axis_descr=axis_descr, examples=examples, see_also=see_also) + axis_descr=axis_descr, examples=examples, see_also=see_also, + empty_value=empty_value) @Appender(_bool_doc) def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
Also, include examples with NA values and describe treatment of NA with `skipna == False` - [x] closes #23109 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24069
2018-12-03T15:15:45Z
2018-12-10T15:22:15Z
2018-12-10T15:22:14Z
2018-12-17T15:10:11Z
DOC: Fix quotes position in Series docstrings
diff --git a/pandas/core/accessor.py b/pandas/core/accessor.py index f2ae7f6b56551..fa1dc751c17da 100644 --- a/pandas/core/accessor.py +++ b/pandas/core/accessor.py @@ -41,7 +41,9 @@ def __dir__(self): class PandasDelegate(object): - """ an abstract base class for delegating methods/properties """ + """ + an abstract base class for delegating methods/properties + """ def _delegate_property_get(self, name, *args, **kwargs): raise TypeError("You cannot access the " @@ -146,7 +148,8 @@ def add_delegate_accessors(cls): # 2. We use a UserWarning instead of a custom Warning class CachedAccessor(object): - """Custom property-like object (descriptor) for caching accessors. + """ + Custom property-like object (descriptor) for caching accessors. Parameters ---------- @@ -189,7 +192,8 @@ def decorator(accessor): return decorator -_doc = """Register a custom accessor on %(klass)s objects. +_doc = """\ +Register a custom accessor on %(klass)s objects. Parameters ---------- diff --git a/pandas/core/arrays/base.py b/pandas/core/arrays/base.py index eb2fef482ff17..8877436dcf51c 100644 --- a/pandas/core/arrays/base.py +++ b/pandas/core/arrays/base.py @@ -22,7 +22,8 @@ class ExtensionArray(object): - """Abstract base class for custom 1-D array types. + """ + Abstract base class for custom 1-D array types. pandas will recognize instances of this class as proper arrays with a custom type and will not attempt to coerce them to objects. They @@ -100,7 +101,8 @@ class ExtensionArray(object): # ------------------------------------------------------------------------ @classmethod def _from_sequence(cls, scalars, dtype=None, copy=False): - """Construct a new ExtensionArray from a sequence of scalars. + """ + Construct a new ExtensionArray from a sequence of scalars. Parameters ---------- @@ -121,7 +123,8 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): @classmethod def _from_factorized(cls, values, original): - """Reconstruct an ExtensionArray after factorization. 
+ """ + Reconstruct an ExtensionArray after factorization. Parameters ---------- @@ -143,7 +146,8 @@ def _from_factorized(cls, values, original): def __getitem__(self, item): # type (Any) -> Any - """Select a subset of self. + """ + Select a subset of self. Parameters ---------- @@ -174,7 +178,8 @@ def __getitem__(self, item): def __setitem__(self, key, value): # type: (Union[int, np.ndarray], Any) -> None - """Set one or more values inplace. + """ + Set one or more values inplace. This method is not required to satisfy the pandas extension array interface. @@ -219,7 +224,8 @@ def __setitem__(self, key, value): def __len__(self): # type: () -> int - """Length of this array + """ + Length of this array Returns ------- @@ -228,8 +234,8 @@ def __len__(self): raise AbstractMethodError(self) def __iter__(self): - """Iterate over elements of the array. - + """ + Iterate over elements of the array. """ # This needs to be implemented so that pandas recognizes extension # arrays as list-like. The default implementation makes successive @@ -243,26 +249,32 @@ def __iter__(self): @property def dtype(self): # type: () -> ExtensionDtype - """An instance of 'ExtensionDtype'.""" + """ + An instance of 'ExtensionDtype'. + """ raise AbstractMethodError(self) @property def shape(self): # type: () -> Tuple[int, ...] - """Return a tuple of the array dimensions.""" + """ + Return a tuple of the array dimensions. + """ return (len(self),) @property def ndim(self): # type: () -> int - """Extension Arrays are only allowed to be 1-dimensional.""" + """ + Extension Arrays are only allowed to be 1-dimensional. + """ return 1 @property def nbytes(self): # type: () -> int - """The number of bytes needed to store this object in memory. - + """ + The number of bytes needed to store this object in memory. """ # If this is expensive to compute, return an approximate lower bound # on the number of bytes needed. 
@@ -272,7 +284,8 @@ def nbytes(self): # Additional Methods # ------------------------------------------------------------------------ def astype(self, dtype, copy=True): - """Cast to a NumPy array with 'dtype'. + """ + Cast to a NumPy array with 'dtype'. Parameters ---------- @@ -315,7 +328,8 @@ def isna(self): def _values_for_argsort(self): # type: () -> ndarray - """Return values for sorting. + """ + Return values for sorting. Returns ------- @@ -365,7 +379,8 @@ def argsort(self, ascending=True, kind='quicksort', *args, **kwargs): return result def fillna(self, value=None, method=None, limit=None): - """ Fill NA/NaN values using the specified method. + """ + Fill NA/NaN values using the specified method. Parameters ---------- @@ -418,7 +433,8 @@ def fillna(self, value=None, method=None, limit=None): return new_values def dropna(self): - """ Return ExtensionArray without NA values + """ + Return ExtensionArray without NA values Returns ------- @@ -462,7 +478,8 @@ def shift(self, periods=1): return self._concat_same_type([a, b]) def unique(self): - """Compute the ExtensionArray of unique values. + """ + Compute the ExtensionArray of unique values. Returns ------- @@ -475,7 +492,8 @@ def unique(self): def _values_for_factorize(self): # type: () -> Tuple[ndarray, Any] - """Return an array and missing value suitable for factorization. + """ + Return an array and missing value suitable for factorization. Returns ------- @@ -499,7 +517,8 @@ def _values_for_factorize(self): def factorize(self, na_sentinel=-1): # type: (int) -> Tuple[ndarray, ExtensionArray] - """Encode the extension array as an enumerated type. + """ + Encode the extension array as an enumerated type. Parameters ---------- @@ -552,7 +571,8 @@ def factorize(self, na_sentinel=-1): def take(self, indices, allow_fill=False, fill_value=None): # type: (Sequence[int], bool, Optional[Any]) -> ExtensionArray - """Take elements from an array. + """ + Take elements from an array. 
Parameters ---------- @@ -641,7 +661,8 @@ def take(self, indices, allow_fill=False, fill_value=None): def copy(self, deep=False): # type: (bool) -> ExtensionArray - """Return a copy of the array. + """ + Return a copy of the array. Parameters ---------- @@ -661,13 +682,16 @@ def copy(self, deep=False): def _formatting_values(self): # type: () -> np.ndarray # At the moment, this has to be an array since we use result.dtype - """An array of values to be printed in, e.g. the Series repr""" + """ + An array of values to be printed in, e.g. the Series repr + """ return np.array(self) @classmethod def _concat_same_type(cls, to_concat): # type: (Sequence[ExtensionArray]) -> ExtensionArray - """Concatenate multiple array + """ + Concatenate multiple array Parameters ---------- @@ -689,7 +713,8 @@ def _concat_same_type(cls, to_concat): @property def _ndarray_values(self): # type: () -> np.ndarray - """Internal pandas method for lossy conversion to a NumPy ndarray. + """ + Internal pandas method for lossy conversion to a NumPy ndarray. This method is not part of the pandas interface. diff --git a/pandas/core/arrays/categorical.py b/pandas/core/arrays/categorical.py index 5db851d4bf021..c8d9cf4a79dfe 100644 --- a/pandas/core/arrays/categorical.py +++ b/pandas/core/arrays/categorical.py @@ -200,7 +200,8 @@ def contains(cat, key, container): return any(loc_ in container for loc_ in loc) -_codes_doc = """The category codes of this categorical. +_codes_doc = """\ +The category codes of this categorical. Level codes are an array if integer which are the positions of the real values in the categories array. @@ -425,7 +426,8 @@ def __init__(self, values, categories=None, ordered=None, dtype=None, @property def categories(self): - """The categories of this categorical. + """ + The categories of this categorical. Setting assigns new values to each category (effectively a rename of each individual category). 
@@ -464,12 +466,16 @@ def categories(self, categories): @property def ordered(self): - """Whether the categories have an ordered relationship""" + """ + Whether the categories have an ordered relationship + """ return self.dtype.ordered @property def dtype(self): - """The :class:`~pandas.api.types.CategoricalDtype` for this instance""" + """ + The :class:`~pandas.api.types.CategoricalDtype` for this instance + """ return self._dtype @property @@ -485,7 +491,9 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): return Categorical(scalars, dtype=dtype) def copy(self): - """ Copy constructor. """ + """ + Copy constructor. + """ return self._constructor(values=self._codes.copy(), dtype=self.dtype, fastpath=True) @@ -516,17 +524,23 @@ def astype(self, dtype, copy=True): @cache_readonly def ndim(self): - """Number of dimensions of the Categorical """ + """ + Number of dimensions of the Categorical + """ return self._codes.ndim @cache_readonly def size(self): - """ return the len of myself """ + """ + return the len of myself + """ return len(self) @cache_readonly def itemsize(self): - """ return the size of a single category """ + """ + return the size of a single category + """ return self.categories.itemsize def tolist(self): @@ -541,7 +555,9 @@ def tolist(self): @property def base(self): - """ compat, we are always our own object """ + """ + compat, we are always our own object + """ return None @classmethod @@ -660,7 +676,8 @@ def from_codes(cls, codes, categories, ordered=False): _codes = None def _get_codes(self): - """ Get the codes. + """ + Get the codes. 
Returns ------- @@ -680,7 +697,8 @@ def _set_codes(self, codes): codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc) def _set_categories(self, categories, fastpath=False): - """ Sets new categories inplace + """ + Sets new categories inplace Parameters ---------- @@ -713,7 +731,8 @@ def _set_categories(self, categories, fastpath=False): self._dtype = new_dtype def _set_dtype(self, dtype): - """Internal method for directly updating the CategoricalDtype + """ + Internal method for directly updating the CategoricalDtype Parameters ---------- @@ -775,7 +794,8 @@ def as_unordered(self, inplace=False): def set_categories(self, new_categories, ordered=None, rename=False, inplace=False): - """ Sets the categories to the specified new_categories. + """ + Sets the categories to the specified new_categories. `new_categories` can include new categories (which will result in unused categories) or remove old categories (which results in values @@ -845,7 +865,8 @@ def set_categories(self, new_categories, ordered=None, rename=False, return cat def rename_categories(self, new_categories, inplace=False): - """ Renames categories. + """ + Renames categories. Raises ------ @@ -938,7 +959,8 @@ def rename_categories(self, new_categories, inplace=False): return cat def reorder_categories(self, new_categories, ordered=None, inplace=False): - """ Reorders categories as specified in new_categories. + """ + Reorders categories as specified in new_categories. `new_categories` need to include all old categories and no new category items. @@ -980,7 +1002,8 @@ def reorder_categories(self, new_categories, ordered=None, inplace=False): inplace=inplace) def add_categories(self, new_categories, inplace=False): - """ Add new categories. + """ + Add new categories. `new_categories` will be included at the last/highest place in the categories and will be unused directly after this call. 
@@ -1029,7 +1052,8 @@ def add_categories(self, new_categories, inplace=False): return cat def remove_categories(self, removals, inplace=False): - """ Removes the specified categories. + """ + Removes the specified categories. `removals` must be included in the old categories. Values which were in the removed categories will be set to NaN @@ -1081,7 +1105,8 @@ def remove_categories(self, removals, inplace=False): rename=False, inplace=inplace) def remove_unused_categories(self, inplace=False): - """ Removes categories which are not used. + """ + Removes categories which are not used. Parameters ---------- @@ -1204,7 +1229,8 @@ def map(self, mapper): # for Series/ndarray like compat @property def shape(self): - """ Shape of the Categorical. + """ + Shape of the Categorical. For internal compatibility with numpy arrays. @@ -1296,6 +1322,9 @@ def __setstate__(self, state): @property def T(self): + """ + Return transposed numpy array. + """ return self @property @@ -1449,7 +1478,8 @@ def value_counts(self, dropna=True): return Series(count, index=CategoricalIndex(ix), dtype='int64') def get_values(self): - """ Return the values. + """ + Return the values. For internal compatibility with pandas formatting. @@ -1478,7 +1508,8 @@ def argsort(self, *args, **kwargs): # TODO(PY2): use correct signature # We have to do *args, **kwargs to avoid a a py2-only signature # issue since np.argsort differs from argsort. - """Return the indices that would sort the Categorical. + """ + Return the indices that would sort the Categorical. Parameters ---------- @@ -1521,7 +1552,8 @@ def argsort(self, *args, **kwargs): return super(Categorical, self).argsort(*args, **kwargs) def sort_values(self, inplace=False, ascending=True, na_position='last'): - """ Sorts the Categorical by category value returning a new + """ + Sorts the Categorical by category value returning a new Categorical by default. 
While an ordering is applied to the category values, sorting in this @@ -1639,7 +1671,8 @@ def _values_for_rank(self): return values def ravel(self, order='C'): - """ Return a flattened (numpy) array. + """ + Return a flattened (numpy) array. For internal compatibility with numpy arrays. @@ -1650,7 +1683,8 @@ def ravel(self, order='C'): return np.array(self) def view(self): - """Return a view of myself. + """ + Return a view of myself. For internal compatibility with numpy arrays. @@ -1662,7 +1696,8 @@ def view(self): return self def to_dense(self): - """Return my 'dense' representation + """ + Return my 'dense' representation For internal compatibility with numpy arrays. @@ -1674,7 +1709,8 @@ def to_dense(self): @deprecate_kwarg(old_arg_name='fill_value', new_arg_name='value') def fillna(self, value=None, method=None, limit=None): - """ Fill NA/NaN values using the specified method. + """ + Fill NA/NaN values using the specified method. Parameters ---------- @@ -1857,7 +1893,8 @@ def take_nd(self, indexer, allow_fill=None, fill_value=None): take = take_nd def _slice(self, slicer): - """ Return a slice of myself. + """ + Return a slice of myself. For internal compatibility with numpy arrays. """ @@ -1874,15 +1911,21 @@ def _slice(self, slicer): return self._constructor(values=codes, dtype=self.dtype, fastpath=True) def __len__(self): - """The length of this Categorical.""" + """ + The length of this Categorical. + """ return len(self._codes) def __iter__(self): - """Returns an Iterator over the values of this Categorical.""" + """ + Returns an Iterator over the values of this Categorical. + """ return iter(self.get_values().tolist()) def __contains__(self, key): - """Returns True if `key` is in this Categorical.""" + """ + Returns True if `key` is in this Categorical. + """ # if key is a NaN, check if any NaN is in self. 
if isna(key): return self.isna().any() @@ -1905,7 +1948,9 @@ def _tidy_repr(self, max_vals=10, footer=True): return compat.text_type(result) def _repr_categories(self): - """ return the base repr for the categories """ + """ + return the base repr for the categories + """ max_categories = (10 if get_option("display.max_categories") == 0 else get_option("display.max_categories")) from pandas.io.formats import format as fmt @@ -1922,7 +1967,9 @@ def _repr_categories(self): return category_strs def _repr_categories_info(self): - """ Returns a string representation of the footer.""" + """ + Returns a string representation of the footer. + """ category_strs = self._repr_categories() dtype = getattr(self.categories, 'dtype_str', @@ -1965,7 +2012,9 @@ def _get_repr(self, length=True, na_rep='NaN', footer=True): return compat.text_type(result) def __unicode__(self): - """ Unicode representation. """ + """ + Unicode representation. + """ _maxlen = 10 if len(self._codes) > _maxlen: result = self._tidy_repr(_maxlen) @@ -1978,13 +2027,17 @@ def __unicode__(self): return result def _maybe_coerce_indexer(self, indexer): - """ return an indexer coerced to the codes dtype """ + """ + return an indexer coerced to the codes dtype + """ if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'i': indexer = indexer.astype(self._codes.dtype) return indexer def __getitem__(self, key): - """ Return an item. """ + """ + Return an item. + """ if isinstance(key, (int, np.integer)): i = self._codes[key] if i == -1: @@ -1996,7 +2049,8 @@ def __getitem__(self, key): dtype=self.dtype, fastpath=True) def __setitem__(self, key, value): - """ Item assignment. + """ + Item assignment. Raises @@ -2100,7 +2154,8 @@ def _reduce(self, name, axis=0, skipna=True, **kwargs): return func(**kwargs) def min(self, numeric_only=None, **kwargs): - """ The minimum value of the object. + """ + The minimum value of the object. Only ordered `Categoricals` have a minimum! 
@@ -2125,7 +2180,8 @@ def min(self, numeric_only=None, **kwargs): return self.categories[pointer] def max(self, numeric_only=None, **kwargs): - """ The maximum value of the object. + """ + The maximum value of the object. Only ordered `Categoricals` have a maximum! @@ -2284,7 +2340,8 @@ def is_dtype_equal(self, other): return False def describe(self): - """ Describes this Categorical + """ + Describes this Categorical Returns ------- @@ -2441,6 +2498,9 @@ def _delegate_property_set(self, name, new_values): @property def codes(self): + """ + Return Series of codes as well as the index. + """ from pandas import Series return Series(self._parent.codes, index=self.index) diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 83ee335aa5465..3f4e8cedb6ac5 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -76,7 +76,9 @@ def _simple_new(cls, values, **kwargs): raise AbstractMethodError(cls) def _get_attributes_dict(self): - """return an attributes dict for my class""" + """ + return an attributes dict for my class + """ return {k: getattr(self, k, None) for k in self._attributes} @@ -254,12 +256,16 @@ def isna(self): @property # NB: override with cache_readonly in immutable subclasses def _isnan(self): - """ return if each value is nan""" + """ + return if each value is nan + """ return (self.asi8 == iNaT) @property # NB: override with cache_readonly in immutable subclasses def hasnans(self): - """ return if I have any nans; enables various perf speedups """ + """ + return if I have any nans; enables various perf speedups + """ return bool(self._isnan.any()) def _maybe_mask_results(self, result, fill_value=iNaT, convert=None): @@ -293,7 +299,9 @@ def _maybe_mask_results(self, result, fill_value=iNaT, convert=None): @property def freq(self): - """Return the frequency object if it is set, otherwise None""" + """ + Return the frequency object if it is set, otherwise None. 
+ """ return self._freq @freq.setter @@ -459,7 +467,9 @@ def _add_delta_tdi(self, other): return new_values.view('i8') def _add_nat(self): - """Add pd.NaT to self""" + """ + Add pd.NaT to self + """ if is_period_dtype(self): raise TypeError('Cannot add {cls} and {typ}' .format(cls=type(self).__name__, @@ -474,7 +484,9 @@ def _add_nat(self): return type(self)(result, tz=self.tz, freq=None) def _sub_nat(self): - """Subtract pd.NaT from self""" + """ + Subtract pd.NaT from self + """ # GH#19124 Timedelta - datetime is not in general well-defined. # We make an exception for pd.NaT, which in this case quacks # like a timedelta. diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 4d3caaacca1c1..7bc15c73802cb 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -340,6 +340,9 @@ def dtype(self): @property def tz(self): + """ + Return timezone. + """ # GH 18595 return self._tz @@ -358,12 +361,16 @@ def tzinfo(self): @property # NB: override with cache_readonly in immutable subclasses def _timezone(self): - """ Comparable timezone both for pytz / dateutil""" + """ + Comparable timezone both for pytz / dateutil + """ return timezones.get_timezone(self.tzinfo) @property def offset(self): - """get/set the frequency of the instance""" + """ + get/set the frequency of the instance + """ msg = ('{cls}.offset has been deprecated and will be removed ' 'in a future version; use {cls}.freq instead.' .format(cls=type(self).__name__)) @@ -372,7 +379,9 @@ def offset(self): @offset.setter def offset(self, value): - """get/set the frequency of the instance""" + """ + get/set the frequency of the instance + """ msg = ('{cls}.offset has been deprecated and will be removed ' 'in a future version; use {cls}.freq instead.' 
.format(cls=type(self).__name__)) @@ -1062,19 +1071,19 @@ def date(self): return tslib.ints_to_pydatetime(timestamps, box="date") - year = _field_accessor('year', 'Y', "The year of the datetime") + year = _field_accessor('year', 'Y', "\n The year of the datetime\n") month = _field_accessor('month', 'M', - "The month as January=1, December=12") - day = _field_accessor('day', 'D', "The days of the datetime") - hour = _field_accessor('hour', 'h', "The hours of the datetime") - minute = _field_accessor('minute', 'm', "The minutes of the datetime") - second = _field_accessor('second', 's', "The seconds of the datetime") + "\n The month as January=1, December=12 \n") + day = _field_accessor('day', 'D', "\nThe days of the datetime\n") + hour = _field_accessor('hour', 'h', "\nThe hours of the datetime\n") + minute = _field_accessor('minute', 'm', "\nThe minutes of the datetime\n") + second = _field_accessor('second', 's', "\nThe seconds of the datetime\n") microsecond = _field_accessor('microsecond', 'us', - "The microseconds of the datetime") + "\nThe microseconds of the datetime\n") nanosecond = _field_accessor('nanosecond', 'ns', - "The nanoseconds of the datetime") + "\nThe nanoseconds of the datetime\n") weekofyear = _field_accessor('weekofyear', 'woy', - "The week ordinal of the year") + "\nThe week ordinal of the year\n") week = weekofyear _dayofweek_doc = """ The day of the week with Monday=0, Sunday=6. @@ -1119,12 +1128,12 @@ def date(self): "The name of day in a week (ex: Friday)\n\n.. 
deprecated:: 0.23.0") dayofyear = _field_accessor('dayofyear', 'doy', - "The ordinal day of the year") - quarter = _field_accessor('quarter', 'q', "The quarter of the date") + "\nThe ordinal day of the year\n") + quarter = _field_accessor('quarter', 'q', "\nThe quarter of the date\n") days_in_month = _field_accessor( 'days_in_month', 'dim', - "The number of days in the month") + "\nThe number of days in the month\n") daysinmonth = days_in_month _is_month_doc = """ Indicates whether the date is the {first_or_last} day of the month. diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index e9d51aaea4218..7996e24d0c914 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -386,7 +386,8 @@ def _concat_same_type(cls, to_concat): return cls(data, mask) def astype(self, dtype, copy=True): - """Cast to a NumPy array or IntegerArray with 'dtype'. + """ + Cast to a NumPy array or IntegerArray with 'dtype'. Parameters ---------- diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 53629dca4d391..9342276921863 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -205,7 +205,8 @@ def _from_sequence(cls, scalars, dtype=None, copy=False): @classmethod def _from_datetime64(cls, data, freq, tz=None): - """Construct a PeriodArray from a datetime64 array + """ + Construct a PeriodArray from a datetime64 array Parameters ---------- @@ -255,7 +256,9 @@ def _ndarray_values(self): @property def freq(self): - """Return the frequency object for this PeriodArray.""" + """ + Return the frequency object for this PeriodArray. 
+ """ return self.dtype.freq # -------------------------------------------------------------------- @@ -282,7 +285,9 @@ def freq(self): @property def is_leap_year(self): - """ Logical indicating if the date belongs to a leap year """ + """ + Logical indicating if the date belongs to a leap year + """ return isleapyear_arr(np.asarray(self.year)) @property @@ -568,7 +573,9 @@ def asfreq(self, freq=None, how='E'): # Formatting def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs): - """ actually format my specific types """ + """ + actually format my specific types + """ # TODO(DatetimeArray): remove values = self.astype(object) diff --git a/pandas/core/arrays/sparse.py b/pandas/core/arrays/sparse.py index 9a5ef3b3a7dd0..ae5a4eb7075de 100644 --- a/pandas/core/arrays/sparse.py +++ b/pandas/core/arrays/sparse.py @@ -169,7 +169,9 @@ def _is_boolean(self): @property def kind(self): - """The sparse kind. Either 'integer', or 'block'.""" + """ + The sparse kind. Either 'integer', or 'block'. + """ return self.subtype.kind @property @@ -285,7 +287,8 @@ def is_dtype(cls, dtype): return isinstance(dtype, np.dtype) or dtype == 'Sparse' def update_dtype(self, dtype): - """Convert the SparseDtype to a new dtype. + """ + Convert the SparseDtype to a new dtype. This takes care of converting the ``fill_value``. @@ -478,7 +481,9 @@ def _sparse_array_op(left, right, op, name): def _wrap_result(name, data, sparse_index, fill_value, dtype=None): - """ wrap op result to have correct dtype """ + """ + wrap op result to have correct dtype + """ if name.startswith('__'): # e.g. __eq__ --> eq name = name[2:-2] @@ -787,7 +792,8 @@ def nbytes(self): @property def density(self): - """The percent of non- ``fill_value`` points, as decimal. + """ + The percent of non- ``fill_value`` points, as decimal. Examples -------- @@ -800,7 +806,8 @@ def density(self): @property def npoints(self): - """The number of non- ``fill_value`` points. 
+ """ + The number of non- ``fill_value`` points. Examples -------- @@ -1523,12 +1530,16 @@ def mean(self, axis=0, *args, **kwargs): return (sp_sum + self.fill_value * nsparse) / (ct + nsparse) def transpose(self, *axes): - """Returns the SparseArray.""" + """ + Returns the SparseArray. + """ return self @property def T(self): - """Returns the SparseArray.""" + """ + Returns the SparseArray. + """ return self # ------------------------------------------------------------------------ @@ -1742,14 +1753,18 @@ def __unicode__(self): def _maybe_to_dense(obj): - """ try to convert to dense """ + """ + try to convert to dense + """ if hasattr(obj, 'to_dense'): return obj.to_dense() return obj def _maybe_to_sparse(array): - """ array must be SparseSeries or SparseArray """ + """ + array must be SparseSeries or SparseArray + """ if isinstance(array, ABCSparseSeries): array = array.values.copy() return array diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 83cea51cec9f6..0039cc9565df7 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -444,16 +444,16 @@ def to_pytimedelta(self): return tslibs.ints_to_pytimedelta(self.asi8) days = _field_accessor("days", "days", - " Number of days for each element. ") + "\nNumber of days for each element.\n") seconds = _field_accessor("seconds", "seconds", - " Number of seconds (>= 0 and less than 1 day) " - "for each element. ") + "\nNumber of seconds (>= 0 and less than 1 day) " + "for each element.\n") microseconds = _field_accessor("microseconds", "microseconds", "\nNumber of microseconds (>= 0 and less " - "than 1 second) for each\nelement. 
") + "than 1 second) for each element.\n") nanoseconds = _field_accessor("nanoseconds", "nanoseconds", "\nNumber of nanoseconds (>= 0 and less " - "than 1 microsecond) for each\nelement.\n") + "than 1 microsecond) for each element.\n") @property def components(self): diff --git a/pandas/core/base.py b/pandas/core/base.py index fd303182959a5..31ff60b025758 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -631,7 +631,9 @@ def _aggregate_multiple_funcs(self, arg, _level, _axis): return result def _shallow_copy(self, obj=None, obj_type=None, **kwargs): - """ return a new object with the replacement attributes """ + """ + return a new object with the replacement attributes + """ if obj is None: obj = self._selected_obj.copy() if obj_type is None: @@ -644,7 +646,9 @@ def _shallow_copy(self, obj=None, obj_type=None, **kwargs): return obj_type(obj, **kwargs) def _is_cython_func(self, arg): - """ if we define an internal function for this argument, return it """ + """ + if we define an internal function for this argument, return it + """ return self._cython_table.get(arg) def _is_builtin_func(self, arg): @@ -675,7 +679,8 @@ def transpose(self, *args, **kwargs): @property def _is_homogeneous_type(self): - """Whether the object has a single dtype. + """ + Whether the object has a single dtype. By definition, Series and Index are always considered homogeneous. A MultiIndex may or may not be homogeneous, depending on the @@ -780,7 +785,8 @@ def base(self): @property def _ndarray_values(self): # type: () -> np.ndarray - """The data as an ndarray, possibly losing information. + """ + The data as an ndarray, possibly losing information. The expectation is that this is cheap to compute, and is primarily used for interacting with our indexers. 
@@ -927,7 +933,8 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None, return func(**kwds) def _map_values(self, mapper, na_action=None): - """An internal function that maps values using the input + """ + An internal function that maps values using the input correspondence (which can be a dict, Series, or function). Parameters @@ -1201,7 +1208,8 @@ def factorize(self, sort=False, na_sentinel=-1): return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel) _shared_docs['searchsorted'] = ( - """Find indices where elements should be inserted to maintain order. + """ + Find indices where elements should be inserted to maintain order. Find the indices into a sorted %(klass)s `self` such that, if the corresponding elements in `value` were inserted before the indices, diff --git a/pandas/core/dtypes/base.py b/pandas/core/dtypes/base.py index df0e89cced816..aa81e88abf28e 100644 --- a/pandas/core/dtypes/base.py +++ b/pandas/core/dtypes/base.py @@ -146,7 +146,8 @@ def _is_boolean(self): class ExtensionDtype(_DtypeOpsMixin): - """A custom data type, to be paired with an ExtensionArray. + """ + A custom data type, to be paired with an ExtensionArray. .. versionadded:: 0.23.0 @@ -211,7 +212,8 @@ def __str__(self): @property def type(self): # type: () -> type - """The scalar type for the array, e.g. ``int`` + """ + The scalar type for the array, e.g. ``int`` It's expected ``ExtensionArray[item]`` returns an instance of ``ExtensionDtype.type`` for scalar ``item``, assuming @@ -223,7 +225,8 @@ def type(self): @property def kind(self): # type () -> str - """A character code (one of 'biufcmMOSUV'), default 'O' + """ + A character code (one of 'biufcmMOSUV'), default 'O' This should match the NumPy dtype used when the array is converted to an ndarray, which is probably 'O' for object if @@ -239,7 +242,8 @@ def kind(self): @property def name(self): # type: () -> str - """A string identifying the data type. + """ + A string identifying the data type. 
Will be used for display in, e.g. ``Series.dtype`` """ @@ -247,7 +251,8 @@ def name(self): @classmethod def construct_array_type(cls): - """Return the array type associated with this dtype + """ + Return the array type associated with this dtype Returns ------- @@ -257,7 +262,8 @@ def construct_array_type(cls): @classmethod def construct_from_string(cls, string): - """Attempt to construct this type from a string. + """ + Attempt to construct this type from a string. Parameters ---------- diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index fee983f969221..e432f3604f7b1 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -342,7 +342,8 @@ def _hash_categories(categories, ordered=True): @classmethod def construct_array_type(cls): - """Return the array type associated with this dtype + """ + Return the array type associated with this dtype Returns ------- @@ -353,7 +354,8 @@ def construct_array_type(cls): @classmethod def construct_from_string(cls, string): - """ attempt to construct this type from a string, raise a TypeError if + """ + attempt to construct this type from a string, raise a TypeError if it's not possible """ try: if string == 'category': @@ -459,7 +461,9 @@ def categories(self): @property def ordered(self): - """Whether the categories have an ordered relationship""" + """ + Whether the categories have an ordered relationship + """ return self._ordered @property @@ -488,7 +492,8 @@ class DatetimeTZDtype(PandasExtensionDtype): _cache = {} def __new__(cls, unit=None, tz=None): - """ Create a new unit if needed, otherwise return from the cache + """ + Create a new unit if needed, otherwise return from the cache Parameters ---------- @@ -547,7 +552,8 @@ def __new__(cls, unit=None, tz=None): @classmethod def construct_array_type(cls): - """Return the array type associated with this dtype + """ + Return the array type associated with this dtype Returns ------- @@ -558,7 +564,8 @@ def 
construct_array_type(cls): @classmethod def construct_from_string(cls, string): - """ attempt to construct this type from a string, raise a TypeError if + """ + attempt to construct this type from a string, raise a TypeError if it's not possible """ try: @@ -775,7 +782,8 @@ def __new__(cls, subtype=None): @classmethod def construct_array_type(cls): - """Return the array type associated with this dtype + """ + Return the array type associated with this dtype Returns ------- diff --git a/pandas/core/generic.py b/pandas/core/generic.py index f43b93f200db3..1918eb08831ac 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -152,6 +152,9 @@ def _init_mgr(self, mgr, axes=None, dtype=None, copy=False): @property def is_copy(self): + """ + Return the copy. + """ warnings.warn("Attribute 'is_copy' is deprecated and will be removed " "in a future version.", FutureWarning, stacklevel=2) return self._is_copy @@ -415,12 +418,16 @@ def _stat_axis(self): @property def shape(self): - """Return a tuple of axis dimensions""" + """ + Return a tuple of axis dimensions + """ return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS) @property def axes(self): - """Return index label(s) of the internal NDFrame""" + """ + Return index label(s) of the internal NDFrame + """ # we do it this way because if we have reversed axes, then # the block manager shows then reversed return [self._get_axis(a) for a in self._AXIS_ORDERS] @@ -685,7 +692,8 @@ def swapaxes(self, axis1, axis2, copy=True): return self._constructor(new_values, *new_axes).__finalize__(self) def droplevel(self, level, axis=0): - """Return DataFrame with requested index / column level(s) removed. + """ + Return DataFrame with requested index / column level(s) removed. .. versionadded:: 0.24.0 @@ -1457,7 +1465,8 @@ def __nonzero__(self): __bool__ = __nonzero__ def bool(self): - """Return the bool of a single element PandasObject. + """ + Return the bool of a single element PandasObject. 
This must be a boolean scalar value, either True or False. Raise a ValueError if the PandasObject does not have exactly 1 element, or that @@ -1893,7 +1902,9 @@ def __array_wrap__(self, result, context=None): # return dict(typestr=values.dtype.str,shape=values.shape,data=values) def to_dense(self): - """Return dense representation of NDFrame (as opposed to sparse)""" + """ + Return dense representation of NDFrame (as opposed to sparse) + """ # compat return self @@ -3550,7 +3561,8 @@ def xs(self, key, axis=0, level=None, drop_level=True): _xs = xs def select(self, crit, axis=0): - """Return data corresponding to axis labels matching criteria + """ + Return data corresponding to axis labels matching criteria .. deprecated:: 0.21.0 Use df.loc[df.index.map(crit)] to select via labels @@ -4311,7 +4323,8 @@ def _needs_reindex_multi(self, axes, method, level): def _reindex_multi(self, axes, copy, fill_value): return NotImplemented - _shared_docs['reindex_axis'] = ("""Conform input object to new index + _shared_docs['reindex_axis'] = (""" + Conform input object to new index with optional filling logic, placing NA/NaN in locations having no value in the previous index. A new object is produced unless the new index is equivalent to the current one and copy=False. @@ -5116,7 +5129,8 @@ def _get_bool_data(self): # Internal Interface Methods def as_matrix(self, columns=None): - """Convert the frame to its Numpy-array representation. + """ + Convert the frame to its Numpy-array representation. .. deprecated:: 0.23.0 Use :meth:`DataFrame.values` instead. @@ -5757,7 +5771,8 @@ def _convert(self, datetime=False, numeric=False, timedelta=False, def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True): - """Attempt to infer better dtype for object columns. + """ + Attempt to infer better dtype for object columns. .. 
deprecated:: 0.21.0 @@ -9662,7 +9677,9 @@ def describe_1d(data): return d def _check_percentile(self, q): - """Validate percentiles (used by describe and quantile).""" + """ + Validate percentiles (used by describe and quantile). + """ msg = ("percentiles should all be in the interval [0, 1]. " "Try {0} instead.") @@ -9821,7 +9838,9 @@ def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs): @classmethod def _add_numeric_operations(cls): - """Add the operations to the cls; evaluate the doc strings again""" + """ + Add the operations to the cls; evaluate the doc strings again + """ axis_descr, name, name2 = _doc_parms(cls) @@ -9947,7 +9966,8 @@ def compound(self, axis=None, skipna=None, level=None): @classmethod def _add_series_only_operations(cls): - """Add the series only operations to the cls; evaluate the doc + """ + Add the series only operations to the cls; evaluate the doc strings again. """ @@ -9973,7 +9993,8 @@ def nanptp(values, axis=0, skipna=True): @classmethod def _add_series_or_dataframe_operations(cls): - """Add the series or dataframe only operations to the cls; evaluate + """ + Add the series or dataframe only operations to the cls; evaluate the doc strings again. """ @@ -10035,7 +10056,8 @@ def transform(self, func, *args, **kwargs): """ def _find_valid_index(self, how): - """Retrieves the index of the first valid value. + """ + Retrieves the index of the first valid value. 
Parameters ---------- diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5e25efe77d8b9..8542a59a69d38 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -216,7 +216,9 @@ def ceil(self, freq, ambiguous='raise', nonexistent='raise'): class DatetimeIndexOpsMixin(DatetimeLikeArrayMixin): - """ common ops mixin to support a unified interface datetimelike Index """ + """ + common ops mixin to support a unified interface datetimelike Index + """ # override DatetimeLikeArrayMixin method copy = Index.copy diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index f03376c32f7f4..1e2f0d8108f02 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -1,4 +1,3 @@ - # pylint: disable=E1101,E1103,W0232 import datetime from sys import getsizeof diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 56df454bddf1c..ef0c4b3b8a9d7 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -960,7 +960,8 @@ def asi8(self): return self.view('i8') def item(self): - """ return the first element of the underlying data as a python + """ + return the first element of the underlying data as a python scalar """ # TODO(DatetimeArray): remove diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index cfa451db866be..0914324a03f84 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -2415,7 +2415,8 @@ def _convert_key(self, key, is_setter=False): def length_of_indexer(indexer, target=None): - """return the length of a single non-tuple indexer which could be a slice + """ + return the length of a single non-tuple indexer which could be a slice """ if target is not None and isinstance(indexer, slice): target_len = len(target) @@ -2443,7 +2444,8 @@ def length_of_indexer(indexer, target=None): def convert_to_index_sliceable(obj, key): - """if we are index sliceable, then return my slicer, otherwise 
return None + """ + if we are index sliceable, then return my slicer, otherwise return None """ idx = obj.index if isinstance(key, slice): @@ -2493,7 +2495,8 @@ def check_bool_indexer(ax, key): def check_setitem_lengths(indexer, value, values): - """Validate that value and indexer are the same length. + """ + Validate that value and indexer are the same length. An special-case is allowed for when the indexer is a boolean array and the number of true values equals the length of ``value``. In @@ -2536,7 +2539,8 @@ def check_setitem_lengths(indexer, value, values): def convert_missing_indexer(indexer): - """ reverse convert a missing indexer, which is a dict + """ + reverse convert a missing indexer, which is a dict return the scalar indexer and a boolean indicating if we converted """ @@ -2553,7 +2557,9 @@ def convert_missing_indexer(indexer): def convert_from_missing_indexer_tuple(indexer, axes): - """ create a filtered indexer that doesn't have any missing indexers """ + """ + create a filtered indexer that doesn't have any missing indexers + """ def get_indexer(_i, _idx): return (axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else @@ -2607,7 +2613,8 @@ def maybe_convert_indices(indices, n): def validate_indices(indices, n): - """Perform bounds-checking for an indexer. + """ + Perform bounds-checking for an indexer. -1 is allowed for indicating missing values. diff --git a/pandas/core/series.py b/pandas/core/series.py index c3bcd2d76c27a..ecbe899525fdb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -389,6 +389,9 @@ def _update_inplace(self, result, **kwargs): @property def name(self): + """ + Return name of the Series. + """ return self._name @name.setter @@ -678,6 +681,9 @@ def __array_prepare__(self, result, context=None): @property def real(self): + """ + Return the real value of vector. + """ return self.values.real @real.setter @@ -686,6 +692,9 @@ def real(self, v): @property def imag(self): + """ + Return imag value of vector. 
+ """ return self.values.imag @imag.setter diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 0b791f6f91aa3..d372623472f3c 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2804,7 +2804,8 @@ def rfind(self, sub, start=0, end=None): return self._wrap_result(result) def normalize(self, form): - """Return the Unicode normal form for the strings in the Series/Index. + """ + Return the Unicode normal form for the strings in the Series/Index. For more information on the forms, see the :func:`unicodedata.normalize`. diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index ee44a64514f4f..a6d71b7db3cfd 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -700,9 +700,10 @@ def coerce(values): def _attempt_YYYYMMDD(arg, errors): - """ try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, - arg is a passed in as an object dtype, but could really be ints/strings - with nan-like/or floats (e.g. with nan) + """ + try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like, + arg is a passed in as an object dtype, but could really be ints/strings + with nan-like/or floats (e.g. with nan) Parameters ---------- diff --git a/pandas/plotting/_converter.py b/pandas/plotting/_converter.py index 20e4fdffc39b6..8139694138a84 100644 --- a/pandas/plotting/_converter.py +++ b/pandas/plotting/_converter.py @@ -55,7 +55,8 @@ def get_pairs(): def register(explicit=True): - """Register Pandas Formatters and Converters with matplotlib + """ + Register Pandas Formatters and Converters with matplotlib This function modifies the global ``matplotlib.units.registry`` dictionary. Pandas adds custom converters for @@ -87,7 +88,8 @@ def register(explicit=True): def deregister(): - """Remove pandas' formatters and converters + """ + Remove pandas' formatters and converters Removes the custom converters added by :func:`register`. 
This attempts to set the state of the registry back to the state before diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 8574275c8478b..c50514a1414cd 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1948,7 +1948,6 @@ def _plot(data, x=None, y=None, subplots=False, for bar plot layout by `position` keyword. From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center) %(klass_note)s - """ @@ -2709,7 +2708,8 @@ def __call__(self, *args, **kwargs): class SeriesPlotMethods(BasePlotMethods): - """Series plotting accessor and method + """ + Series plotting accessor and method Examples --------
- [ ]xref #24059 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24065
2018-12-03T12:53:48Z
2018-12-03T15:01:01Z
2018-12-03T15:01:00Z
2018-12-03T15:08:37Z
CI: Fixing benchmark log
diff --git a/azure-pipelines.yml b/azure-pipelines.yml index a58f82ec6de49..409b1ac8c9df3 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -114,7 +114,7 @@ jobs: ASV_OUTPUT="$(asv dev)" if [[ $(echo "$ASV_OUTPUT" | grep "failed") ]]; then echo "##vso[task.logissue type=error]Benchmarks run with errors" - echo $ASV_OUTPUT + echo "$ASV_OUTPUT" exit 1 else echo "Benchmarks run without errors"
- [X] closes #24061 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24062
2018-12-03T11:02:22Z
2018-12-03T15:59:32Z
2018-12-03T15:59:32Z
2018-12-03T15:59:35Z
BUG/TST: Fix TimedeltaIndex comparisons with invalid types
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 15476c3bc2e13..eab5956735f12 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1312,6 +1312,7 @@ Timedelta - Bug in :class:`TimedeltaIndex` where adding ``np.timedelta64('NaT')`` incorrectly returned an all-`NaT` :class:`DatetimeIndex` instead of an all-`NaT` :class:`TimedeltaIndex` (:issue:`23215`) - Bug in :class:`Timedelta` and :func:`to_timedelta()` have inconsistencies in supported unit string (:issue:`21762`) - Bug in :class:`TimedeltaIndex` division where dividing by another :class:`TimedeltaIndex` raised ``TypeError`` instead of returning a :class:`Float64Index` (:issue:`23829`, :issue:`22631`) +- Bug in :class:`TimedeltaIndex` comparison operations where comparing against non-``Timedelta``-like objects would raise ``TypeError`` instead of returning all-``False`` for ``__eq__`` and all-``True`` for ``__ne__`` (:issue:`24056`) Timezones ^^^^^^^^^ diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index e731dd33f5bb5..12611f9e81248 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -174,6 +174,9 @@ class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin, # by returning NotImplemented timetuple = None + # Needed so that Timestamp.__richcmp__(DateTimeArray) operates pointwise + ndim = 1 + # ensure that operations with numpy arrays defer to our implementation __array_priority__ = 1000 @@ -217,6 +220,12 @@ def __new__(cls, values, freq=None, tz=None, dtype=None): # if dtype has an embedded tz, capture it tz = dtl.validate_tz_from_dtype(dtype, tz) + if not hasattr(values, "dtype"): + if np.ndim(values) == 0: + # i.e. 
iterator + values = list(values) + values = np.array(values) + if is_object_dtype(values): # kludge; dispatch until the DatetimeArray constructor is complete from pandas import DatetimeIndex diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 830283d31a929..89956355c9508 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -22,6 +22,7 @@ ABCDataFrame, ABCIndexClass, ABCSeries, ABCTimedeltaIndex) from pandas.core.dtypes.missing import isna +from pandas.core import ops from pandas.core.algorithms import checked_add_with_arr, unique1d import pandas.core.common as com @@ -70,25 +71,29 @@ def _td_array_cmp(cls, op): opname = '__{name}__'.format(name=op.__name__) nat_result = True if opname == '__ne__' else False + meth = getattr(dtl.DatetimeLikeArrayMixin, opname) + def wrapper(self, other): - msg = "cannot compare a {cls} with type {typ}" - meth = getattr(dtl.DatetimeLikeArrayMixin, opname) if _is_convertible_to_td(other) or other is NaT: try: other = _to_m8(other) except ValueError: # failed to parse as timedelta - raise TypeError(msg.format(cls=type(self).__name__, - typ=type(other).__name__)) + return ops.invalid_comparison(self, other, op) + result = meth(self, other) if isna(other): result.fill(nat_result) elif not is_list_like(other): - raise TypeError(msg.format(cls=type(self).__name__, - typ=type(other).__name__)) + return ops.invalid_comparison(self, other, op) + else: - other = type(self)(other)._data + try: + other = type(self)(other)._data + except (ValueError, TypeError): + return ops.invalid_comparison(self, other, op) + result = meth(self, other) result = com.values_from_object(result) @@ -108,6 +113,9 @@ class TimedeltaArrayMixin(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): _typ = "timedeltaarray" __array_priority__ = 1000 + # Needed so that NaT.__richcmp__(DateTimeArray) operates pointwise + ndim = 1 + @property def _box_func(self): return lambda x: Timedelta(x, unit='ns') diff 
--git a/pandas/core/generic.py b/pandas/core/generic.py index e780c8344869f..b3cb5c3be67f9 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -111,6 +111,10 @@ class NDFrame(PandasObject, SelectionMixin): _metadata = [] _is_copy = None + # dummy attribute so that datetime.__eq__(Series/DataFrame) defers + # by returning NotImplemented + timetuple = None + # ---------------------------------------------------------------------- # Constructors diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 6ea31422478f2..bd5268808e7b2 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -1115,7 +1115,7 @@ def dispatch_to_series(left, right, func, str_rep=None, axis=None): import pandas.core.computation.expressions as expressions right = lib.item_from_zerodim(right) - if lib.is_scalar(right): + if lib.is_scalar(right) or np.ndim(right) == 0: def column_op(a, b): return {i: func(a.iloc[:, i], b) diff --git a/pandas/tests/arithmetic/test_datetime64.py b/pandas/tests/arithmetic/test_datetime64.py index bc9b712e78d03..02e9c212b56ef 100644 --- a/pandas/tests/arithmetic/test_datetime64.py +++ b/pandas/tests/arithmetic/test_datetime64.py @@ -25,6 +25,17 @@ DatetimeIndex, TimedeltaIndex) +def assert_all(obj): + """ + Test helper to call call obj.all() the appropriate number of times on + a Series or DataFrame. 
+ """ + if isinstance(obj, pd.DataFrame): + assert obj.all().all() + else: + assert obj.all() + + # ------------------------------------------------------------------ # Comparisons @@ -86,11 +97,16 @@ def test_comparison_invalid(self, box_with_array): [Period('2011-01', freq='M'), NaT, Period('2011-03', freq='M')] ]) @pytest.mark.parametrize('dtype', [None, object]) - def test_nat_comparisons_scalar(self, dtype, data, box): - xbox = box if box is not pd.Index else np.ndarray + def test_nat_comparisons_scalar(self, dtype, data, box_with_array): + if box_with_array is tm.to_array and dtype is object: + # dont bother testing ndarray comparison methods as this fails + # on older numpys (since they check object identity) + return + + xbox = box_with_array if box_with_array is not pd.Index else np.ndarray left = Series(data, dtype=dtype) - left = tm.box_expected(left, box) + left = tm.box_expected(left, box_with_array) expected = [False, False, False] expected = tm.box_expected(expected, xbox) @@ -290,23 +306,24 @@ def test_dti_cmp_datetimelike(self, other, tz_naive_fixture): expected = np.array([True, False]) tm.assert_numpy_array_equal(result, expected) - def dti_cmp_non_datetime(self, tz_naive_fixture): + def dt64arr_cmp_non_datetime(self, tz_naive_fixture, box_with_array): # GH#19301 by convention datetime.date is not considered comparable # to Timestamp or DatetimeIndex. This may change in the future. 
tz = tz_naive_fixture dti = pd.date_range('2016-01-01', periods=2, tz=tz) + dtarr = tm.box_expected(dti, box_with_array) other = datetime(2016, 1, 1).date() - assert not (dti == other).any() - assert (dti != other).all() + assert not (dtarr == other).any() + assert (dtarr != other).all() with pytest.raises(TypeError): - dti < other + dtarr < other with pytest.raises(TypeError): - dti <= other + dtarr <= other with pytest.raises(TypeError): - dti > other + dtarr > other with pytest.raises(TypeError): - dti >= other + dtarr >= other @pytest.mark.parametrize('other', [None, np.nan, pd.NaT]) def test_dti_eq_null_scalar(self, other, tz_naive_fixture): @@ -323,49 +340,67 @@ def test_dti_ne_null_scalar(self, other, tz_naive_fixture): assert (dti != other).all() @pytest.mark.parametrize('other', [None, np.nan]) - def test_dti_cmp_null_scalar_inequality(self, tz_naive_fixture, other): + def test_dti_cmp_null_scalar_inequality(self, tz_naive_fixture, other, + box_with_array): # GH#19301 tz = tz_naive_fixture dti = pd.date_range('2016-01-01', periods=2, tz=tz) + # FIXME: ValueError with transpose + dtarr = tm.box_expected(dti, box_with_array, transpose=False) with pytest.raises(TypeError): - dti < other + dtarr < other with pytest.raises(TypeError): - dti <= other + dtarr <= other with pytest.raises(TypeError): - dti > other + dtarr > other with pytest.raises(TypeError): - dti >= other + dtarr >= other @pytest.mark.parametrize('dtype', [None, object]) - def test_dti_cmp_nat(self, dtype): + def test_dti_cmp_nat(self, dtype, box_with_array): + if box_with_array is tm.to_array and dtype is object: + # dont bother testing ndarray comparison methods as this fails + # on older numpys (since they check object identity) + return + + xbox = box_with_array if box_with_array is not pd.Index else np.ndarray + left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]) right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]) + left = 
tm.box_expected(left, box_with_array) + right = tm.box_expected(right, box_with_array) + lhs, rhs = left, right if dtype is object: lhs, rhs = left.astype(object), right.astype(object) result = rhs == lhs expected = np.array([False, False, True]) - tm.assert_numpy_array_equal(result, expected) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) result = lhs != rhs expected = np.array([True, True, False]) - tm.assert_numpy_array_equal(result, expected) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs == pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT == rhs, expected) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(lhs == pd.NaT, expected) + tm.assert_equal(pd.NaT == rhs, expected) expected = np.array([True, True, True]) - tm.assert_numpy_array_equal(lhs != pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT != lhs, expected) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(lhs != pd.NaT, expected) + tm.assert_equal(pd.NaT != lhs, expected) expected = np.array([False, False, False]) - tm.assert_numpy_array_equal(lhs < pd.NaT, expected) - tm.assert_numpy_array_equal(pd.NaT > lhs, expected) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(lhs < pd.NaT, expected) + tm.assert_equal(pd.NaT > lhs, expected) def test_dti_cmp_nat_behaves_like_float_cmp_nan(self): fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) @@ -459,36 +494,47 @@ def test_dti_cmp_nat_behaves_like_float_cmp_nan(self): @pytest.mark.parametrize('op', [operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le]) - def test_comparison_tzawareness_compat(self, op): + def test_comparison_tzawareness_compat(self, op, box_with_array): # GH#18162 dr = pd.date_range('2016-01-01', periods=6) dz = dr.tz_localize('US/Pacific') + # FIXME: ValueError with transpose + dr = tm.box_expected(dr, box_with_array, 
transpose=False) + dz = tm.box_expected(dz, box_with_array, transpose=False) + with pytest.raises(TypeError): op(dr, dz) - with pytest.raises(TypeError): - op(dr, list(dz)) + if box_with_array is not pd.DataFrame: + # DataFrame op is invalid until transpose bug is fixed + with pytest.raises(TypeError): + op(dr, list(dz)) with pytest.raises(TypeError): op(dz, dr) - with pytest.raises(TypeError): - op(dz, list(dr)) + if box_with_array is not pd.DataFrame: + # DataFrame op is invalid until transpose bug is fixed + with pytest.raises(TypeError): + op(dz, list(dr)) # Check that there isn't a problem aware-aware and naive-naive do not # raise - assert (dr == dr).all() - assert (dr == list(dr)).all() - assert (dz == dz).all() - assert (dz == list(dz)).all() + assert_all(dr == dr) + assert_all(dz == dz) + if box_with_array is not pd.DataFrame: + # DataFrame doesn't align the lists correctly unless we transpose, + # which we cannot do at the moment + assert (dr == list(dr)).all() + assert (dz == list(dz)).all() # Check comparisons against scalar Timestamps ts = pd.Timestamp('2000-03-14 01:59') ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam') - assert (dr > ts).all() + assert_all(dr > ts) with pytest.raises(TypeError): op(dr, ts_tz) - assert (dz > ts_tz).all() + assert_all(dz > ts_tz) with pytest.raises(TypeError): op(dz, ts) @@ -502,13 +548,18 @@ def test_comparison_tzawareness_compat(self, op): @pytest.mark.parametrize('other', [datetime(2016, 1, 1), Timestamp('2016-01-01'), np.datetime64('2016-01-01')]) - def test_scalar_comparison_tzawareness(self, op, other, tz_aware_fixture): + def test_scalar_comparison_tzawareness(self, op, other, tz_aware_fixture, + box_with_array): tz = tz_aware_fixture dti = pd.date_range('2016-01-01', periods=2, tz=tz) + + # FIXME: ValueError with transpose + dtarr = tm.box_expected(dti, box_with_array, transpose=False) + with pytest.raises(TypeError): - op(dti, other) + op(dtarr, other) with pytest.raises(TypeError): - op(other, 
dti) + op(other, dtarr) @pytest.mark.parametrize('op', [operator.eq, operator.ne, operator.gt, operator.ge, @@ -558,18 +609,25 @@ def test_dti_cmp_str(self, tz_naive_fixture): @pytest.mark.parametrize('other', ['foo', 99, 4.0, object(), timedelta(days=2)]) - def test_dti_cmp_scalar_invalid(self, other, tz_naive_fixture): + def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, + box_with_array): # GH#22074 tz = tz_naive_fixture + xbox = box_with_array if box_with_array is not pd.Index else np.ndarray + rng = date_range('1/1/2000', periods=10, tz=tz) + # FIXME: ValueError with transpose + rng = tm.box_expected(rng, box_with_array, transpose=False) result = rng == other expected = np.array([False] * 10) - tm.assert_numpy_array_equal(result, expected) + expected = tm.box_expected(expected, xbox, transpose=False) + tm.assert_equal(result, expected) result = rng != other expected = np.array([True] * 10) - tm.assert_numpy_array_equal(result, expected) + expected = tm.box_expected(expected, xbox, transpose=False) + tm.assert_equal(result, expected) with pytest.raises(TypeError): rng < other diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index 5f2fd98e29b96..5404d3f5f1915 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -42,21 +42,31 @@ def test_compare_timedelta_series(self): expected = pd.Series([False, True]) tm.assert_series_equal(actual, expected) - def test_tdi_cmp_str_invalid(self): + def test_tdi_cmp_str_invalid(self, box_with_array): # GH#13624 + xbox = box_with_array if box_with_array is not pd.Index else np.ndarray tdi = TimedeltaIndex(['1 day', '2 days']) + tdarr = tm.box_expected(tdi, box_with_array) - for left, right in [(tdi, 'a'), ('a', tdi)]: + for left, right in [(tdarr, 'a'), ('a', tdarr)]: with pytest.raises(TypeError): left > right - with pytest.raises(TypeError): - # FIXME: Shouldn't this return all-False? 
- left == right - + left >= right with pytest.raises(TypeError): - # FIXME: Shouldn't this return all-True? - left != right + left < right + with pytest.raises(TypeError): + left <= right + + result = left == right + expected = np.array([False, False], dtype=bool) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + result = left != right + expected = np.array([True, True], dtype=bool) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) @pytest.mark.parametrize('dtype', [None, object]) def test_comp_nat(self, dtype): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 24aff12e64192..97c64d013d241 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1073,6 +1073,7 @@ def assert_period_array_equal(left, right, obj='PeriodArray'): def assert_datetime_array_equal(left, right, obj='DatetimeArray'): + __tracebackhide__ = True _check_isinstance(left, right, DatetimeArray) assert_numpy_array_equal(left._data, right._data, @@ -1082,6 +1083,7 @@ def assert_datetime_array_equal(left, right, obj='DatetimeArray'): def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'): + __tracebackhide__ = True _check_isinstance(left, right, TimedeltaArray) assert_numpy_array_equal(left._data, right._data, obj='{obj}._data'.format(obj=obj))
Parametrize a handful of tests. - [x] closes #23063 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24056
2018-12-02T23:52:30Z
2018-12-05T12:47:23Z
2018-12-05T12:47:23Z
2018-12-05T14:44:09Z
TST: run setitem tests for IntegerArray
diff --git a/pandas/core/arrays/integer.py b/pandas/core/arrays/integer.py index e9d51aaea4218..87dc5f8aaf1e4 100644 --- a/pandas/core/arrays/integer.py +++ b/pandas/core/arrays/integer.py @@ -58,6 +58,11 @@ def numpy_dtype(self): def kind(self): return self.numpy_dtype.kind + @cache_readonly + def itemsize(self): + """ Return the number of bytes in this dtype """ + return self.numpy_dtype.itemsize + @classmethod def construct_array_type(cls): """Return the array type associated with this dtype diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py index efee647389884..218b2e9bd0e11 100644 --- a/pandas/tests/extension/test_integer.py +++ b/pandas/tests/extension/test_integer.py @@ -174,6 +174,10 @@ class TestGetitem(base.BaseGetitemTests): pass +class TestSetitem(base.BaseSetitemTests): + pass + + class TestMissing(base.BaseMissingTests): pass
https://api.github.com/repos/pandas-dev/pandas/pulls/24054
2018-12-02T23:23:51Z
2018-12-03T16:03:09Z
2018-12-03T16:03:09Z
2018-12-03T16:03:32Z
REF/TST: Add more pytest idiom to indexing/multiindex/test_getitem.py
diff --git a/pandas/tests/indexing/multiindex/test_getitem.py b/pandas/tests/indexing/multiindex/test_getitem.py index 00b30bab37441..1013bb3e90149 100644 --- a/pandas/tests/indexing/multiindex/test_getitem.py +++ b/pandas/tests/indexing/multiindex/test_getitem.py @@ -1,5 +1,3 @@ -from warnings import catch_warnings, simplefilter - import numpy as np import pytest @@ -11,335 +9,339 @@ from pandas.util import testing as tm -@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning") -class TestMultiIndexGetItem(object): - - def test_series_getitem_multiindex(self): - - # GH 6018 - # series regression getitem with a multi-index - - s = Series([1, 2, 3]) - s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)]) - - result = s[:, 0] - expected = Series([1], index=[0]) - tm.assert_series_equal(result, expected) - - result = s.loc[:, 1] - expected = Series([2, 3], index=[1, 2]) - tm.assert_series_equal(result, expected) - - # xs - result = s.xs(0, level=0) - expected = Series([1], index=[0]) - tm.assert_series_equal(result, expected) - - result = s.xs(1, level=1) - expected = Series([2, 3], index=[1, 2]) - tm.assert_series_equal(result, expected) - - # GH6258 - dt = list(date_range('20130903', periods=3)) - idx = MultiIndex.from_product([list('AB'), dt]) - s = Series([1, 3, 4, 1, 3, 4], index=idx) +@pytest.mark.parametrize('access_method', [lambda s, x: s[:, x], + lambda s, x: s.loc[:, x], + lambda s, x: s.xs(x, level=1)]) +@pytest.mark.parametrize('level1_value, expected', [ + (0, Series([1], index=[0])), + (1, Series([2, 3], index=[1, 2])) +]) +def test_series_getitem_multiindex(access_method, level1_value, expected): + + # GH 6018 + # series regression getitem with a multi-index + + s = Series([1, 2, 3]) + s.index = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)]) + result = access_method(s, level1_value) + tm.assert_series_equal(result, expected) + + +def test_series_getitem_multiindex_xs(): + # GH6258 + dt = list(date_range('20130903', periods=3)) + idx = 
MultiIndex.from_product([list('AB'), dt]) + s = Series([1, 3, 4, 1, 3, 4], index=idx) + + result = s.xs('20130903', level=1) + expected = Series([1, 1], index=list('AB')) + tm.assert_series_equal(result, expected) + + +def test_series_getitem_multiindex_xs_by_label(): + # GH5684 + idx = MultiIndex.from_tuples([('a', 'one'), ('a', 'two'), ('b', 'one'), + ('b', 'two')]) + s = Series([1, 2, 3, 4], index=idx) + s.index.set_names(['L1', 'L2'], inplace=True) + result = s.xs('one', level='L2') + expected = Series([1, 3], index=['a', 'b']) + expected.index.set_names(['L1'], inplace=True) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize('level0_value', ['D', 'A']) +def test_getitem_duplicates_multiindex(level0_value): + # GH 5725 the 'A' happens to be a valid Timestamp so the doesn't raise + # the appropriate error, only in PY3 of course! + + index = MultiIndex(levels=[[level0_value, 'B', 'C'], + [0, 26, 27, 37, 57, 67, 75, 82]], + codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], + [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]], + names=['tag', 'day']) + arr = np.random.randn(len(index), 1) + df = DataFrame(arr, index=index, columns=['val']) + + # confirm indexing on missing value raises KeyError + if level0_value != 'A': + msg = "'A'" + with pytest.raises(KeyError, match=msg): + df.val['A'] - result = s.xs('20130903', level=1) - expected = Series([1, 1], index=list('AB')) - tm.assert_series_equal(result, expected) + msg = "'X'" + with pytest.raises(KeyError, match=msg): + df.val['X'] + + result = df.val[level0_value] + expected = Series(arr.ravel()[0:3], name='val', index=Index( + [26, 37, 57], name='day')) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize('indexer, is_level1, expected_error', [ + ([], False, None), # empty ok + (['A'], False, None), + (['A', 'D'], False, None), + (['D'], False, r"\['D'\] not in index"), # not any values found + (pd.IndexSlice[:, ['foo']], True, None), + (pd.IndexSlice[:, ['foo', 'bah']], True, None) +]) +def 
test_getitem_duplicates_multiindex_missing_indexers(indexer, is_level1, + expected_error): + # GH 7866 + # multi-index slicing with missing indexers + idx = MultiIndex.from_product([['A', 'B', 'C'], + ['foo', 'bar', 'baz']], + names=['one', 'two']) + s = Series(np.arange(9, dtype='int64'), index=idx).sort_index() + + if indexer == []: + expected = s.iloc[[]] + elif is_level1: + expected = Series([0, 3, 6], index=MultiIndex.from_product( + [['A', 'B', 'C'], ['foo']], names=['one', 'two'])).sort_index() + else: + exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']], + names=['one', 'two']) + expected = Series(np.arange(3, dtype='int64'), + index=exp_idx).sort_index() - # GH5684 - idx = MultiIndex.from_tuples([('a', 'one'), ('a', 'two'), ('b', 'one'), - ('b', 'two')]) - s = Series([1, 2, 3, 4], index=idx) - s.index.set_names(['L1', 'L2'], inplace=True) - result = s.xs('one', level='L2') - expected = Series([1, 3], index=['a', 'b']) - expected.index.set_names(['L1'], inplace=True) + if expected_error is not None: + with pytest.raises(KeyError, match=expected_error): + s.loc[indexer] + else: + result = s.loc[indexer] tm.assert_series_equal(result, expected) - def test_getitem_duplicates_multiindex(self): - # GH 5725 the 'A' happens to be a valid Timestamp so the doesn't raise - # the appropriate error, only in PY3 of course! 
- - index = MultiIndex(levels=[['D', 'B', 'C'], - [0, 26, 27, 37, 57, 67, 75, 82]], - codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], - [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]], - names=['tag', 'day']) - arr = np.random.randn(len(index), 1) - df = DataFrame(arr, index=index, columns=['val']) - result = df.val['D'] - expected = Series(arr.ravel()[0:3], name='val', index=Index( - [26, 37, 57], name='day')) - tm.assert_series_equal(result, expected) - def f(): - df.val['A'] +@pytest.mark.parametrize('columns_indexer', [ + ([], slice(None)), + (['foo'], []) +]) +def test_getitem_duplicates_multiindex_empty_indexer(columns_indexer): + # GH 8737 + # empty indexer + multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'], + ['alpha', 'beta'])) + df = DataFrame(np.random.randn(5, 6), index=range(5), columns=multi_index) + df = df.sort_index(level=0, axis=1) + + expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0]) + result = df.loc[:, columns_indexer] + tm.assert_frame_equal(result, expected) + + +def test_getitem_duplicates_multiindex_non_scalar_type_object(): + # regression from < 0.14.0 + # GH 7914 + df = DataFrame([[np.mean, np.median], ['mean', 'median']], + columns=MultiIndex.from_tuples([('functs', 'mean'), + ('functs', 'median')]), + index=['function', 'name']) + result = df.loc['function', ('functs', 'mean')] + expected = np.mean + assert result == expected + + +def test_getitem_simple(multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + df = frame.T + + col = df['foo', 'one'] + tm.assert_almost_equal(col.values, df.values[:, 0]) + msg = r"\('foo', 'four'\)" + with pytest.raises(KeyError, match=msg): + df[('foo', 'four')] + msg = "'foobar'" + with pytest.raises(KeyError, match=msg): + df['foobar'] - pytest.raises(KeyError, f) - def f(): - df.val['X'] - - pytest.raises(KeyError, f) +@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning") +def test_series_getitem(multiindex_year_month_day_dataframe_random_data): + ymd = 
multiindex_year_month_day_dataframe_random_data + s = ymd['A'] - # A is treated as a special Timestamp - index = MultiIndex(levels=[['A', 'B', 'C'], - [0, 26, 27, 37, 57, 67, 75, 82]], - codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], - [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]], - names=['tag', 'day']) - df = DataFrame(arr, index=index, columns=['val']) - result = df.val['A'] - expected = Series(arr.ravel()[0:3], name='val', index=Index( - [26, 37, 57], name='day')) - tm.assert_series_equal(result, expected) + result = s[2000, 3] - def f(): - df.val['X'] + # TODO(wesm): unused? + # result2 = s.loc[2000, 3] - pytest.raises(KeyError, f) + expected = s.reindex(s.index[42:65]) + expected.index = expected.index.droplevel(0).droplevel(0) + tm.assert_series_equal(result, expected) - # GH 7866 - # multi-index slicing with missing indexers - idx = MultiIndex.from_product([['A', 'B', 'C'], - ['foo', 'bar', 'baz']], - names=['one', 'two']) - s = Series(np.arange(9, dtype='int64'), index=idx).sort_index() + result = s[2000, 3, 10] + expected = s[49] + assert result == expected - exp_idx = MultiIndex.from_product([['A'], ['foo', 'bar', 'baz']], - names=['one', 'two']) - expected = Series(np.arange(3, dtype='int64'), - index=exp_idx).sort_index() + # fancy + expected = s.reindex(s.index[49:51]) + result = s.loc[[(2000, 3, 10), (2000, 3, 13)]] + tm.assert_series_equal(result, expected) - result = s.loc[['A']] - tm.assert_series_equal(result, expected) - result = s.loc[['A', 'D']] - tm.assert_series_equal(result, expected) + result = s.ix[[(2000, 3, 10), (2000, 3, 13)]] + tm.assert_series_equal(result, expected) - # not any values found - pytest.raises(KeyError, lambda: s.loc[['D']]) + # key error + msg = "356" + with pytest.raises(KeyError, match=msg): + s.__getitem__((2000, 3, 4)) - # empty ok - result = s.loc[[]] - expected = s.iloc[[]] - tm.assert_series_equal(result, expected) - idx = pd.IndexSlice - expected = Series([0, 3, 6], index=MultiIndex.from_product( - [['A', 'B', 'C'], ['foo']], 
names=['one', 'two'])).sort_index() +def test_series_getitem_corner( + multiindex_year_month_day_dataframe_random_data): + ymd = multiindex_year_month_day_dataframe_random_data + s = ymd['A'] - result = s.loc[idx[:, ['foo']]] - tm.assert_series_equal(result, expected) - result = s.loc[idx[:, ['foo', 'bah']]] - tm.assert_series_equal(result, expected) + # don't segfault, GH #495 + # out of bounds access + msg = "index out of bounds" + with pytest.raises(IndexError, match=msg): + s.__getitem__(len(ymd)) - # GH 8737 - # empty indexer - multi_index = MultiIndex.from_product((['foo', 'bar', 'baz'], - ['alpha', 'beta'])) - df = DataFrame( - np.random.randn(5, 6), index=range(5), columns=multi_index) - df = df.sort_index(level=0, axis=1) - - expected = DataFrame(index=range(5), - columns=multi_index.reindex([])[0]) - result1 = df.loc[:, ([], slice(None))] - result2 = df.loc[:, (['foo'], [])] - tm.assert_frame_equal(result1, expected) - tm.assert_frame_equal(result2, expected) - - # regression from < 0.14.0 - # GH 7914 - df = DataFrame([[np.mean, np.median], ['mean', 'median']], - columns=MultiIndex.from_tuples([('functs', 'mean'), - ('functs', 'median')]), - index=['function', 'name']) - result = df.loc['function', ('functs', 'mean')] - assert result == np.mean - - def test_getitem_simple(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - df = frame.T - - col = df['foo', 'one'] - tm.assert_almost_equal(col.values, df.values[:, 0]) - with pytest.raises(KeyError): - df[('foo', 'four')] - with pytest.raises(KeyError): - df['foobar'] - - def test_series_getitem( - self, multiindex_year_month_day_dataframe_random_data): - ymd = multiindex_year_month_day_dataframe_random_data - s = ymd['A'] - - result = s[2000, 3] - - # TODO(wesm): unused? 
- # result2 = s.loc[2000, 3] - - expected = s.reindex(s.index[42:65]) - expected.index = expected.index.droplevel(0).droplevel(0) - tm.assert_series_equal(result, expected) + # generator + result = s[(x > 0 for x in s)] + expected = s[s > 0] + tm.assert_series_equal(result, expected) - result = s[2000, 3, 10] - expected = s[49] - assert result == expected - # fancy - expected = s.reindex(s.index[49:51]) - result = s.loc[[(2000, 3, 10), (2000, 3, 13)]] - tm.assert_series_equal(result, expected) +def test_frame_getitem_multicolumn_empty_level(): + f = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']}) + f.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'], + ['level3 item1', 'level3 item2']] - with catch_warnings(record=True): - simplefilter("ignore", DeprecationWarning) - result = s.ix[[(2000, 3, 10), (2000, 3, 13)]] - tm.assert_series_equal(result, expected) + result = f['level1 item1'] + expected = DataFrame([['1'], ['2'], ['3']], index=f.index, + columns=['level3 item1']) + tm.assert_frame_equal(result, expected) - # key error - pytest.raises(KeyError, s.__getitem__, (2000, 3, 4)) - def test_series_getitem_corner( - self, multiindex_year_month_day_dataframe_random_data): - ymd = multiindex_year_month_day_dataframe_random_data - s = ymd['A'] +@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning") +def test_getitem_tuple_plus_slice(): + # GH #671 + df = DataFrame({'a': lrange(10), + 'b': lrange(10), + 'c': np.random.randn(10), + 'd': np.random.randn(10)}) - # don't segfault, GH #495 - # out of bounds access - pytest.raises(IndexError, s.__getitem__, len(ymd)) + idf = df.set_index(['a', 'b']) - # generator - result = s[(x > 0 for x in s)] - expected = s[s > 0] - tm.assert_series_equal(result, expected) + result = idf.loc[(0, 0), :] + expected = idf.loc[0, 0] + expected2 = idf.xs((0, 0)) + expected3 = idf.ix[0, 0] - def test_frame_getitem_multicolumn_empty_level(self): - f = DataFrame({'a': ['1', '2', '3'], 'b': ['2', '3', '4']}) - 
f.columns = [['level1 item1', 'level1 item2'], ['', 'level2 item2'], - ['level3 item1', 'level3 item2']] + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result, expected2) + tm.assert_series_equal(result, expected3) - result = f['level1 item1'] - expected = DataFrame([['1'], ['2'], ['3']], index=f.index, - columns=['level3 item1']) - tm.assert_frame_equal(result, expected) - def test_getitem_tuple_plus_slice(self): - # GH #671 - df = DataFrame({'a': lrange(10), - 'b': lrange(10), - 'c': np.random.randn(10), - 'd': np.random.randn(10)}) +def test_getitem_toplevel(multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + df = frame.T - idf = df.set_index(['a', 'b']) + result = df['foo'] + expected = df.reindex(columns=df.columns[:3]) + expected.columns = expected.columns.droplevel(0) + tm.assert_frame_equal(result, expected) - result = idf.loc[(0, 0), :] - expected = idf.loc[0, 0] - expected2 = idf.xs((0, 0)) - with catch_warnings(record=True): - simplefilter("ignore", DeprecationWarning) - expected3 = idf.ix[0, 0] + result = df['bar'] + result2 = df.loc[:, 'bar'] - tm.assert_series_equal(result, expected) - tm.assert_series_equal(result, expected2) - tm.assert_series_equal(result, expected3) + expected = df.reindex(columns=df.columns[3:5]) + expected.columns = expected.columns.droplevel(0) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result, result2) - def test_getitem_toplevel(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - df = frame.T - result = df['foo'] - expected = df.reindex(columns=df.columns[:3]) - expected.columns = expected.columns.droplevel(0) - tm.assert_frame_equal(result, expected) +def test_getitem_int(multiindex_dataframe_random_data): + levels = [[0, 1], [0, 1, 2]] + codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] + index = MultiIndex(levels=levels, codes=codes) - result = df['bar'] - result2 = df.loc[:, 'bar'] + frame = 
DataFrame(np.random.randn(6, 2), index=index) - expected = df.reindex(columns=df.columns[3:5]) - expected.columns = expected.columns.droplevel(0) - tm.assert_frame_equal(result, expected) - tm.assert_frame_equal(result, result2) + result = frame.loc[1] + expected = frame[-3:] + expected.index = expected.index.droplevel(0) + tm.assert_frame_equal(result, expected) - def test_getitem_int(self, multiindex_dataframe_random_data): - levels = [[0, 1], [0, 1, 2]] - codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]] - index = MultiIndex(levels=levels, codes=codes) + # raises exception + msg = "3" + with pytest.raises(KeyError, match=msg): + frame.loc.__getitem__(3) - frame = DataFrame(np.random.randn(6, 2), index=index) + # however this will work + frame = multiindex_dataframe_random_data + result = frame.iloc[2] + expected = frame.xs(frame.index[2]) + tm.assert_series_equal(result, expected) - result = frame.loc[1] - expected = frame[-3:] - expected.index = expected.index.droplevel(0) - tm.assert_frame_equal(result, expected) - # raises exception - pytest.raises(KeyError, frame.loc.__getitem__, 3) +def test_frame_getitem_view(multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + df = frame.T.copy() - # however this will work - frame = multiindex_dataframe_random_data - result = frame.iloc[2] - expected = frame.xs(frame.index[2]) - tm.assert_series_equal(result, expected) + # this works because we are modifying the underlying array + # really a no-no + df['foo'].values[:] = 0 + assert (df['foo'].values == 0).all() - def test_frame_getitem_view(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - df = frame.T.copy() + # but not if it's mixed-type + df['foo', 'four'] = 'foo' + df = df.sort_index(level=0, axis=1) - # this works because we are modifying the underlying array - # really a no-no - df['foo'].values[:] = 0 - assert (df['foo'].values == 0).all() + # this will work, but will raise/warn as its chained assignment 
+ def f(): + df['foo']['one'] = 2 + return df - # but not if it's mixed-type - df['foo', 'four'] = 'foo' - df = df.sort_index(level=0, axis=1) + msg = "A value is trying to be set on a copy of a slice from a DataFrame" + with pytest.raises(com.SettingWithCopyError, match=msg): + df['foo']['one'] = 2 - # this will work, but will raise/warn as its chained assignment - def f(): - df['foo']['one'] = 2 - return df + try: + df = f() + except ValueError: + pass + assert (df['foo', 'one'] == 0).all() - pytest.raises(com.SettingWithCopyError, f) - try: - df = f() - except ValueError: - pass - assert (df['foo', 'one'] == 0).all() +def test_getitem_lowerdim_corner(multiindex_dataframe_random_data): + frame = multiindex_dataframe_random_data + msg = "11" + with pytest.raises(KeyError, match=msg): + frame.loc.__getitem__((('bar', 'three'), 'B')) - def test_getitem_lowerdim_corner(self, multiindex_dataframe_random_data): - frame = multiindex_dataframe_random_data - pytest.raises(KeyError, frame.loc.__getitem__, - (('bar', 'three'), 'B')) + # in theory should be inserting in a sorted space???? + frame.loc[('bar', 'three'), 'B'] = 0 + assert frame.sort_index().loc[('bar', 'three'), 'B'] == 0 - # in theory should be inserting in a sorted space???? - frame.loc[('bar', 'three'), 'B'] = 0 - assert frame.sort_index().loc[('bar', 'three'), 'B'] == 0 - @pytest.mark.parametrize('unicode_strings', [True, False]) - def test_mixed_depth_get(self, unicode_strings): - # If unicode_strings is True, the column labels in dataframe - # construction will use unicode strings in Python 2 (pull request - # #17099). +@pytest.mark.parametrize('unicode_strings', [True, False]) +def test_mixed_depth_get(unicode_strings): + # If unicode_strings is True, the column labels in dataframe + # construction will use unicode strings in Python 2 (pull request + # #17099). 
- arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'], - ['', 'OD', 'OD', 'result1', 'result2', 'result1'], - ['', 'wx', 'wy', '', '', '']] + arrays = [['a', 'top', 'top', 'routine1', 'routine1', 'routine2'], + ['', 'OD', 'OD', 'result1', 'result2', 'result1'], + ['', 'wx', 'wy', '', '', '']] - if unicode_strings: - arrays = [[u(s) for s in arr] for arr in arrays] + if unicode_strings: + arrays = [[u(s) for s in arr] for arr in arrays] - tuples = sorted(zip(*arrays)) - index = MultiIndex.from_tuples(tuples) - df = DataFrame(np.random.randn(4, 6), columns=index) + tuples = sorted(zip(*arrays)) + index = MultiIndex.from_tuples(tuples) + df = DataFrame(np.random.randn(4, 6), columns=index) - result = df['a'] - expected = df['a', '', ''].rename('a') - tm.assert_series_equal(result, expected) + result = df['a'] + expected = df['a', '', ''].rename('a') + tm.assert_series_equal(result, expected) - result = df['routine1', 'result1'] - expected = df['routine1', 'result1', ''] - expected = expected.rename(('routine1', 'result1')) - tm.assert_series_equal(result, expected) + result = df['routine1', 'result1'] + expected = df['routine1', 'result1', ''] + expected = expected.rename(('routine1', 'result1')) + tm.assert_series_equal(result, expected)
xref #24040 for this pass: - the Test classes have been removed. - `catch_warnings` used for the `.ix` deprecation replaced with `@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")` at test level. cc @jreback @gfyoung
https://api.github.com/repos/pandas-dev/pandas/pulls/24053
2018-12-02T20:26:13Z
2018-12-11T13:54:10Z
2018-12-11T13:54:10Z
2018-12-13T12:57:09Z
DOC: Stop ignoring fixed rst files in flake8-rst
diff --git a/doc/source/io.rst b/doc/source/io.rst index 372a7b8a325e7..fbd238586c776 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1151,7 +1151,7 @@ Let us consider some examples: .. code-block:: python - read_csv(path, na_values=[5]) + pd.read_csv('path_to_file.csv', na_values=[5]) In the example above ``5`` and ``5.0`` will be recognized as ``NaN``, in addition to the defaults. A string will first be interpreted as a numerical @@ -1159,19 +1159,19 @@ addition to the defaults. A string will first be interpreted as a numerical .. code-block:: python - read_csv(path, keep_default_na=False, na_values=[""]) + pd.read_csv('path_to_file.csv', keep_default_na=False, na_values=[""]) Above, only an empty field will be recognized as ``NaN``. .. code-block:: python - read_csv(path, keep_default_na=False, na_values=["NA", "0"]) + pd.read_csv('path_to_file.csv', keep_default_na=False, na_values=["NA", "0"]) Above, both ``NA`` and ``0`` as strings are ``NaN``. .. code-block:: python - read_csv(path, na_values=["Nope"]) + pd.read_csv('path_to_file.csv', na_values=["Nope"]) The default values, in addition to the string ``"Nope"`` are recognized as ``NaN``. @@ -1245,24 +1245,13 @@ too few fields will have NA values filled in the trailing fields. Lines with too many fields will raise an error by default: .. ipython:: python - :suppress: + :okexcept: data = ('a,b,c\n' '1,2,3\n' '4,5,6,7\n' '8,9,10') - -.. code-block:: ipython - - In [27]: data = ('a,b,c\n' - '1,2,3\n' - '4,5,6,7\n' - '8,9,10') - - In [28]: pd.read_csv(StringIO(data)) - --------------------------------------------------------------------------- - ParserError Traceback (most recent call last) - ParserError: Error tokenizing data. C error: Expected 3 fields in line 3, saw 4 + pd.read_csv(StringIO(data)) You can elect to skip bad lines: @@ -2754,7 +2743,7 @@ file, and the ``sheet_name`` indicating which sheet to parse. .. 
code-block:: python # Returns a DataFrame - read_excel('path_to_file.xls', sheet_name='Sheet1') + pd.read_excel('path_to_file.xls', sheet_name='Sheet1') .. _io.excel.excelfile_class: @@ -2803,14 +2792,14 @@ of sheet names can simply be passed to ``read_excel`` with no loss in performanc # using the ExcelFile class data = {} with pd.ExcelFile('path_to_file.xls') as xls: - data['Sheet1'] = read_excel(xls, 'Sheet1', index_col=None, - na_values=['NA']) - data['Sheet2'] = read_excel(xls, 'Sheet2', index_col=None, - na_values=['NA']) + data['Sheet1'] = pd.read_excel(xls, 'Sheet1', index_col=None, + na_values=['NA']) + data['Sheet2'] = pd.read_excel(xls, 'Sheet2', index_col=None, + na_values=['NA']) # equivalent using the read_excel function - data = read_excel('path_to_file.xls', ['Sheet1', 'Sheet2'], - index_col=None, na_values=['NA']) + data = pd.read_excel('path_to_file.xls', ['Sheet1', 'Sheet2'], + index_col=None, na_values=['NA']) .. _io.excel.specifying_sheets: @@ -2832,35 +2821,35 @@ Specifying Sheets .. code-block:: python # Returns a DataFrame - read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA']) + pd.read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA']) Using the sheet index: .. code-block:: python # Returns a DataFrame - read_excel('path_to_file.xls', 0, index_col=None, na_values=['NA']) + pd.read_excel('path_to_file.xls', 0, index_col=None, na_values=['NA']) Using all default values: .. code-block:: python # Returns a DataFrame - read_excel('path_to_file.xls') + pd.read_excel('path_to_file.xls') Using None to get all sheets: .. code-block:: python # Returns a dictionary of DataFrames - read_excel('path_to_file.xls', sheet_name=None) + pd.read_excel('path_to_file.xls', sheet_name=None) Using a list to get multiple sheets: .. code-block:: python # Returns the 1st and 4th sheet, as a dictionary of DataFrames. 
- read_excel('path_to_file.xls', sheet_name=['Sheet1', 3]) + pd.read_excel('path_to_file.xls', sheet_name=['Sheet1', 3]) ``read_excel`` can read more than one sheet, by setting ``sheet_name`` to either a list of sheet names, a list of sheet positions, or ``None`` to read all sheets. @@ -2932,20 +2921,20 @@ to be parsed. .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', usecols=2) + pd.read_excel('path_to_file.xls', 'Sheet1', usecols=2) You can also specify a comma-delimited set of Excel columns and ranges as a string: .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', usecols='A,C:E') + pd.read_excel('path_to_file.xls', 'Sheet1', usecols='A,C:E') If ``usecols`` is a list of integers, then it is assumed to be the file column indices to be parsed. .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3]) + pd.read_excel('path_to_file.xls', 'Sheet1', usecols=[0, 2, 3]) Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. @@ -2957,7 +2946,7 @@ document header row(s). Those strings define which columns will be parsed: .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', usecols=['foo', 'bar']) + pd.read_excel('path_to_file.xls', 'Sheet1', usecols=['foo', 'bar']) Element order is ignored, so ``usecols=['baz', 'joe']`` is the same as ``['joe', 'baz']``. @@ -2968,7 +2957,7 @@ the column names, returning names where the callable function evaluates to ``Tru .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', usecols=lambda x: x.isalpha()) + pd.read_excel('path_to_file.xls', 'Sheet1', usecols=lambda x: x.isalpha()) Parsing Dates +++++++++++++ @@ -2980,7 +2969,7 @@ use the ``parse_dates`` keyword to parse those strings to datetimes: .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', parse_dates=['date_strings']) + pd.read_excel('path_to_file.xls', 'Sheet1', parse_dates=['date_strings']) Cell Converters @@ -2991,7 +2980,7 @@ option. 
For instance, to convert a column to boolean: .. code-block:: python - read_excel('path_to_file.xls', 'Sheet1', converters={'MyBools': bool}) + pd.read_excel('path_to_file.xls', 'Sheet1', converters={'MyBools': bool}) This options handles missing values and treats exceptions in the converters as missing data. Transformations are applied cell by cell rather than to the @@ -3006,7 +2995,7 @@ missing data to recover integer dtype: return int(x) if x else -1 - read_excel('path_to_file.xls', 'Sheet1', converters={'MyInts': cfun}) + pd.read_excel('path_to_file.xls', 'Sheet1', converters={'MyInts': cfun}) dtype Specifications ++++++++++++++++++++ @@ -3020,7 +3009,7 @@ no type inference, use the type ``str`` or ``object``. .. code-block:: python - read_excel('path_to_file.xls', dtype={'MyInts': 'int64', 'MyText': str}) + pd.read_excel('path_to_file.xls', dtype={'MyInts': 'int64', 'MyText': str}) .. _io.excel_writer: @@ -5126,7 +5115,7 @@ If you have an SQLAlchemy description of your database you can express where con sa.Column('Col_3', sa.Boolean), ) - pd.read_sql(sa.select([data_table]).where(data_table.c.Col_3 == True), engine) + pd.read_sql(sa.select([data_table]).where(data_table.c.Col_3 is True), engine) You can combine SQLAlchemy expressions with parameters passed to :func:`read_sql` using :func:`sqlalchemy.bindparam` @@ -5155,7 +5144,7 @@ And then issue the following queries: .. code-block:: python - data.to_sql('data', cnx) + data.to_sql('data', con) pd.read_sql_query("SELECT * FROM data", con) @@ -5372,6 +5361,9 @@ Obtain an iterator and read an XPORT file 100,000 lines at a time: .. code-block:: python + def do_something(chunk): + pass + rdr = pd.read_sas('sas_xport.xpt', chunk=100000) for chunk in rdr: do_something(chunk) @@ -5424,85 +5416,7 @@ ignored. dtypes: float64(1), int64(1) memory usage: 15.3 MB -When writing, the top-three functions in terms of speed are are -``test_pickle_write``, ``test_feather_write`` and ``test_hdf_fixed_write_compress``. - -.. 
code-block:: ipython - - In [14]: %timeit test_sql_write(df) - 2.37 s ± 36.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - - In [15]: %timeit test_hdf_fixed_write(df) - 194 ms ± 65.9 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) - - In [26]: %timeit test_hdf_fixed_write_compress(df) - 119 ms ± 2.15 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) - - In [16]: %timeit test_hdf_table_write(df) - 623 ms ± 125 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - - In [27]: %timeit test_hdf_table_write_compress(df) - 563 ms ± 23.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - - In [17]: %timeit test_csv_write(df) - 3.13 s ± 49.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - - In [30]: %timeit test_feather_write(df) - 103 ms ± 5.88 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) - - In [31]: %timeit test_pickle_write(df) - 109 ms ± 3.72 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) - - In [32]: %timeit test_pickle_write_compress(df) - 3.33 s ± 55.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - -When reading, the top three are ``test_feather_read``, ``test_pickle_read`` and -``test_hdf_fixed_read``. - -.. code-block:: ipython - - In [18]: %timeit test_sql_read() - 1.35 s ± 14.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - - In [19]: %timeit test_hdf_fixed_read() - 14.3 ms ± 438 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - - In [28]: %timeit test_hdf_fixed_read_compress() - 23.5 ms ± 672 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) - - In [20]: %timeit test_hdf_table_read() - 35.4 ms ± 314 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) - - In [29]: %timeit test_hdf_table_read_compress() - 42.6 ms ± 2.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) - - In [22]: %timeit test_csv_read() - 516 ms ± 27.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - - In [33]: %timeit test_feather_read() - 4.06 ms ± 115 µs per loop (mean ± std. dev. 
of 7 runs, 100 loops each) - - In [34]: %timeit test_pickle_read() - 6.5 ms ± 172 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) - - In [35]: %timeit test_pickle_read_compress() - 588 ms ± 3.57 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) - -Space on disk (in bytes) - -.. code-block:: none - - 34816000 Aug 21 18:00 test.sql - 24009240 Aug 21 18:00 test_fixed.hdf - 7919610 Aug 21 18:00 test_fixed_compress.hdf - 24458892 Aug 21 18:00 test_table.hdf - 8657116 Aug 21 18:00 test_table_compress.hdf - 28520770 Aug 21 18:00 test.csv - 16000248 Aug 21 18:00 test.feather - 16000848 Aug 21 18:00 test.pkl - 7554108 Aug 21 18:00 test.pkl.compress - -And here's the code: +Given the next test set: .. code-block:: python @@ -5589,3 +5503,81 @@ And here's the code: def test_pickle_read_compress(): pd.read_pickle('test.pkl.compress', compression='xz') + +When writing, the top-three functions in terms of speed are are +``test_pickle_write``, ``test_feather_write`` and ``test_hdf_fixed_write_compress``. + +.. code-block:: ipython + + In [14]: %timeit test_sql_write(df) + 2.37 s ± 36.6 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + + In [15]: %timeit test_hdf_fixed_write(df) + 194 ms ± 65.9 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + + In [26]: %timeit test_hdf_fixed_write_compress(df) + 119 ms ± 2.15 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + + In [16]: %timeit test_hdf_table_write(df) + 623 ms ± 125 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + + In [27]: %timeit test_hdf_table_write_compress(df) + 563 ms ± 23.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + + In [17]: %timeit test_csv_write(df) + 3.13 s ± 49.9 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + + In [30]: %timeit test_feather_write(df) + 103 ms ± 5.88 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + + In [31]: %timeit test_pickle_write(df) + 109 ms ± 3.72 ms per loop (mean ± std. dev. 
of 7 runs, 10 loops each) + + In [32]: %timeit test_pickle_write_compress(df) + 3.33 s ± 55.2 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + +When reading, the top three are ``test_feather_read``, ``test_pickle_read`` and +``test_hdf_fixed_read``. + +.. code-block:: ipython + + In [18]: %timeit test_sql_read() + 1.35 s ± 14.7 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + + In [19]: %timeit test_hdf_fixed_read() + 14.3 ms ± 438 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + + In [28]: %timeit test_hdf_fixed_read_compress() + 23.5 ms ± 672 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) + + In [20]: %timeit test_hdf_table_read() + 35.4 ms ± 314 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) + + In [29]: %timeit test_hdf_table_read_compress() + 42.6 ms ± 2.1 ms per loop (mean ± std. dev. of 7 runs, 10 loops each) + + In [22]: %timeit test_csv_read() + 516 ms ± 27.1 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + + In [33]: %timeit test_feather_read() + 4.06 ms ± 115 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + + In [34]: %timeit test_pickle_read() + 6.5 ms ± 172 µs per loop (mean ± std. dev. of 7 runs, 100 loops each) + + In [35]: %timeit test_pickle_read_compress() + 588 ms ± 3.57 ms per loop (mean ± std. dev. of 7 runs, 1 loop each) + +Space on disk (in bytes) + +.. 
code-block:: none + + 34816000 Aug 21 18:00 test.sql + 24009240 Aug 21 18:00 test_fixed.hdf + 7919610 Aug 21 18:00 test_fixed_compress.hdf + 24458892 Aug 21 18:00 test_table.hdf + 8657116 Aug 21 18:00 test_table_compress.hdf + 28520770 Aug 21 18:00 test.csv + 16000248 Aug 21 18:00 test.feather + 16000848 Aug 21 18:00 test.pkl + 7554108 Aug 21 18:00 test.pkl.compress diff --git a/setup.cfg b/setup.cfg index b9994e9ea0b2c..1d26bcab28ef0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -69,24 +69,17 @@ exclude = doc/source/advanced.rst doc/source/basics.rst doc/source/categorical.rst - doc/source/comparison_with_r.rst - doc/source/comparison_with_sql.rst - doc/source/comparison_with_stata.rst - doc/source/computation.rst doc/source/contributing_docstring.rst doc/source/dsintro.rst doc/source/enhancingperf.rst doc/source/extending.rst doc/source/groupby.rst doc/source/indexing.rst - doc/source/io.rst doc/source/merging.rst doc/source/missing_data.rst doc/source/options.rst doc/source/release.rst doc/source/reshaping.rst - doc/source/timedeltas.rst - doc/source/timeseries.rst doc/source/visualization.rst
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Removing from the list of ignored files in flake8-rst, the pages that have been fixed before the ignore list was merged.
https://api.github.com/repos/pandas-dev/pandas/pulls/24051
2018-12-02T18:21:15Z
2018-12-03T12:28:56Z
2018-12-03T12:28:56Z
2018-12-03T12:29:00Z
DEPR: deprecate default of skipna=False in infer_dtype
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 3a04789b609f8..d5250bc688826 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1220,6 +1220,7 @@ Deprecations - :func:`pandas.api.types.is_datetimetz` is deprecated in favor of `pandas.api.types.is_datetime64tz` (:issue:`23917`) - Creating a :class:`TimedeltaIndex`, :class:`DatetimeIndex`, or :class:`PeriodIndex` by passing range arguments `start`, `end`, and `periods` is deprecated in favor of :func:`timedelta_range`, :func:`date_range`, or :func:`period_range` (:issue:`23919`) - Passing a string alias like ``'datetime64[ns, UTC]'`` as the ``unit`` parameter to :class:`DatetimeTZDtype` is deprecated. Use :class:`DatetimeTZDtype.construct_from_string` instead (:issue:`23990`). +- The ``skipna`` parameter of :meth:`~pandas.api.types.infer_dtype` will switch to ``True`` by default in a future version of pandas (:issue:`17066`, :issue:`24050`) - In :meth:`Series.where` with Categorical data, providing an ``other`` that is not present in the categories is deprecated. Convert the categorical to a different dtype or add the ``other`` to the categories first (:issue:`24077`). - :meth:`Series.clip_lower`, :meth:`Series.clip_upper`, :meth:`DataFrame.clip_lower` and :meth:`DataFrame.clip_upper` are deprecated and will be removed in a future version. 
Use ``Series.clip(lower=threshold)``, ``Series.clip(upper=threshold)`` and the equivalent ``DataFrame`` methods (:issue:`24203`) diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx index 1124000c97875..6e6d35f00725c 100644 --- a/pandas/_libs/lib.pyx +++ b/pandas/_libs/lib.pyx @@ -4,6 +4,7 @@ from fractions import Fraction from numbers import Number import sys +import warnings import cython from cython import Py_ssize_t @@ -1079,7 +1080,7 @@ cdef _try_infer_map(v): return None -def infer_dtype(value: object, skipna: bool=False) -> str: +def infer_dtype(value: object, skipna: object=None) -> str: """ Efficiently infer the type of a passed val, or list-like array of values. Return a string describing the type. @@ -1088,8 +1089,7 @@ def infer_dtype(value: object, skipna: bool=False) -> str: ---------- value : scalar, list, ndarray, or pandas type skipna : bool, default False - Ignore NaN values when inferring the type. The default of ``False`` - will be deprecated in a later version of pandas. + Ignore NaN values when inferring the type. .. versionadded:: 0.21.0 @@ -1186,6 +1186,12 @@ def infer_dtype(value: object, skipna: bool=False) -> str: bint seen_pdnat = False bint seen_val = False + if skipna is None: + msg = ('A future version of pandas will default to `skipna=True`. 
To ' + 'silence this warning, pass `skipna=True|False` explicitly.') + warnings.warn(msg, FutureWarning, stacklevel=2) + skipna = False + if util.is_array(value): values = value elif hasattr(value, 'dtype'): diff --git a/pandas/core/arrays/array_.py b/pandas/core/arrays/array_.py index 04842d82fca5d..9b2240eb62906 100644 --- a/pandas/core/arrays/array_.py +++ b/pandas/core/arrays/array_.py @@ -209,7 +209,7 @@ def array(data, # type: Sequence[object] return cls._from_sequence(data, dtype=dtype, copy=copy) if dtype is None: - inferred_dtype = lib.infer_dtype(data) + inferred_dtype = lib.infer_dtype(data, skipna=False) if inferred_dtype == 'period': try: return period_array(data, copy=copy) diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index 191cd5d63eea3..7861a122afdb6 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -962,8 +962,8 @@ def _maybe_coerce_merge_keys(self): # object values are allowed to be merged elif ((lk_is_object and is_numeric_dtype(rk)) or (is_numeric_dtype(lk) and rk_is_object)): - inferred_left = lib.infer_dtype(lk) - inferred_right = lib.infer_dtype(rk) + inferred_left = lib.infer_dtype(lk, skipna=False) + inferred_right = lib.infer_dtype(rk, skipna=False) bool_types = ['integer', 'mixed-integer', 'boolean', 'empty'] string_types = ['string', 'unicode', 'mixed', 'bytes', 'empty'] diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index cc2aa64b98c8b..f58cb362cd6d2 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -334,11 +334,11 @@ def test_infer_dtype_bytes(self): # string array of bytes arr = np.array(list('abc'), dtype='S1') - assert lib.infer_dtype(arr, skipna=False) == compare + assert lib.infer_dtype(arr, skipna=True) == compare # object array of bytes arr = arr.astype(object) - assert lib.infer_dtype(arr, skipna=False) == compare + assert lib.infer_dtype(arr, skipna=True) == compare # object 
array of bytes with missing values assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare @@ -538,32 +538,40 @@ def test_length_zero(self, skipna): def test_integers(self): arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'integer' arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'mixed-integer' arr = np.array([1, 2, 3, 4, 5], dtype='i4') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'integer' + def test_deprecation(self): + # GH 24050 + arr = np.array([1, 2, 3], dtype=object) + + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = lib.infer_dtype(arr) # default: skipna=None -> warn + assert result == 'integer' + def test_bools(self): arr = np.array([True, False, True, True, True], dtype='O') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'boolean' arr = np.array([np.bool_(True), np.bool_(False)], dtype='O') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'boolean' arr = np.array([True, False, True, 'foo'], dtype='O') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'mixed' arr = np.array([True, False, True], dtype=bool) - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'boolean' arr = np.array([True, np.nan, False], dtype='O') @@ -575,38 +583,38 @@ def test_bools(self): def test_floats(self): arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'floating' 
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'], dtype='O') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'mixed-integer' arr = np.array([1, 2, 3, 4, 5], dtype='f4') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'floating' arr = np.array([1, 2, 3, 4, 5], dtype='f8') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'floating' def test_decimals(self): # GH15690 arr = np.array([Decimal(1), Decimal(2), Decimal(3)]) - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'decimal' arr = np.array([1.0, 2.0, Decimal(3)]) - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'mixed' arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)]) - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'decimal' arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O') - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'decimal' def test_string(self): @@ -648,34 +656,34 @@ def test_infer_dtype_datetime(self): arr = np.array([Timestamp('2011-01-01'), Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr, skipna=False) == 'datetime' + assert lib.infer_dtype(arr, skipna=True) == 'datetime' arr = np.array([np.datetime64('2011-01-01'), np.datetime64('2011-01-01')], dtype=object) - assert lib.infer_dtype(arr, skipna=False) == 'datetime64' + assert lib.infer_dtype(arr, skipna=True) == 'datetime64' arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]) - assert lib.infer_dtype(arr, skipna=False) == 'datetime' + assert lib.infer_dtype(arr, skipna=True) == 'datetime' # starts with nan for n in [pd.NaT, np.nan]: arr = np.array([n, pd.Timestamp('2011-01-02')]) - assert 
lib.infer_dtype(arr, skipna=False) == 'datetime' + assert lib.infer_dtype(arr, skipna=True) == 'datetime' arr = np.array([n, np.datetime64('2011-01-02')]) - assert lib.infer_dtype(arr, skipna=False) == 'datetime64' + assert lib.infer_dtype(arr, skipna=True) == 'datetime64' arr = np.array([n, datetime(2011, 1, 1)]) - assert lib.infer_dtype(arr, skipna=False) == 'datetime' + assert lib.infer_dtype(arr, skipna=True) == 'datetime' arr = np.array([n, pd.Timestamp('2011-01-02'), n]) - assert lib.infer_dtype(arr, skipna=False) == 'datetime' + assert lib.infer_dtype(arr, skipna=True) == 'datetime' arr = np.array([n, np.datetime64('2011-01-02'), n]) - assert lib.infer_dtype(arr, skipna=False) == 'datetime64' + assert lib.infer_dtype(arr, skipna=True) == 'datetime64' arr = np.array([n, datetime(2011, 1, 1), n]) - assert lib.infer_dtype(arr, skipna=False) == 'datetime' + assert lib.infer_dtype(arr, skipna=True) == 'datetime' # different type of nat arr = np.array([np.timedelta64('nat'), @@ -689,58 +697,58 @@ def test_infer_dtype_datetime(self): # mixed datetime arr = np.array([datetime(2011, 1, 1), pd.Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr, skipna=False) == 'datetime' + assert lib.infer_dtype(arr, skipna=True) == 'datetime' # should be datetime? 
arr = np.array([np.datetime64('2011-01-01'), pd.Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr, skipna=False) == 'mixed' + assert lib.infer_dtype(arr, skipna=True) == 'mixed' arr = np.array([pd.Timestamp('2011-01-02'), np.datetime64('2011-01-01')]) - assert lib.infer_dtype(arr, skipna=False) == 'mixed' + assert lib.infer_dtype(arr, skipna=True) == 'mixed' arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1]) - assert lib.infer_dtype(arr, skipna=False) == 'mixed-integer' + assert lib.infer_dtype(arr, skipna=True) == 'mixed-integer' arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1]) - assert lib.infer_dtype(arr, skipna=False) == 'mixed' + assert lib.infer_dtype(arr, skipna=True) == 'mixed' arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')]) - assert lib.infer_dtype(arr, skipna=False) == 'mixed' + assert lib.infer_dtype(arr, skipna=True) == 'mixed' def test_infer_dtype_timedelta(self): arr = np.array([pd.Timedelta('1 days'), pd.Timedelta('2 days')]) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' arr = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D')], dtype=object) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' arr = np.array([timedelta(1), timedelta(2)]) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' # starts with nan for n in [pd.NaT, np.nan]: arr = np.array([n, Timedelta('1 days')]) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' arr = np.array([n, np.timedelta64(1, 'D')]) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' arr = np.array([n, timedelta(1)]) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' arr = 
np.array([n, pd.Timedelta('1 days'), n]) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' arr = np.array([n, np.timedelta64(1, 'D'), n]) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' arr = np.array([n, timedelta(1), n]) - assert lib.infer_dtype(arr, skipna=False) == 'timedelta' + assert lib.infer_dtype(arr, skipna=True) == 'timedelta' # different type of nat arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')], @@ -755,19 +763,19 @@ def test_infer_dtype_period(self): # GH 13664 arr = np.array([pd.Period('2011-01', freq='D'), pd.Period('2011-02', freq='D')]) - assert lib.infer_dtype(arr, skipna=False) == 'period' + assert lib.infer_dtype(arr, skipna=True) == 'period' arr = np.array([pd.Period('2011-01', freq='D'), pd.Period('2011-02', freq='M')]) - assert lib.infer_dtype(arr, skipna=False) == 'period' + assert lib.infer_dtype(arr, skipna=True) == 'period' # starts with nan for n in [pd.NaT, np.nan]: arr = np.array([n, pd.Period('2011-01', freq='D')]) - assert lib.infer_dtype(arr, skipna=False) == 'period' + assert lib.infer_dtype(arr, skipna=True) == 'period' arr = np.array([n, pd.Period('2011-01', freq='D'), n]) - assert lib.infer_dtype(arr, skipna=False) == 'period' + assert lib.infer_dtype(arr, skipna=True) == 'period' # different type of nat arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')], @@ -846,7 +854,7 @@ def test_infer_datetimelike_array_nan_nat_like(self, first, second, def test_infer_dtype_all_nan_nat_like(self): arr = np.array([np.nan, np.nan]) - assert lib.infer_dtype(arr, skipna=False) == 'floating' + assert lib.infer_dtype(arr, skipna=True) == 'floating' # nan and None mix are result in mixed arr = np.array([np.nan, np.nan, None]) @@ -1043,17 +1051,17 @@ def test_categorical(self): # GH 8974 from pandas import Categorical, Series arr = Categorical(list('abc')) - result = 
lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'categorical' - result = lib.infer_dtype(Series(arr), skipna=False) + result = lib.infer_dtype(Series(arr), skipna=True) assert result == 'categorical' arr = Categorical(list('abc'), categories=['cegfab'], ordered=True) - result = lib.infer_dtype(arr, skipna=False) + result = lib.infer_dtype(arr, skipna=True) assert result == 'categorical' - result = lib.infer_dtype(Series(arr), skipna=False) + result = lib.infer_dtype(Series(arr), skipna=True) assert result == 'categorical' diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index f921d015fce3d..fa303c904440c 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -813,12 +813,12 @@ def test_constructor_with_datetime_tz(self): s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'), pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')]) assert s.dtype == 'datetime64[ns, US/Pacific]' - assert lib.infer_dtype(s, skipna=False) == 'datetime64' + assert lib.infer_dtype(s, skipna=True) == 'datetime64' s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'), pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')]) assert s.dtype == 'object' - assert lib.infer_dtype(s, skipna=False) == 'datetime' + assert lib.infer_dtype(s, skipna=True) == 'datetime' # with all NaT s = Series(pd.NaT, index=[0, 1], dtype='datetime64[ns, US/Eastern]')
- [x] follows up on the changed in #17066 - [x] tests adapted / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I know that `v.0.22` is generally not counted towards the "3 major releases between warning and deprecation" policy, but since this is a private method, I'm thinking it should be OK? Would make life easier in a couple of places (especially in cleaning up some of the casting code).
https://api.github.com/repos/pandas-dev/pandas/pulls/24050
2018-12-02T18:16:04Z
2019-01-04T12:15:24Z
2019-01-04T12:15:24Z
2019-01-04T16:58:42Z
REF: array_to_datetime catch overflows in one place
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 1205ebbe311e2..86d842d5e2678 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -526,182 +526,157 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', for i in range(n): val = values[i] - if checknull_with_nat(val): - iresult[i] = NPY_NAT + try: + if checknull_with_nat(val): + iresult[i] = NPY_NAT - elif PyDateTime_Check(val): - seen_datetime = 1 - if val.tzinfo is not None: - if utc_convert: - try: + elif PyDateTime_Check(val): + seen_datetime = 1 + if val.tzinfo is not None: + if utc_convert: _ts = convert_datetime_to_tsobject(val, None) iresult[i] = _ts.value - except OutOfBoundsDatetime: - if is_coerce: - iresult[i] = NPY_NAT - continue - raise + else: + raise ValueError('Tz-aware datetime.datetime ' + 'cannot be converted to ' + 'datetime64 unless utc=True') else: - raise ValueError('Tz-aware datetime.datetime cannot ' - 'be converted to datetime64 unless ' - 'utc=True') - else: - iresult[i] = pydatetime_to_dt64(val, &dts) - if not PyDateTime_CheckExact(val): - # i.e. a Timestamp object - iresult[i] += val.nanosecond - try: + iresult[i] = pydatetime_to_dt64(val, &dts) + if not PyDateTime_CheckExact(val): + # i.e. 
a Timestamp object + iresult[i] += val.nanosecond check_dts_bounds(&dts) - except OutOfBoundsDatetime: - if is_coerce: - iresult[i] = NPY_NAT - continue - raise - elif PyDate_Check(val): - seen_datetime = 1 - iresult[i] = pydate_to_dt64(val, &dts) - try: + elif PyDate_Check(val): + seen_datetime = 1 + iresult[i] = pydate_to_dt64(val, &dts) check_dts_bounds(&dts) - except OutOfBoundsDatetime: - if is_coerce: - iresult[i] = NPY_NAT - continue - raise - elif is_datetime64_object(val): - seen_datetime = 1 - try: + elif is_datetime64_object(val): + seen_datetime = 1 iresult[i] = get_datetime64_nanos(val) - except OutOfBoundsDatetime: - if is_coerce: - iresult[i] = NPY_NAT - continue - raise - elif is_integer_object(val) or is_float_object(val): - # these must be ns unit by-definition - seen_integer = 1 + elif is_integer_object(val) or is_float_object(val): + # these must be ns unit by-definition + seen_integer = 1 - if val != val or val == NPY_NAT: - iresult[i] = NPY_NAT - elif is_raise or is_ignore: - iresult[i] = val - else: - # coerce - # we now need to parse this as if unit='ns' - # we can ONLY accept integers at this point - # if we have previously (or in future accept - # datetimes/strings, then we must coerce) - try: - iresult[i] = cast_from_unit(val, 'ns') - except: + if val != val or val == NPY_NAT: iresult[i] = NPY_NAT + elif is_raise or is_ignore: + iresult[i] = val + else: + # coerce + # we now need to parse this as if unit='ns' + # we can ONLY accept integers at this point + # if we have previously (or in future accept + # datetimes/strings, then we must coerce) + try: + iresult[i] = cast_from_unit(val, 'ns') + except: + iresult[i] = NPY_NAT - elif is_string_object(val): - # string - seen_string = 1 - - if len(val) == 0 or val in nat_strings: - iresult[i] = NPY_NAT - continue - if isinstance(val, unicode) and PY2: - val = val.encode('utf-8') + elif is_string_object(val): + # string + seen_string = 1 - try: - _string_to_dts(val, &dts, &out_local, 
&out_tzoffset) - except ValueError: - # A ValueError at this point is a _parsing_ error - # specifically _not_ OutOfBoundsDatetime - if _parse_today_now(val, &iresult[i]): + if len(val) == 0 or val in nat_strings: + iresult[i] = NPY_NAT continue - elif require_iso8601: - # if requiring iso8601 strings, skip trying - # other formats - if is_coerce: - iresult[i] = NPY_NAT - continue - elif is_raise: - raise ValueError("time data {val} doesn't match " - "format specified" - .format(val=val)) - return values, tz_out + if isinstance(val, unicode) and PY2: + val = val.encode('utf-8') try: - py_dt = parse_datetime_string(val, dayfirst=dayfirst, - yearfirst=yearfirst) - except Exception: - if is_coerce: - iresult[i] = NPY_NAT + _string_to_dts(val, &dts, &out_local, &out_tzoffset) + except ValueError: + # A ValueError at this point is a _parsing_ error + # specifically _not_ OutOfBoundsDatetime + if _parse_today_now(val, &iresult[i]): continue - raise TypeError("invalid string coercion to datetime") - - # If the dateutil parser returned tzinfo, capture it - # to check if all arguments have the same tzinfo - tz = py_dt.utcoffset() - if tz is not None: - seen_datetime_offset = 1 - # dateutil timezone objects cannot be hashed, so store - # the UTC offsets in seconds instead - out_tzoffset_vals.add(tz.total_seconds()) - else: - # Add a marker for naive string, to track if we are - # parsing mixed naive and aware strings - out_tzoffset_vals.add('naive') - try: + elif require_iso8601: + # if requiring iso8601 strings, skip trying + # other formats + if is_coerce: + iresult[i] = NPY_NAT + continue + elif is_raise: + raise ValueError("time data {val} doesn't " + "match format specified" + .format(val=val)) + return values, tz_out + + try: + py_dt = parse_datetime_string(val, + dayfirst=dayfirst, + yearfirst=yearfirst) + except Exception: + if is_coerce: + iresult[i] = NPY_NAT + continue + raise TypeError("invalid string coercion to " + "datetime") + + # If the dateutil parser 
returned tzinfo, capture it + # to check if all arguments have the same tzinfo + tz = py_dt.utcoffset() + if tz is not None: + seen_datetime_offset = 1 + # dateutil timezone objects cannot be hashed, so + # store the UTC offsets in seconds instead + out_tzoffset_vals.add(tz.total_seconds()) + else: + # Add a marker for naive string, to track if we are + # parsing mixed naive and aware strings + out_tzoffset_vals.add('naive') + _ts = convert_datetime_to_tsobject(py_dt, None) iresult[i] = _ts.value - except OutOfBoundsDatetime: + except: + # TODO: What exception are we concerned with here? if is_coerce: iresult[i] = NPY_NAT continue raise - except: - # TODO: What exception are we concerned with here? + else: + # No error raised by string_to_dts, pick back up + # where we left off + value = dtstruct_to_dt64(&dts) + if out_local == 1: + seen_datetime_offset = 1 + # Store the out_tzoffset in seconds + # since we store the total_seconds of + # dateutil.tz.tzoffset objects + out_tzoffset_vals.add(out_tzoffset * 60.) + tz = pytz.FixedOffset(out_tzoffset) + value = tz_convert_single(value, tz, UTC) + else: + # Add a marker for naive string, to track if we are + # parsing mixed naive and aware strings + out_tzoffset_vals.add('naive') + iresult[i] = value + check_dts_bounds(&dts) + + else: if is_coerce: iresult[i] = NPY_NAT - continue - raise - else: - # No error raised by string_to_dts, pick back up - # where we left off - value = dtstruct_to_dt64(&dts) - if out_local == 1: - seen_datetime_offset = 1 - # Store the out_tzoffset in seconds - # since we store the total_seconds of - # dateutil.tz.tzoffset objects - out_tzoffset_vals.add(out_tzoffset * 60.) 
- tz = pytz.FixedOffset(out_tzoffset) - value = tz_convert_single(value, tz, UTC) else: - # Add a marker for naive string, to track if we are - # parsing mixed naive and aware strings - out_tzoffset_vals.add('naive') - iresult[i] = value - try: - check_dts_bounds(&dts) - except OutOfBoundsDatetime: - # GH#19382 for just-barely-OutOfBounds falling back to - # dateutil parser will return incorrect result because - # it will ignore nanoseconds - if is_coerce: - iresult[i] = NPY_NAT - continue - elif require_iso8601: - if is_raise: - raise ValueError("time data {val} doesn't " - "match format specified" - .format(val=val)) - return values, tz_out - raise + raise TypeError("{typ} is not convertible to datetime" + .format(typ=type(val))) - else: + except OutOfBoundsDatetime: if is_coerce: iresult[i] = NPY_NAT - else: - raise TypeError("{typ} is not convertible to datetime" - .format(typ=type(val))) + continue + elif require_iso8601 and is_string_object(val): + # GH#19382 for just-barely-OutOfBounds falling back to + # dateutil parser will return incorrect result because + # it will ignore nanoseconds + if is_raise: + raise ValueError("time data {val} doesn't " + "match format specified" + .format(val=val)) + assert is_ignore + return values, tz_out + raise if seen_datetime and seen_integer: # we have mixed datetimes & integers
The diff is big, but all this is doing is changing: ``` if A: try: foo(...) except OutOfBoundsDatetime: handle(...) elif B: try: bar(...) except OutOfBoundsDatetime: handle(...) ... ``` to: ``` try: if A: foo(...) elif B: bar(...) ... except OutOfBoundsDatetime: handle(...) ... ``` Orthogonal to #24032, will need rebasing following #24031. All three of these should go before #24006.
https://api.github.com/repos/pandas-dev/pandas/pulls/24049
2018-12-02T16:16:37Z
2018-12-02T21:36:20Z
2018-12-02T21:36:20Z
2018-12-02T21:39:58Z
Deprecate series.nonzero (GH18262)
diff --git a/doc/source/api/series.rst b/doc/source/api/series.rst index 1631f04b1c72f..aa43c8b643d44 100644 --- a/doc/source/api/series.rst +++ b/doc/source/api/series.rst @@ -188,7 +188,6 @@ Computations / Descriptive Stats Series.is_monotonic_decreasing Series.value_counts Series.compound - Series.nonzero Reindexing / Selection / Label manipulation ------------------------------------------- diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 4ff1f96777b1c..d288ef8bf8449 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1224,6 +1224,7 @@ Deprecations - The ``skipna`` parameter of :meth:`~pandas.api.types.infer_dtype` will switch to ``True`` by default in a future version of pandas (:issue:`17066`, :issue:`24050`) - In :meth:`Series.where` with Categorical data, providing an ``other`` that is not present in the categories is deprecated. Convert the categorical to a different dtype or add the ``other`` to the categories first (:issue:`24077`). - :meth:`Series.clip_lower`, :meth:`Series.clip_upper`, :meth:`DataFrame.clip_lower` and :meth:`DataFrame.clip_upper` are deprecated and will be removed in a future version. Use ``Series.clip(lower=threshold)``, ``Series.clip(upper=threshold)`` and the equivalent ``DataFrame`` methods (:issue:`24203`) +- :meth:`Series.nonzero` is deprecated and will be removed in a future version (:issue:`18262`) .. 
_whatsnew_0240.deprecations.datetimelike_int_ops: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index aa78fc6c8e731..7bbbdd70e062e 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -4589,7 +4589,7 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None, else: raise TypeError('must specify how or thresh') - result = self._take(mask.nonzero()[0], axis=axis) + result = self.loc(axis=axis)[mask] if inplace: self._update_inplace(result) @@ -4624,7 +4624,7 @@ def drop_duplicates(self, subset=None, keep='first', inplace=False): duplicated = self.duplicated(subset, keep=keep) if inplace: - inds, = (-duplicated).nonzero() + inds, = (-duplicated)._ndarray_values.nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: diff --git a/pandas/core/series.py b/pandas/core/series.py index 5722fcb1b67a5..eb412add7bbbb 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -540,6 +540,9 @@ def nonzero(self): """ Return the *integer* indices of the elements that are non-zero. + .. deprecated:: 0.24.0 + Please use .to_numpy().nonzero() as a replacement. + This method is equivalent to calling `numpy.nonzero` on the series data. For compatibility with NumPy, the return value is the same (a tuple with an array of indices for each dimension), @@ -569,6 +572,10 @@ def nonzero(self): d 4 dtype: int64 """ + msg = ("Series.nonzero() is deprecated " + "and will be removed in a future version." 
+ "Use Series.to_numpy().nonzero() instead") + warnings.warn(msg, FutureWarning, stacklevel=2) return self._values.nonzero() def put(self, *args, **kwargs): diff --git a/pandas/io/stata.py b/pandas/io/stata.py index aad57fc489fb6..b5e7eb24465f5 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -1629,7 +1629,8 @@ def _do_convert_missing(self, data, convert_missing): continue if convert_missing: # Replacement follows Stata notation - missing_loc = np.argwhere(missing) + + missing_loc = np.argwhere(missing._ndarray_values) umissing, umissing_loc = np.unique(series[missing], return_inverse=True) replacement = Series(series, dtype=np.object) diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index b877ed93f07a2..f113140261aea 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -2168,7 +2168,7 @@ def test_reindex_level(self): def verify_first_level(df, level, idx, check_index_type=True): def f(val): - return np.nonzero(df[level] == val)[0] + return np.nonzero((df[level] == val).to_numpy())[0] i = np.concatenate(list(map(f, idx))) left = df.set_index(icol).reindex(idx, level=level) right = df.iloc[i].set_index(icol) diff --git a/pandas/tests/series/test_missing.py b/pandas/tests/series/test_missing.py index ffd21fb449864..90ef465c5f239 100644 --- a/pandas/tests/series/test_missing.py +++ b/pandas/tests/series/test_missing.py @@ -1324,3 +1324,9 @@ def test_series_interpolate_intraday(self): result = ts.reindex(new_index).interpolate(method='time') tm.assert_numpy_array_equal(result.values, exp.values) + + def test_nonzero_warning(self): + # GH 24048 + ser = pd.Series([1, 0, 3, 4]) + with tm.assert_produces_warning(FutureWarning): + ser.nonzero()
xref #18262 - passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24048
2018-12-02T15:01:14Z
2019-01-05T22:47:46Z
2019-01-05T22:47:46Z
2019-01-05T22:47:49Z
Add test for rdivmod on EA array (GH23287)
diff --git a/pandas/tests/extension/base/ops.py b/pandas/tests/extension/base/ops.py index cd5e55d9871b2..2ac68c52d53c7 100644 --- a/pandas/tests/extension/base/ops.py +++ b/pandas/tests/extension/base/ops.py @@ -91,10 +91,16 @@ def test_divmod(self, data): self._check_divmod_op(s, divmod, 1, exc=self.divmod_exc) self._check_divmod_op(1, ops.rdivmod, s, exc=self.divmod_exc) - def test_divmod_series_array(self, data): + def test_divmod_series_array(self, data, data_for_twos): s = pd.Series(data) self._check_divmod_op(s, divmod, data) + other = data_for_twos + self._check_divmod_op(other, ops.rdivmod, s) + + other = pd.Series(other) + self._check_divmod_op(other, ops.rdivmod, s) + def test_add_series_with_extension_array(self, data): s = pd.Series(data) result = s + data diff --git a/pandas/tests/extension/conftest.py b/pandas/tests/extension/conftest.py index 3cc2d313b09f5..b6e839f250e4e 100644 --- a/pandas/tests/extension/conftest.py +++ b/pandas/tests/extension/conftest.py @@ -21,6 +21,12 @@ def data(): raise NotImplementedError +@pytest.fixture +def data_for_twos(): + """Length-100 array in which all the elements are two.""" + raise NotImplementedError + + @pytest.fixture def data_missing(): """Length-2 array with [NA, Valid]""" diff --git a/pandas/tests/extension/decimal/test_decimal.py b/pandas/tests/extension/decimal/test_decimal.py index 6281c5360cd03..686bd898f5171 100644 --- a/pandas/tests/extension/decimal/test_decimal.py +++ b/pandas/tests/extension/decimal/test_decimal.py @@ -23,6 +23,11 @@ def data(): return DecimalArray(make_data()) +@pytest.fixture +def data_for_twos(): + return DecimalArray([decimal.Decimal(2) for _ in range(100)]) + + @pytest.fixture def data_missing(): return DecimalArray([decimal.Decimal('NaN'), decimal.Decimal(1)]) diff --git a/pandas/tests/extension/json/test_json.py b/pandas/tests/extension/json/test_json.py index 9ee131950f19c..8c7e99b7d0cc5 100644 --- a/pandas/tests/extension/json/test_json.py +++ 
b/pandas/tests/extension/json/test_json.py @@ -290,6 +290,11 @@ def test_add_series_with_extension_array(self, data): with pytest.raises(TypeError, match="unsupported"): ser + data + def test_divmod_series_array(self): + # GH 23287 + # skipping because it is not implemented + pass + def _check_divmod_op(self, s, op, other, exc=NotImplementedError): return super(TestArithmeticOps, self)._check_divmod_op( s, op, other, exc=TypeError diff --git a/pandas/tests/extension/test_categorical.py b/pandas/tests/extension/test_categorical.py index ac52d8f15b8ce..9871d0d8f96f5 100644 --- a/pandas/tests/extension/test_categorical.py +++ b/pandas/tests/extension/test_categorical.py @@ -214,6 +214,11 @@ def test_add_series_with_extension_array(self, data): with pytest.raises(TypeError, match="cannot perform"): ser + data + def test_divmod_series_array(self): + # GH 23287 + # skipping because it is not implemented + pass + def _check_divmod_op(self, s, op, other, exc=NotImplementedError): return super(TestArithmeticOps, self)._check_divmod_op( s, op, other, exc=TypeError diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py index 00ad35bf6a924..e3fdd0db3e8b4 100644 --- a/pandas/tests/extension/test_datetime.py +++ b/pandas/tests/extension/test_datetime.py @@ -147,6 +147,11 @@ def test_arith_series_with_array(self, data, all_arithmetic_operators): def test_error(self, data, all_arithmetic_operators): pass + def test_divmod_series_array(self): + # GH 23287 + # skipping because it is not implemented + pass + @pytest.mark.xfail(reason="different implementation", strict=False) def test_direct_arith_with_series_returns_not_implemented(self, data): # Right now, we have trouble with this. 
Returning NotImplemented diff --git a/pandas/tests/extension/test_integer.py b/pandas/tests/extension/test_integer.py index aadf9f2f12b68..a8dcabbb824d5 100644 --- a/pandas/tests/extension/test_integer.py +++ b/pandas/tests/extension/test_integer.py @@ -42,6 +42,11 @@ def data(dtype): return integer_array(make_data(), dtype=dtype) +@pytest.fixture +def data_for_twos(dtype): + return integer_array(np.ones(100) * 2, dtype=dtype) + + @pytest.fixture def data_missing(dtype): return integer_array([np.nan, 1], dtype=dtype) diff --git a/pandas/tests/extension/test_period.py b/pandas/tests/extension/test_period.py index 813efcb5678d3..fb3c4e87abcf5 100644 --- a/pandas/tests/extension/test_period.py +++ b/pandas/tests/extension/test_period.py @@ -20,6 +20,11 @@ def data(dtype): return PeriodArray(np.arange(1970, 2070), freq=dtype.freq) +@pytest.fixture +def data_for_twos(dtype): + return PeriodArray(np.ones(100) * 2, freq=dtype.freq) + + @pytest.fixture def data_for_sorting(dtype): return PeriodArray([2018, 2019, 2017], freq=dtype.freq) diff --git a/pandas/tests/extension/test_sparse.py b/pandas/tests/extension/test_sparse.py index 146dea2b65d83..3e1186f59478f 100644 --- a/pandas/tests/extension/test_sparse.py +++ b/pandas/tests/extension/test_sparse.py @@ -34,6 +34,11 @@ def data(request): return res +@pytest.fixture +def data_for_twos(request): + return SparseArray(np.ones(100) * 2) + + @pytest.fixture(params=[0, np.nan]) def data_missing(request): """Length 2 array with [NA, Valid]"""
- closes #23287 - test added - passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/24047
2018-12-02T13:34:22Z
2019-03-20T00:05:18Z
2019-03-20T00:05:18Z
2019-03-20T00:05:21Z
BUG: all-na corner case for str.cat
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 75d2196fe0b1e..3cec48be9b7ef 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1365,6 +1365,7 @@ Strings - Bug in :meth:`Index.str.partition` was not nan-safe (:issue:`23558`). - Bug in :meth:`Index.str.split` was not nan-safe (:issue:`23677`). - Bug :func:`Series.str.contains` not respecting the ``na`` argument for a ``Categorical`` dtype ``Series`` (:issue:`22158`) +- Bug in :meth:`Index.str.cat` when the result contained only ``NaN`` (:issue:`24044`) Interval ^^^^^^^^ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 0b791f6f91aa3..995700e79cb50 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -2260,9 +2260,11 @@ def cat(self, others=None, sep=None, na_rep=None, join=None): result = cat_core(all_cols, sep) if isinstance(self._orig, Index): - result = Index(result, name=self._orig.name) + # add dtype for case that result is all-NA + result = Index(result, dtype=object, name=self._orig.name) else: # Series - result = Series(result, index=data.index, name=self._orig.name) + result = Series(result, dtype=object, index=data.index, + name=self._orig.name) return result _shared_docs['str_split'] = (""" diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 117984ce89743..ced8d37678b8b 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -630,11 +630,31 @@ def test_str_cat_align_mixed_inputs(self, join): with pytest.raises(ValueError, match=rgx): s.str.cat([t, z], join=join) - def test_str_cat_raises(self): - # non-strings hiding behind object dtype - s = Series([1, 2, 3, 4], dtype='object') - with pytest.raises(TypeError, match="unsupported operand type.*"): - s.str.cat(s) + @pytest.mark.parametrize('box', [Series, Index]) + @pytest.mark.parametrize('other', [Series, Index]) + def test_str_cat_all_na(self, box, other): + # GH 24044 + + # check that all NaNs in 
caller / target work + s = Index(['a', 'b', 'c', 'd']) + s = s if box == Index else Series(s, index=s) + t = other([np.nan] * 4, dtype=object) + # add index of s for alignment + t = t if other == Index else Series(t, index=s) + + # all-NA target + if box == Series: + expected = Series([np.nan] * 4, index=s.index, dtype=object) + else: # box == Index + expected = Index([np.nan] * 4, dtype=object) + result = s.str.cat(t, join='left') + assert_series_or_index_equal(result, expected) + + # all-NA caller (only for Series) + if other == Series: + expected = Series([np.nan] * 4, dtype=object, index=t.index) + result = t.str.cat(s, join='left') + tm.assert_series_equal(result, expected) def test_str_cat_special_cases(self): s = Series(['a', 'b', 'c', 'd'])
- [x] closes #24044, split off from #23167 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24045
2018-12-02T11:56:17Z
2018-12-02T20:58:49Z
2018-12-02T20:58:49Z
2018-12-02T23:14:53Z
CLN: Adjust cdef types to fix MacPython 32 bit build
diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 67c2793e4bcef..65f3894cb631a 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -887,8 +887,8 @@ def tz_localize_to_utc(ndarray[int64_t] vals, object tz, object ambiguous=None, int64_t *tdata int64_t v, left, right, val, v_left, v_right, new_local, remaining_mins int64_t HOURS_NS = HOUR_SECONDS * 1000000000 - ndarray[int64_t] trans, result, result_a, result_b, dst_hours - ndarray[int64_t] trans_idx, grp, delta, a_idx, b_idx, one_diff + ndarray[int64_t] trans, result, result_a, result_b, dst_hours, delta + ndarray trans_idx, grp, a_idx, b_idx, one_diff npy_datetimestruct dts bint infer_dst = False, is_dst = False, fill = False bint shift = False, fill_nonexist = False
- [x] closes https://github.com/pandas-dev/pandas/pull/23984#issuecomment-443179198 - [ ] tests added / passed Removing the types from these array declarations because the MacPython 32 bit build was failing. cc @jreback
https://api.github.com/repos/pandas-dev/pandas/pulls/24041
2018-12-02T04:26:55Z
2018-12-02T16:39:32Z
2018-12-02T16:39:32Z
2018-12-02T21:51:44Z
REF: Refactor Datetimelike delegation
diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 0dedd8fe1cf4b..db1369fe41911 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -2,6 +2,7 @@ """ Base and utility classes for tseries type pandas objects. """ +import operator import warnings import numpy as np @@ -19,6 +20,7 @@ from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries from pandas.core import algorithms, ops +from pandas.core.accessor import PandasDelegate from pandas.core.arrays.datetimelike import ( DatetimeLikeArrayMixin, _ensure_datetimelike_to_i8) import pandas.core.indexes.base as ibase @@ -637,3 +639,48 @@ def f(self): f.__name__ = fget.__name__ f.__doc__ = fget.__doc__ return property(f) + + +class DatetimelikeDelegateMixin(PandasDelegate): + """ + Delegation mechanism, specific for Datetime, Timedelta, and Period types. + + Functionality is delegated from the Index class to an Array class. A + few things can be customized + + * _delegate_class : type + The class being delegated to. + * _delegated_methods, delegated_properties : List + The list of property / method names being delagated. 
+ * raw_methods : Set + The set of methods whose results should should *not* be + boxed in an index, after being returned from the array + * raw_properties : Set + The set of properties whose results should should *not* be + boxed in an index, after being returned from the array + """ + # raw_methods : dispatch methods that shouldn't be boxed in an Index + _raw_methods = set() + # raw_properties : dispatch properties that shouldn't be boxed in an Index + _raw_properties = set() + name = None + _data = None + + @property + def _delegate_class(self): + raise AbstractMethodError + + def _delegate_property_get(self, name, *args, **kwargs): + result = getattr(self._data, name) + if name not in self._raw_properties: + result = Index(result, name=self.name) + return result + + def _delegate_property_set(self, name, value, *args, **kwargs): + setattr(self._data, name, value) + + def _delegate_method(self, name, *args, **kwargs): + result = operator.methodcaller(name, *args, **kwargs)(self._data) + if name not in self._raw_methods: + result = Index(result, name=self.name) + return result diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 3cdefb02ef8b3..71f55f9021eac 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -1,6 +1,5 @@ # pylint: disable=E1101,E1103,W0232 from datetime import datetime, timedelta -import operator import warnings import numpy as np @@ -18,15 +17,16 @@ from pandas import compat from pandas.core import common as com -from pandas.core.accessor import PandasDelegate, delegate_names +from pandas.core.accessor import delegate_names from pandas.core.algorithms import unique1d import pandas.core.arrays.datetimelike as dtl +from pandas.core.arrays.datetimelike import DatelikeOps from pandas.core.arrays.period import PeriodArray, period_array from pandas.core.base import _shared_docs import pandas.core.indexes.base as ibase from pandas.core.indexes.base import _index_shared_docs, ensure_index from 
pandas.core.indexes.datetimelike import ( - DatetimeIndexOpsMixin, wrap_arithmetic_op) + DatetimeIndexOpsMixin, DatetimelikeDelegateMixin, wrap_arithmetic_op) from pandas.core.indexes.datetimes import DatetimeIndex, Index, Int64Index from pandas.core.missing import isna from pandas.core.ops import get_op_result_name @@ -54,37 +54,26 @@ def _new_PeriodIndex(cls, **d): return cls(values, **d) -class PeriodDelegateMixin(PandasDelegate): +class PeriodDelegateMixin(DatetimelikeDelegateMixin): """ Delegate from PeriodIndex to PeriodArray. """ - def _delegate_property_get(self, name, *args, **kwargs): - result = getattr(self._data, name) - box_ops = ( - set(PeriodArray._datetimelike_ops) - set(PeriodArray._bool_ops) - ) - if name in box_ops: - result = Index(result, name=self.name) - return result - - def _delegate_property_set(self, name, value, *args, **kwargs): - setattr(self._data, name, value) - - def _delegate_method(self, name, *args, **kwargs): - result = operator.methodcaller(name, *args, **kwargs)(self._data) - return Index(result, name=self.name) + _delegate_class = PeriodArray + _delegated_properties = PeriodArray._datetimelike_ops + _delegated_methods = ( + set(PeriodArray._datetimelike_methods) | {'_addsub_int_array'} + ) + _raw_properties = {'is_leap_year'} @delegate_names(PeriodArray, - PeriodArray._datetimelike_ops + ['size', 'asi8', 'shape'], + PeriodDelegateMixin._delegated_properties, typ='property') @delegate_names(PeriodArray, - [x for x in PeriodArray._datetimelike_methods - if x not in {"asfreq", "to_timestamp"}], - typ="method", - overwrite=True) -class PeriodIndex(DatetimeIndexOpsMixin, - Int64Index, PeriodDelegateMixin): + PeriodDelegateMixin._delegated_methods, + typ="method") +class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index, + PeriodDelegateMixin): """ Immutable ndarray holding ordinal values indicating regular periods in time such as particular years, quarters, months, etc. 
@@ -349,21 +338,6 @@ def _maybe_box_as_values(self, values, **attribs): freq = attribs['freq'] return PeriodArray(values, freq=freq) - # ------------------------------------------------------------------------ - # Dispatch and maybe box. Not done in delegate_names because we box - # different from those (which use Index). - - def asfreq(self, freq=None, how='E'): - result = self._data.asfreq(freq=freq, how=how) - return self._simple_new(result, name=self.name) - - def to_timestamp(self, freq=None, how='start'): - from pandas import DatetimeIndex - result = self._data.to_timestamp(freq=freq, how=how) - return DatetimeIndex._simple_new(result.asi8, - name=self.name, - freq=result.freq) - def _maybe_convert_timedelta(self, other): """ Convert timedelta-like input to an integer multiple of self.freq
This is a generalization of PeriodIndex's dispatching to PeriodArray, without any actual changes yet. This is split from #24024, where DatetimeIndex and TimedeltaIndex will implement and inherit from delgates similiar to PeriodDelegateMixin.
https://api.github.com/repos/pandas-dev/pandas/pulls/24039
2018-12-01T20:32:14Z
2018-12-03T13:36:14Z
2018-12-03T13:36:14Z
2018-12-03T13:36:30Z
REF: Refactor Date/TimeLikeOps
diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index 83ee335aa5465..a6f254c79fb51 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -10,11 +10,12 @@ from pandas._libs.tslibs.period import ( DIFFERENT_FREQ_INDEX, IncompatibleFrequency, Period) from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds -from pandas._libs.tslibs.timestamps import maybe_integer_op_deprecated +from pandas._libs.tslibs.timestamps import ( + RoundTo, maybe_integer_op_deprecated, round_nsint64) import pandas.compat as compat from pandas.errors import ( AbstractMethodError, NullFrequencyError, PerformanceWarning) -from pandas.util._decorators import deprecate_kwarg +from pandas.util._decorators import Appender, deprecate_kwarg from pandas.core.dtypes.common import ( is_bool_dtype, is_datetime64_any_dtype, is_datetime64_dtype, @@ -80,6 +81,189 @@ def _get_attributes_dict(self): return {k: getattr(self, k, None) for k in self._attributes} +class DatelikeOps(object): + """ + Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex. + """ + + def strftime(self, date_format): + from pandas import Index + return Index(self.format(date_format=date_format), + dtype=compat.text_type) + strftime.__doc__ = """ + Convert to Index using specified date_format. + + Return an Index of formatted strings specified by date_format, which + supports the same string format as the python standard library. Details + of the string format can be found in `python string format doc <{0}>`__ + + Parameters + ---------- + date_format : str + Date format string (e.g. "%Y-%m-%d"). + + Returns + ------- + Index + Index of formatted strings + + See Also + -------- + to_datetime : Convert the given argument to datetime. + DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. + DatetimeIndex.round : Round the DatetimeIndex to the specified freq. 
+ DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. + + Examples + -------- + >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), + ... periods=3, freq='s') + >>> rng.strftime('%B %d, %Y, %r') + Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM', + 'March 10, 2018, 09:00:02 AM'], + dtype='object') + """.format("https://docs.python.org/3/library/datetime.html" + "#strftime-and-strptime-behavior") + + +class TimelikeOps(object): + """ + Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex. + """ + + _round_doc = ( + """ + Perform {op} operation on the data to the specified `freq`. + + Parameters + ---------- + freq : str or Offset + The frequency level to {op} the index to. Must be a fixed + frequency like 'S' (second) not 'ME' (month end). See + :ref:`frequency aliases <timeseries.offset_aliases>` for + a list of possible `freq` values. + ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' + Only relevant for DatetimeIndex: + + - 'infer' will attempt to infer fall dst-transition hours based on + order + - bool-ndarray where True signifies a DST time, False designates + a non-DST time (note that this flag is only applicable for + ambiguous times) + - 'NaT' will return NaT where there are ambiguous times + - 'raise' will raise an AmbiguousTimeError if there are ambiguous + times + + .. versionadded:: 0.24.0 + nonexistent : 'shift', 'NaT', default 'raise' + A nonexistent time does not exist in a particular timezone + where clocks moved forward due to DST. + + - 'shift' will shift the nonexistent time forward to the closest + existing time + - 'NaT' will return NaT where there are nonexistent times + - 'raise' will raise an NonExistentTimeError if there are + nonexistent times + + .. versionadded:: 0.24.0 + + Returns + ------- + DatetimeIndex, TimedeltaIndex, or Series + Index of the same type for a DatetimeIndex or TimedeltaIndex, + or a Series with the same index for a Series. 
+ + Raises + ------ + ValueError if the `freq` cannot be converted. + + Examples + -------- + **DatetimeIndex** + + >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min') + >>> rng + DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00', + '2018-01-01 12:01:00'], + dtype='datetime64[ns]', freq='T') + """) + + _round_example = ( + """>>> rng.round('H') + DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', + '2018-01-01 12:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.round("H") + 0 2018-01-01 12:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 12:00:00 + dtype: datetime64[ns] + """) + + _floor_example = ( + """>>> rng.floor('H') + DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00', + '2018-01-01 12:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.floor("H") + 0 2018-01-01 11:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 12:00:00 + dtype: datetime64[ns] + """ + ) + + _ceil_example = ( + """>>> rng.ceil('H') + DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', + '2018-01-01 13:00:00'], + dtype='datetime64[ns]', freq=None) + + **Series** + + >>> pd.Series(rng).dt.ceil("H") + 0 2018-01-01 12:00:00 + 1 2018-01-01 12:00:00 + 2 2018-01-01 13:00:00 + dtype: datetime64[ns] + """ + ) + + def _round(self, freq, mode, ambiguous, nonexistent): + # round the local times + values = _ensure_datetimelike_to_i8(self) + result = round_nsint64(values, mode, freq) + result = self._maybe_mask_results(result, fill_value=NaT) + + attribs = self._get_attributes_dict() + attribs['freq'] = None + if 'tz' in attribs: + attribs['tz'] = None + return self._ensure_localized( + self._shallow_copy(result, **attribs), ambiguous, nonexistent + ) + + @Appender((_round_doc + _round_example).format(op="round")) + def round(self, freq, ambiguous='raise', nonexistent='raise'): + return self._round( + freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent + ) + + @Appender((_round_doc + 
_floor_example).format(op="floor")) + def floor(self, freq, ambiguous='raise', nonexistent='raise'): + return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) + + @Appender((_round_doc + _ceil_example).format(op="ceil")) + def ceil(self, freq, ambiguous='raise', nonexistent='raise'): + return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) + + class DatetimeLikeArrayMixin(ExtensionOpsMixin, AttributesMixin): """ Shared Base/Mixin class for DatetimeArray, TimedeltaArray, PeriodArray @@ -1023,3 +1207,39 @@ def validate_dtype_freq(dtype, freq): raise IncompatibleFrequency('specified freq and dtype ' 'are different') return freq + + +def _ensure_datetimelike_to_i8(other, to_utc=False): + """ + Helper for coercing an input scalar or array to i8. + + Parameters + ---------- + other : 1d array + to_utc : bool, default False + If True, convert the values to UTC before extracting the i8 values + If False, extract the i8 values directly. + + Returns + ------- + i8 1d array + """ + from pandas import Index + from pandas.core.arrays import PeriodArray + + if lib.is_scalar(other) and isna(other): + return iNaT + elif isinstance(other, (PeriodArray, ABCIndexClass)): + # convert tz if needed + if getattr(other, 'tz', None) is not None: + if to_utc: + other = other.tz_convert('UTC') + else: + other = other.tz_localize(None) + else: + try: + return np.array(other, copy=False).view('i8') + except TypeError: + # period array cannot be coerced to int + other = Index(other) + return other.asi8 diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 4d3caaacca1c1..050442c530314 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -156,7 +156,9 @@ def wrapper(self, other): return compat.set_function_name(wrapper, opname, cls) -class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin): +class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin, + dtl.TimelikeOps, + dtl.DatelikeOps): """ Assumes that subclass 
__new__/__init__ defines: tz diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 856a01e41ce13..6a7ce7033efa0 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -129,7 +129,7 @@ def method(self, other): return method -class TimedeltaArrayMixin(dtl.DatetimeLikeArrayMixin): +class TimedeltaArrayMixin(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps): _typ = "timedeltaarray" __array_priority__ = 1000 diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 5e25efe77d8b9..0dedd8fe1cf4b 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -7,8 +7,6 @@ import numpy as np from pandas._libs import NaT, iNaT, lib -from pandas._libs.tslibs.timestamps import RoundTo, round_nsint64 -import pandas.compat as compat from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, cache_readonly @@ -19,11 +17,10 @@ is_integer, is_integer_dtype, is_list_like, is_object_dtype, is_period_dtype, is_scalar, is_string_dtype) from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries -from pandas.core.dtypes.missing import isna from pandas.core import algorithms, ops -from pandas.core.arrays import PeriodArray -from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays.datetimelike import ( + DatetimeLikeArrayMixin, _ensure_datetimelike_to_i8) import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.tools.timedeltas import to_timedelta @@ -33,188 +30,6 @@ _index_doc_kwargs = dict(ibase._index_doc_kwargs) -class DatelikeOps(object): - """ - Common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex. 
- """ - - def strftime(self, date_format): - return Index(self.format(date_format=date_format), - dtype=compat.text_type) - strftime.__doc__ = """ - Convert to Index using specified date_format. - - Return an Index of formatted strings specified by date_format, which - supports the same string format as the python standard library. Details - of the string format can be found in `python string format doc <{0}>`__ - - Parameters - ---------- - date_format : str - Date format string (e.g. "%Y-%m-%d"). - - Returns - ------- - Index - Index of formatted strings - - See Also - -------- - to_datetime : Convert the given argument to datetime. - DatetimeIndex.normalize : Return DatetimeIndex with times to midnight. - DatetimeIndex.round : Round the DatetimeIndex to the specified freq. - DatetimeIndex.floor : Floor the DatetimeIndex to the specified freq. - - Examples - -------- - >>> rng = pd.date_range(pd.Timestamp("2018-03-10 09:00"), - ... periods=3, freq='s') - >>> rng.strftime('%B %d, %Y, %r') - Index(['March 10, 2018, 09:00:00 AM', 'March 10, 2018, 09:00:01 AM', - 'March 10, 2018, 09:00:02 AM'], - dtype='object') - """.format("https://docs.python.org/3/library/datetime.html" - "#strftime-and-strptime-behavior") - - -class TimelikeOps(object): - """ - Common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex. - """ - - _round_doc = ( - """ - Perform {op} operation on the data to the specified `freq`. - - Parameters - ---------- - freq : str or Offset - The frequency level to {op} the index to. Must be a fixed - frequency like 'S' (second) not 'ME' (month end). See - :ref:`frequency aliases <timeseries.offset_aliases>` for - a list of possible `freq` values. 
- ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise' - Only relevant for DatetimeIndex: - - - 'infer' will attempt to infer fall dst-transition hours based on - order - - bool-ndarray where True signifies a DST time, False designates - a non-DST time (note that this flag is only applicable for - ambiguous times) - - 'NaT' will return NaT where there are ambiguous times - - 'raise' will raise an AmbiguousTimeError if there are ambiguous - times - - .. versionadded:: 0.24.0 - nonexistent : 'shift', 'NaT', default 'raise' - A nonexistent time does not exist in a particular timezone - where clocks moved forward due to DST. - - - 'shift' will shift the nonexistent time forward to the closest - existing time - - 'NaT' will return NaT where there are nonexistent times - - 'raise' will raise an NonExistentTimeError if there are - nonexistent times - - .. versionadded:: 0.24.0 - - Returns - ------- - DatetimeIndex, TimedeltaIndex, or Series - Index of the same type for a DatetimeIndex or TimedeltaIndex, - or a Series with the same index for a Series. - - Raises - ------ - ValueError if the `freq` cannot be converted. 
- - Examples - -------- - **DatetimeIndex** - - >>> rng = pd.date_range('1/1/2018 11:59:00', periods=3, freq='min') - >>> rng - DatetimeIndex(['2018-01-01 11:59:00', '2018-01-01 12:00:00', - '2018-01-01 12:01:00'], - dtype='datetime64[ns]', freq='T') - """) - - _round_example = ( - """>>> rng.round('H') - DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', - '2018-01-01 12:00:00'], - dtype='datetime64[ns]', freq=None) - - **Series** - - >>> pd.Series(rng).dt.round("H") - 0 2018-01-01 12:00:00 - 1 2018-01-01 12:00:00 - 2 2018-01-01 12:00:00 - dtype: datetime64[ns] - """) - - _floor_example = ( - """>>> rng.floor('H') - DatetimeIndex(['2018-01-01 11:00:00', '2018-01-01 12:00:00', - '2018-01-01 12:00:00'], - dtype='datetime64[ns]', freq=None) - - **Series** - - >>> pd.Series(rng).dt.floor("H") - 0 2018-01-01 11:00:00 - 1 2018-01-01 12:00:00 - 2 2018-01-01 12:00:00 - dtype: datetime64[ns] - """ - ) - - _ceil_example = ( - """>>> rng.ceil('H') - DatetimeIndex(['2018-01-01 12:00:00', '2018-01-01 12:00:00', - '2018-01-01 13:00:00'], - dtype='datetime64[ns]', freq=None) - - **Series** - - >>> pd.Series(rng).dt.ceil("H") - 0 2018-01-01 12:00:00 - 1 2018-01-01 12:00:00 - 2 2018-01-01 13:00:00 - dtype: datetime64[ns] - """ - ) - - def _round(self, freq, mode, ambiguous, nonexistent): - # round the local times - values = _ensure_datetimelike_to_i8(self) - result = round_nsint64(values, mode, freq) - result = self._maybe_mask_results(result, fill_value=NaT) - - attribs = self._get_attributes_dict() - attribs['freq'] = None - if 'tz' in attribs: - attribs['tz'] = None - return self._ensure_localized( - self._shallow_copy(result, **attribs), ambiguous, nonexistent - ) - - @Appender((_round_doc + _round_example).format(op="round")) - def round(self, freq, ambiguous='raise', nonexistent='raise'): - return self._round( - freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent - ) - - @Appender((_round_doc + _floor_example).format(op="floor")) - def floor(self, freq, 
ambiguous='raise', nonexistent='raise'): - return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) - - @Appender((_round_doc + _ceil_example).format(op="ceil")) - def ceil(self, freq, ambiguous='raise', nonexistent='raise'): - return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) - - class DatetimeIndexOpsMixin(DatetimeLikeArrayMixin): """ common ops mixin to support a unified interface datetimelike Index """ @@ -754,39 +569,6 @@ def _time_shift(self, periods, freq=None): return result -def _ensure_datetimelike_to_i8(other, to_utc=False): - """ - Helper for coercing an input scalar or array to i8. - - Parameters - ---------- - other : 1d array - to_utc : bool, default False - If True, convert the values to UTC before extracting the i8 values - If False, extract the i8 values directly. - - Returns - ------- - i8 1d array - """ - if is_scalar(other) and isna(other): - return iNaT - elif isinstance(other, (PeriodArray, ABCIndexClass)): - # convert tz if needed - if getattr(other, 'tz', None) is not None: - if to_utc: - other = other.tz_convert('UTC') - else: - other = other.tz_localize(None) - else: - try: - return np.array(other, copy=False).view('i8') - except TypeError: - # period array cannot be coerced to int - other = Index(other) - return other.asi8 - - def wrap_arithmetic_op(self, other, result): if result is NotImplemented: return NotImplemented diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 01901d022da32..8f36096d128c2 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -31,8 +31,7 @@ import pandas.core.common as com from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.datetimelike import ( - DatelikeOps, DatetimeIndexOpsMixin, TimelikeOps, wrap_array_method, - wrap_field_accessor) + DatetimeIndexOpsMixin, wrap_array_method, wrap_field_accessor) from pandas.core.indexes.numeric import Int64Index from pandas.core.ops 
import get_op_result_name import pandas.core.tools.datetimes as tools @@ -62,8 +61,7 @@ def _new_DatetimeIndex(cls, d): return result -class DatetimeIndex(DatetimeArray, DatelikeOps, TimelikeOps, - DatetimeIndexOpsMixin, Int64Index): +class DatetimeIndex(DatetimeArray, DatetimeIndexOpsMixin, Int64Index): """ Immutable ndarray of datetime64 data, represented internally as int64, and which can be boxed to Timestamp objects that are subclasses of datetime and diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 56df454bddf1c..3cdefb02ef8b3 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -26,7 +26,7 @@ import pandas.core.indexes.base as ibase from pandas.core.indexes.base import _index_shared_docs, ensure_index from pandas.core.indexes.datetimelike import ( - DatelikeOps, DatetimeIndexOpsMixin, wrap_arithmetic_op) + DatetimeIndexOpsMixin, wrap_arithmetic_op) from pandas.core.indexes.datetimes import DatetimeIndex, Index, Int64Index from pandas.core.missing import isna from pandas.core.ops import get_op_result_name @@ -83,7 +83,7 @@ def _delegate_method(self, name, *args, **kwargs): if x not in {"asfreq", "to_timestamp"}], typ="method", overwrite=True) -class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, +class PeriodIndex(DatetimeIndexOpsMixin, Int64Index, PeriodDelegateMixin): """ Immutable ndarray holding ordinal values indicating regular periods in diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index ed4e43df8f41a..e33d61d29d302 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -22,7 +22,7 @@ import pandas.core.common as com from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.datetimelike import ( - DatetimeIndexOpsMixin, TimelikeOps, wrap_arithmetic_op, wrap_array_method, + DatetimeIndexOpsMixin, wrap_arithmetic_op, wrap_array_method, wrap_field_accessor) from pandas.core.indexes.numeric 
import Int64Index from pandas.core.ops import get_op_result_name @@ -31,8 +31,7 @@ from pandas.tseries.frequencies import to_offset -class TimedeltaIndex(TimedeltaArray, DatetimeIndexOpsMixin, - TimelikeOps, Int64Index): +class TimedeltaIndex(TimedeltaArray, DatetimeIndexOpsMixin, Int64Index): """ Immutable ndarray of timedelta64 data, represented internally as int64, and which can be boxed to timedelta objects
No real functional changes, just an inheritance reorganization to make the diff at #24024 smaller. Changes: - Removes DatelikeOps from PeriodIndex (already had strftime via delegation) - Moves Date/TimelikeOps from DatetimeIndex to DatetimeArray - Moves TimelikeOps from TimedeltaIndex to TimedeltaArray
https://api.github.com/repos/pandas-dev/pandas/pulls/24038
2018-12-01T20:16:55Z
2018-12-02T16:56:51Z
2018-12-02T16:56:51Z
2018-12-03T12:29:23Z
ENH: Add columns argument to read_feather() (#24025)
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 7617ad5b428a2..3fb7b925ceb6b 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -25,6 +25,7 @@ New features dataframe's indexes from the resulting Parquet file. (:issue:`20768`) - :meth:`DataFrame.corr` and :meth:`Series.corr` now accept a callable for generic calculation methods of correlation, e.g. histogram intersection (:issue:`22684`) - :func:`DataFrame.to_string` now accepts ``decimal`` as an argument, allowing the user to specify which decimal separator should be used in the output. (:issue:`23614`) +- :func:`DataFrame.read_feather` now accepts ``columns`` as an argument, allowing the user to specify which columns should be read. (:issue:`24025`) .. _whatsnew_0240.values_api: diff --git a/pandas/io/feather_format.py b/pandas/io/feather_format.py index 96ebca16d1892..5c8ab37c7c917 100644 --- a/pandas/io/feather_format.py +++ b/pandas/io/feather_format.py @@ -84,7 +84,7 @@ def to_feather(df, path): @deprecate_kwarg(old_arg_name='nthreads', new_arg_name='use_threads') -def read_feather(path, use_threads=True): +def read_feather(path, columns=None, use_threads=True): """ Load a feather-format object from the file path @@ -93,6 +93,10 @@ def read_feather(path, use_threads=True): Parameters ---------- path : string file path, or file-like object + columns : sequence, default None + If not provided, all columns are read + + .. 
versionadded 0.24.0 nthreads : int, default 1 Number of CPU threads to use when reading to pandas.DataFrame @@ -116,6 +120,8 @@ def read_feather(path, use_threads=True): int_use_threads = int(use_threads) if int_use_threads < 1: int_use_threads = 1 - return feather.read_feather(path, nthreads=int_use_threads) + return feather.read_feather(path, columns=columns, + nthreads=int_use_threads) - return feather.read_feather(path, use_threads=bool(use_threads)) + return feather.read_feather(path, columns=columns, + use_threads=bool(use_threads)) diff --git a/pandas/tests/io/test_feather.py b/pandas/tests/io/test_feather.py index 16b59526c8233..19ecb378b6378 100644 --- a/pandas/tests/io/test_feather.py +++ b/pandas/tests/io/test_feather.py @@ -26,13 +26,16 @@ def check_error_on_write(self, df, exc): with ensure_clean() as path: to_feather(df, path) - def check_round_trip(self, df, **kwargs): + def check_round_trip(self, df, expected=None, **kwargs): + + if expected is None: + expected = df with ensure_clean() as path: to_feather(df, path) result = read_feather(path, **kwargs) - assert_frame_equal(result, df) + assert_frame_equal(result, expected) def test_error(self): @@ -74,6 +77,16 @@ def test_stringify_columns(self): df = pd.DataFrame(np.arange(12).reshape(4, 3)).copy() self.check_error_on_write(df, ValueError) + def test_read_columns(self): + # GH 24025 + df = pd.DataFrame({'col1': list('abc'), + 'col2': list(range(1, 4)), + 'col3': list('xyz'), + 'col4': list(range(4, 7))}) + columns = ['col1', 'col3'] + self.check_round_trip(df, expected=df[columns], + columns=columns) + def test_unsupported_other(self): # period
- [x] closes #24025 - [x] tests added / ~~passed~~ - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry I have added test case but when running ``` pytest pandas/tests/io/test_feather.py ``` test cases (including some of already existing test cases) fails with error ``` NotImplementedError: > 1 ndim Categorical are not supported at this time ``` let me know if this is expected or I'm missing something
https://api.github.com/repos/pandas-dev/pandas/pulls/24034
2018-12-01T10:16:11Z
2018-12-04T12:23:53Z
2018-12-04T12:23:53Z
2018-12-04T12:25:45Z
REF: Move non-raising parts of array_to_datetime outside of try/except
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 609608a0948c5..f545e113949d9 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -520,9 +520,10 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', # specify error conditions assert is_raise or is_ignore or is_coerce + result = np.empty(n, dtype='M8[ns]') + iresult = result.view('i8') + try: - result = np.empty(n, dtype='M8[ns]') - iresult = result.view('i8') for i in range(n): val = values[i] @@ -706,62 +707,85 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', raise TypeError("{typ} is not convertible to datetime" .format(typ=type(val))) - if seen_datetime and seen_integer: - # we have mixed datetimes & integers - - if is_coerce: - # coerce all of the integers/floats to NaT, preserve - # the datetimes and other convertibles - for i in range(n): - val = values[i] - if is_integer_object(val) or is_float_object(val): - result[i] = NPY_NAT - elif is_raise: - raise ValueError( - "mixed datetimes and integers in passed array") - else: - raise TypeError - - if seen_datetime_offset and not utc_convert: - # GH 17697 - # 1) If all the offsets are equal, return one offset for - # the parsed dates to (maybe) pass to DatetimeIndex - # 2) If the offsets are different, then force the parsing down the - # object path where an array of datetimes - # (with individual dateutil.tzoffsets) are returned - is_same_offsets = len(out_tzoffset_vals) == 1 - if not is_same_offsets: - return array_to_datetime_object(values, is_raise, - dayfirst, yearfirst) - else: - tz_offset = out_tzoffset_vals.pop() - tz_out = pytz.FixedOffset(tz_offset / 60.) 
- return result, tz_out except OutOfBoundsDatetime: if is_raise: raise - oresult = np.empty(n, dtype=object) - for i in range(n): - val = values[i] + return ignore_errors_out_of_bounds_fallback(values), tz_out - # set as nan except if its a NaT - if checknull_with_nat(val): - if isinstance(val, float): - oresult[i] = np.nan - else: - oresult[i] = NaT - elif is_datetime64_object(val): - if get_datetime64_value(val) == NPY_NAT: - oresult[i] = NaT - else: - oresult[i] = val.item() - else: - oresult[i] = val - return oresult, tz_out except TypeError: return array_to_datetime_object(values, is_raise, dayfirst, yearfirst) + if seen_datetime and seen_integer: + # we have mixed datetimes & integers + + if is_coerce: + # coerce all of the integers/floats to NaT, preserve + # the datetimes and other convertibles + for i in range(n): + val = values[i] + if is_integer_object(val) or is_float_object(val): + result[i] = NPY_NAT + elif is_raise: + raise ValueError("mixed datetimes and integers in passed array") + else: + return array_to_datetime_object(values, is_raise, + dayfirst, yearfirst) + + if seen_datetime_offset and not utc_convert: + # GH#17697 + # 1) If all the offsets are equal, return one offset for + # the parsed dates to (maybe) pass to DatetimeIndex + # 2) If the offsets are different, then force the parsing down the + # object path where an array of datetimes + # (with individual dateutil.tzoffsets) are returned + is_same_offsets = len(out_tzoffset_vals) == 1 + if not is_same_offsets: + return array_to_datetime_object(values, is_raise, + dayfirst, yearfirst) + else: + tz_offset = out_tzoffset_vals.pop() + tz_out = pytz.FixedOffset(tz_offset / 60.) 
+ return result, tz_out + + +cdef inline ignore_errors_out_of_bounds_fallback(ndarray[object] values): + """ + Fallback for array_to_datetime if an OutOfBoundsDatetime is raised + and errors == "ignore" + + Parameters + ---------- + values : ndarray[object] + + Returns + ------- + ndarray[object] + """ + cdef: + Py_ssize_t i, n = len(values) + object val + + oresult = np.empty(n, dtype=object) + + for i in range(n): + val = values[i] + + # set as nan except if its a NaT + if checknull_with_nat(val): + if isinstance(val, float): + oresult[i] = np.nan + else: + oresult[i] = NaT + elif is_datetime64_object(val): + if get_datetime64_value(val) == NPY_NAT: + oresult[i] = NaT + else: + oresult[i] = val.item() + else: + oresult[i] = val + return oresult + @cython.wraparound(False) @cython.boundscheck(False)
Bit by bit we untangle this function.
https://api.github.com/repos/pandas-dev/pandas/pulls/24032
2018-12-01T02:14:03Z
2018-12-02T18:03:45Z
2018-12-02T18:03:45Z
2018-12-02T19:19:22Z
PERF: do NPY_NAT check inside get_datetime64_nanos
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 609608a0948c5..1205ebbe311e2 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -571,16 +571,13 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', elif is_datetime64_object(val): seen_datetime = 1 - if get_datetime64_value(val) == NPY_NAT: - iresult[i] = NPY_NAT - else: - try: - iresult[i] = get_datetime64_nanos(val) - except OutOfBoundsDatetime: - if is_coerce: - iresult[i] = NPY_NAT - continue - raise + try: + iresult[i] = get_datetime64_nanos(val) + except OutOfBoundsDatetime: + if is_coerce: + iresult[i] = NPY_NAT + continue + raise elif is_integer_object(val) or is_float_object(val): # these must be ns unit by-definition diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 67c2793e4bcef..c8d241e6b7a62 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -62,8 +62,11 @@ cdef inline int64_t get_datetime64_nanos(object val) except? -1: NPY_DATETIMEUNIT unit npy_datetime ival - unit = get_datetime64_unit(val) ival = get_datetime64_value(val) + if ival == NPY_NAT: + return NPY_NAT + + unit = get_datetime64_unit(val) if unit != NPY_FR_ns: pandas_datetime_to_datetimestruct(ival, unit, &dts) @@ -283,10 +286,8 @@ cdef convert_to_tsobject(object ts, object tz, object unit, if ts is None or ts is NaT: obj.value = NPY_NAT elif is_datetime64_object(ts): - if ts.view('i8') == NPY_NAT: - obj.value = NPY_NAT - else: - obj.value = get_datetime64_nanos(ts) + obj.value = get_datetime64_nanos(ts) + if obj.value != NPY_NAT: dt64_to_dtstruct(obj.value, &obj.dts) elif is_integer_object(ts): if ts == NPY_NAT:
Bonus of a little simplification in `array_to_datetime` The perf pickup comes from changing a python call `ts.view('i8')` to a cython call `get_datetime64_value(ts)`. ``` In [3]: val = np.datetime64('NaT', 'D') In [4]: %timeit pd.Timestamp(val) ``` master (leaving out "The slowest run [...]" ``` 100000 loops, best of 3: 2.33 µs per loop 1000000 loops, best of 3: 2.02 µs per loop 100000 loops, best of 3: 2.44 µs per loop 100000 loops, best of 3: 1.4 µs per loop 1000000 loops, best of 3: 2.32 µs per loop ``` PR ``` 1000000 loops, best of 3: 533 ns per loop 1000000 loops, best of 3: 758 ns per loop 1000000 loops, best of 3: 734 ns per loop 1000000 loops, best of 3: 570 ns per loop 1000000 loops, best of 3: 779 ns per loop ``` ``` In [9]: val2 = np.datetime64(55, 'us') In [10]: %timeit pd.Timestamp(val2) ``` master ``` 100000 loops, best of 3: 3.58 µs per loop 100000 loops, best of 3: 3.49 µs per loop 100000 loops, best of 3: 3.65 µs per loop 100000 loops, best of 3: 2.99 µs per loop ``` PR ``` 1000000 loops, best of 3: 1.61 µs per loop 1000000 loops, best of 3: 1.76 µs per loop 1000000 loops, best of 3: 1.66 µs per loop 1000000 loops, best of 3: 1.49 µs per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24031
2018-12-01T01:27:55Z
2018-12-02T16:40:14Z
2018-12-02T16:40:14Z
2018-12-02T16:41:05Z
TST: Check error message in overlapping bins test
diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index b0445f5a9e2d5..19f1a9a8b65c7 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -92,9 +92,11 @@ def test_bins_from_intervalindex(self): np.array([1, 1, 2], dtype='int8')) def test_bins_not_overlapping_from_intervalindex(self): - # verify if issue 23980 is properly solved. + # see gh-23980 + msg = "Overlapping IntervalIndex is not accepted" ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)]) - with pytest.raises(ValueError): + + with pytest.raises(ValueError, match=msg): cut([5, 6], bins=ii) def test_bins_not_monotonic(self):
Follow-up to #23999. xref #23922. cc @jschendel @charlesdong1991
https://api.github.com/repos/pandas-dev/pandas/pulls/24029
2018-12-01T01:09:34Z
2018-12-01T01:59:00Z
2018-12-01T01:59:00Z
2018-12-01T01:59:26Z
BUG: Fix concat series loss of timezone
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index 15476c3bc2e13..e6a08362a70f7 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1545,6 +1545,7 @@ Reshaping - Bug in :meth:`DataFrame.append` with a :class:`Series` with a dateutil timezone would raise a ``TypeError`` (:issue:`23682`) - Bug in ``Series`` construction when passing no data and ``dtype=str`` (:issue:`22477`) - Bug in :func:`cut` with ``bins`` as an overlapping ``IntervalIndex`` where multiple bins were returned per item instead of raising a ``ValueError`` (:issue:`23980`) +- Bug in :func:`pandas.concat` when joining ``Series`` datetimetz with ``Series`` category would lose timezone (:issue:`23816`) - Bug in :meth:`DataFrame.join` when joining on partial MultiIndex would drop names (:issue:`20452`). .. _whatsnew_0240.bug_fixes.sparse: diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 58f1bcbfa74c0..0df0c01dbd47a 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -191,15 +191,6 @@ def _concat_categorical(to_concat, axis=0): A single array, preserving the combined dtypes """ - def _concat_asobject(to_concat): - to_concat = [x.get_values() if is_categorical_dtype(x.dtype) - else np.asarray(x).ravel() for x in to_concat] - res = _concat_compat(to_concat) - if axis == 1: - return res.reshape(1, len(res)) - else: - return res - # we could have object blocks and categoricals here # if we only have a single categoricals then combine everything # else its a non-compat categorical @@ -214,7 +205,14 @@ def _concat_asobject(to_concat): if all(first.is_dtype_equal(other) for other in to_concat[1:]): return union_categoricals(categoricals) - return _concat_asobject(to_concat) + # extract the categoricals & coerce to object if needed + to_concat = [x.get_values() if is_categorical_dtype(x.dtype) + else np.asarray(x).ravel() if not is_datetime64tz_dtype(x) + else np.asarray(x.astype(object)) 
for x in to_concat] + result = _concat_compat(to_concat) + if axis == 1: + result = result.reshape(1, len(result)) + return result def union_categoricals(to_union, sort_categories=False, ignore_order=False): diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 4113fb7f0f11e..bb002f151b455 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -2552,3 +2552,16 @@ def test_concat_series_name_npscalar_tuple(s1name, s2name): result = pd.concat([s1, s2]) expected = pd.Series({'a': 1, 'b': 2, 'c': 5, 'd': 6}) tm.assert_series_equal(result, expected) + + +def test_concat_categorical_tz(): + # GH-23816 + a = pd.Series(pd.date_range('2017-01-01', periods=2, tz='US/Pacific')) + b = pd.Series(['a', 'b'], dtype='category') + result = pd.concat([a, b], ignore_index=True) + expected = pd.Series([ + pd.Timestamp('2017-01-01', tz="US/Pacific"), + pd.Timestamp('2017-01-02', tz="US/Pacific"), + 'a', 'b' + ]) + tm.assert_series_equal(result, expected)
Closes #23816
https://api.github.com/repos/pandas-dev/pandas/pulls/24027
2018-11-30T23:03:45Z
2018-12-05T23:21:47Z
2018-12-05T23:21:47Z
2018-12-05T23:22:15Z
REF: DatetimeLikeArray
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index e770a9e3c47f8..7628c53cefa06 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -430,6 +430,7 @@ Backwards incompatible API changes - ``max_rows`` and ``max_cols`` parameters removed from :class:`HTMLFormatter` since truncation is handled by :class:`DataFrameFormatter` (:issue:`23818`) - :func:`read_csv` will now raise a ``ValueError`` if a column with missing values is declared as having dtype ``bool`` (:issue:`20591`) - The column order of the resultant :class:`DataFrame` from :meth:`MultiIndex.to_frame` is now guaranteed to match the :attr:`MultiIndex.names` order. (:issue:`22420`) +- Incorrectly passing a :class:`DatetimeIndex` to :meth:`MultiIndex.from_tuples`, rather than a sequence of tuples, now raises a ``TypeError`` rather than a ``ValueError`` (:issue:`24024`) - :func:`pd.offsets.generate_range` argument ``time_rule`` has been removed; use ``offset`` instead (:issue:`24157`) Percentage change on groupby @@ -1368,6 +1369,7 @@ Datetimelike - Bug in :class:`DataFrame` with ``datetime64[ns]`` dtype subtracting ``np.datetime64`` object with non-nanosecond unit failing to convert to nanoseconds (:issue:`18874`, :issue:`22163`) - Bug in :class:`DataFrame` comparisons against ``Timestamp``-like objects failing to raise ``TypeError`` for inequality checks with mismatched types (:issue:`8932`, :issue:`22163`) - Bug in :class:`DataFrame` with mixed dtypes including ``datetime64[ns]`` incorrectly raising ``TypeError`` on equality comparisons (:issue:`13128`, :issue:`22163`) +- Bug in :attr:`DataFrame.values` returning a :class:`DatetimeIndex` for a single-column ``DataFrame`` with tz-aware datetime values. 
Now a 2-D :class:`numpy.ndarray` of :class:`Timestamp` objects is returned (:issue:`24024`) - Bug in :meth:`DataFrame.eq` comparison against ``NaT`` incorrectly returning ``True`` or ``NaN`` (:issue:`15697`, :issue:`22163`) - Bug in :class:`DatetimeIndex` subtraction that incorrectly failed to raise ``OverflowError`` (:issue:`22492`, :issue:`22508`) - Bug in :class:`DatetimeIndex` incorrectly allowing indexing with ``Timedelta`` object (:issue:`20464`) @@ -1384,6 +1386,7 @@ Datetimelike - Bug in :func:`period_range` ignoring the frequency of ``start`` and ``end`` when those are provided as :class:`Period` objects (:issue:`20535`). - Bug in :class:`PeriodIndex` with attribute ``freq.n`` greater than 1 where adding a :class:`DateOffset` object would return incorrect results (:issue:`23215`) - Bug in :class:`Series` that interpreted string indices as lists of characters when setting datetimelike values (:issue:`23451`) +- Bug in :class:`DataFrame` when creating a new column from an ndarray of :class:`Timestamp` objects with timezones creating an object-dtype column, rather than datetime with timezone (:issue:`23932`) - Bug in :class:`Timestamp` constructor which would drop the frequency of an input :class:`Timestamp` (:issue:`22311`) - Bug in :class:`DatetimeIndex` where calling ``np.array(dtindex, dtype=object)`` would incorrectly return an array of ``long`` objects (:issue:`23524`) - Bug in :class:`Index` where passing a timezone-aware :class:`DatetimeIndex` and `dtype=object` would incorrectly raise a ``ValueError`` (:issue:`23524`) diff --git a/pandas/_libs/src/ujson/python/objToJSON.c b/pandas/_libs/src/ujson/python/objToJSON.c index 9662c59dddf4c..d0caeb3333548 100644 --- a/pandas/_libs/src/ujson/python/objToJSON.c +++ b/pandas/_libs/src/ujson/python/objToJSON.c @@ -228,6 +228,11 @@ static PyObject *get_values(PyObject *obj) { PRINTMARK(); if (values && !PyArray_CheckExact(values)) { + + if (PyObject_HasAttrString(values, "to_numpy")) { + values = 
PyObject_CallMethod(values, "to_numpy", NULL); + } + if (PyObject_HasAttrString(values, "values")) { PyObject *subvals = get_values(values); PyErr_Clear(); @@ -279,8 +284,8 @@ static PyObject *get_values(PyObject *obj) { repr = PyString_FromString("<unknown dtype>"); } - PyErr_Format(PyExc_ValueError, "%s or %s are not JSON serializable yet", - PyString_AS_STRING(repr), PyString_AS_STRING(typeRepr)); + PyErr_Format(PyExc_ValueError, "%R or %R are not JSON serializable yet", + repr, typeRepr); Py_DECREF(repr); Py_DECREF(typeRepr); diff --git a/pandas/core/arrays/datetimelike.py b/pandas/core/arrays/datetimelike.py index ab5621d857e89..d233e1d09a1e9 100644 --- a/pandas/core/arrays/datetimelike.py +++ b/pandas/core/arrays/datetimelike.py @@ -47,7 +47,7 @@ def cmp_method(self, other): if isinstance(other, ABCDataFrame): return NotImplemented - if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries)): + if isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries, cls)): if other.ndim > 0 and len(self) != len(other): raise ValueError('Lengths must match to compare') @@ -1162,9 +1162,10 @@ def _addsub_offset_array(self, other, op): left = lib.values_from_object(self.astype('O')) res_values = op(left, np.array(other)) + kwargs = {} if not is_period_dtype(self): - return type(self)(res_values, freq='infer') - return self._from_sequence(res_values) + kwargs['freq'] = 'infer' + return self._from_sequence(res_values, **kwargs) def _time_shift(self, periods, freq=None): """ diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 8b0565a36648f..f42930929747d 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -97,6 +97,9 @@ def _dt_array_cmp(cls, op): def wrapper(self, other): meth = getattr(dtl.DatetimeLikeArrayMixin, opname) + # TODO: return NotImplemented for Series / Index and let pandas unbox + # Right now, returning NotImplemented for Index fails because we + # go into the index implementation, which may be 
a bug? other = lib.item_from_zerodim(other) @@ -145,9 +148,16 @@ def wrapper(self, other): return ops.invalid_comparison(self, other, op) else: self._assert_tzawareness_compat(other) - if not hasattr(other, 'asi8'): - # ndarray, Series - other = type(self)(other) + if isinstance(other, (ABCIndexClass, ABCSeries)): + other = other.array + + if (is_datetime64_dtype(other) and + not is_datetime64_ns_dtype(other) or + not hasattr(other, 'asi8')): + # e.g. other.dtype == 'datetime64[s]' + # or an object-dtype ndarray + other = type(self)._from_sequence(other) + result = meth(self, other) o_mask = other._isnan @@ -171,10 +181,24 @@ class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin, dtl.TimelikeOps, dtl.DatelikeOps): """ - Assumes that subclass __new__/__init__ defines: - tz - _freq - _data + Pandas ExtensionArray for tz-naive or tz-aware datetime data. + + .. versionadded:: 0.24.0 + + Parameters + ---------- + values : Series, Index, DatetimeArray, ndarray + The datetime data. + + For DatetimeArray `values` (or a Series or Index boxing one), + `dtype` and `freq` will be extracted from `values`, with + precedence given to + + dtype : numpy.dtype or DatetimeTZDtype + Note that the only NumPy dtype allowed is 'datetime64[ns]'. + freq : str or Offset, optional + copy : bool, default False + Whether to copy the underlying array of values. 
""" _typ = "datetimearray" _scalar_type = Timestamp @@ -213,38 +237,84 @@ class DatetimeArrayMixin(dtl.DatetimeLikeArrayMixin, _dtype = None # type: Union[np.dtype, DatetimeTZDtype] _freq = None - @classmethod - def _simple_new(cls, values, freq=None, tz=None): - """ - we require the we have a dtype compat for the values - if we are passed a non-dtype compat, then coerce using the constructor - """ - assert isinstance(values, np.ndarray), type(values) + def __init__(self, values, dtype=_NS_DTYPE, freq=None, copy=False): + if isinstance(values, (ABCSeries, ABCIndexClass)): + values = values._values + + if isinstance(values, type(self)): + # validation + dtz = getattr(dtype, 'tz', None) + if dtz and values.tz is None: + dtype = DatetimeTZDtype(tz=dtype.tz) + elif dtz and values.tz: + if not timezones.tz_compare(dtz, values.tz): + msg = ( + "Timezone of the array and 'dtype' do not match. " + "'{}' != '{}'" + ) + raise TypeError(msg.format(dtz, values.tz)) + elif values.tz: + dtype = values.dtype + # freq = validate_values_freq(values, freq) + if freq is None: + freq = values.freq + values = values._data + + if not isinstance(values, np.ndarray): + msg = ( + "Unexpected type '{}'. 'values' must be a DatetimeArray " + "ndarray, or Series or Index containing one of those." + ) + raise ValueError(msg.format(type(values).__name__)) + if values.dtype == 'i8': # for compat with datetime/timedelta/period shared methods, # we can sometimes get here with int64 values. These represent # nanosecond UTC (or tz-naive) unix timestamps values = values.view(_NS_DTYPE) - assert values.dtype == 'M8[ns]', values.dtype + if values.dtype != _NS_DTYPE: + msg = ( + "The dtype of 'values' is incorrect. Must be 'datetime64[ns]'." + " Got {} instead." 
+ ) + raise ValueError(msg.format(values.dtype)) - result = object.__new__(cls) - result._data = values - result._freq = freq - if tz is None: - dtype = _NS_DTYPE - else: - tz = timezones.maybe_get_tz(tz) - tz = timezones.tz_standardize(tz) - dtype = DatetimeTZDtype('ns', tz) - result._dtype = dtype - return result + dtype = pandas_dtype(dtype) + _validate_dt64_dtype(dtype) - def __new__(cls, values, freq=None, tz=None, dtype=None, copy=False, - dayfirst=False, yearfirst=False, ambiguous='raise'): - return cls._from_sequence( - values, freq=freq, tz=tz, dtype=dtype, copy=copy, - dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) + if freq == "infer": + msg = ( + "Frequency inference not allowed in DatetimeArray.__init__. " + "Use 'pd.array()' instead." + ) + raise ValueError(msg) + + if copy: + values = values.copy() + if freq: + freq = to_offset(freq) + if getattr(dtype, 'tz', None): + # https://github.com/pandas-dev/pandas/issues/18595 + # Ensure that we have a standard timezone for pytz objects. + # Without this, things like adding an array of timedeltas and + # a tz-aware Timestamp (with a tz specific to its datetime) will + # be incorrect(ish?) for the array as a whole + dtype = DatetimeTZDtype(tz=timezones.tz_standardize(dtype.tz)) + + self._data = values + self._dtype = dtype + self._freq = freq + + @classmethod + def _simple_new(cls, values, freq=None, tz=None): + """ + we require the we have a dtype compat for the values + if we are passed a non-dtype compat, then coerce using the constructor + """ + dtype = DatetimeTZDtype(tz=tz) if tz else _NS_DTYPE + + return cls(values, freq=freq, dtype=dtype) @classmethod def _from_sequence(cls, data, dtype=None, copy=False, @@ -459,8 +529,7 @@ def __array__(self, dtype=None): elif is_int64_dtype(dtype): return self.asi8 - # TODO: warn that conversion may be lossy? 
- return self._data.view(np.ndarray) # follow Index.__array__ + return self._data def __iter__(self): """ @@ -519,7 +588,7 @@ def astype(self, dtype, copy=True): @Appender(dtl.DatetimeLikeArrayMixin._validate_fill_value.__doc__) def _validate_fill_value(self, fill_value): - if isna(fill_value): + if isna(fill_value) or fill_value == iNaT: fill_value = iNaT elif isinstance(fill_value, (datetime, np.datetime64)): self._assert_tzawareness_compat(fill_value) @@ -1574,6 +1643,9 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, # if dtype has an embedded tz, capture it tz = validate_tz_from_dtype(dtype, tz) + if isinstance(data, ABCIndexClass): + data = data._data + # By this point we are assured to have either a numpy array or Index data, copy = maybe_convert_dtype(data, copy) @@ -1590,12 +1662,15 @@ def sequence_to_dt64ns(data, dtype=None, copy=False, data, dayfirst=dayfirst, yearfirst=yearfirst) tz = maybe_infer_tz(tz, inferred_tz) + # `data` may have originally been a Categorical[datetime64[ns, tz]], + # so we need to handle these types. if is_datetime64tz_dtype(data): + # DatetimeArray -> ndarray tz = maybe_infer_tz(tz, data.tz) result = data._data elif is_datetime64_dtype(data): - # tz-naive DatetimeArray/Index or ndarray[datetime64] + # tz-naive DatetimeArray or ndarray[datetime64] data = getattr(data, "_data", data) if data.dtype != _NS_DTYPE: data = conversion.ensure_datetime64ns(data) @@ -1750,7 +1825,7 @@ def maybe_convert_dtype(data, copy): # GH#18664 preserve tz in going DTI->Categorical->DTI # TODO: cases where we need to do another pass through this func, # e.g. 
the categories are timedelta64s - data = data.categories.take(data.codes, fill_value=NaT) + data = data.categories.take(data.codes, fill_value=NaT)._values copy = False elif is_extension_type(data) and not is_datetime64tz_dtype(data): diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 45a6081093aed..70da02f2ba0a1 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -179,8 +179,7 @@ def __init__(self, values, freq=None, dtype=None, copy=False): @classmethod def _simple_new(cls, values, freq=None, **kwargs): - # TODO(DatetimeArray): remove once all constructors are aligned. - # alias from PeriodArray.__init__ + # alias for PeriodArray.__init__ return cls(values, freq=freq, **kwargs) @classmethod diff --git a/pandas/core/arrays/timedeltas.py b/pandas/core/arrays/timedeltas.py index 78570be8dc07f..b747e2b6b096b 100644 --- a/pandas/core/arrays/timedeltas.py +++ b/pandas/core/arrays/timedeltas.py @@ -33,6 +33,8 @@ from . import datetimelike as dtl +_BAD_DTYPE = "dtype {dtype} cannot be converted to timedelta64[ns]" + def _to_m8(key): """ @@ -142,25 +144,56 @@ def dtype(self): # Constructors _attributes = ["freq"] - @classmethod - def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE): - # `dtype` is passed by _shallow_copy in corner cases, should always - # be timedelta64[ns] if present - assert dtype == _TD_DTYPE - assert isinstance(values, np.ndarray), type(values) + def __init__(self, values, dtype=_TD_DTYPE, freq=None, copy=False): + if isinstance(values, (ABCSeries, ABCIndexClass)): + values = values._values + + if isinstance(values, type(self)): + values, freq, freq_infer = extract_values_freq(values, freq) + + if not isinstance(values, np.ndarray): + msg = ( + "Unexpected type '{}'. 'values' must be a TimedeltaArray " + "ndarray, or Series or Index containing one of those." 
+ ) + raise ValueError(msg.format(type(values).__name__)) if values.dtype == 'i8': - values = values.view('m8[ns]') + # for compat with datetime/timedelta/period shared methods, + # we can sometimes get here with int64 values. These represent + # nanosecond UTC (or tz-naive) unix timestamps + values = values.view(_TD_DTYPE) - assert values.dtype == 'm8[ns]' + if values.dtype != _TD_DTYPE: + raise TypeError(_BAD_DTYPE.format(dtype=values.dtype)) - result = object.__new__(cls) - result._data = values - result._freq = freq - return result + try: + dtype_mismatch = dtype != _TD_DTYPE + except TypeError: + raise TypeError(_BAD_DTYPE.format(dtype=dtype)) + else: + if dtype_mismatch: + raise TypeError(_BAD_DTYPE.format(dtype=dtype)) - def __new__(cls, values, freq=None, dtype=_TD_DTYPE, copy=False): - return cls._from_sequence(values, dtype=dtype, copy=copy, freq=freq) + if freq == "infer": + msg = ( + "Frequency inference not allowed in TimedeltaArray.__init__. " + "Use 'pd.array()' instead." + ) + raise ValueError(msg) + + if copy: + values = values.copy() + if freq: + freq = to_offset(freq) + + self._data = values + self._dtype = dtype + self._freq = freq + + @classmethod + def _simple_new(cls, values, freq=None, dtype=_TD_DTYPE): + return cls(values, dtype=dtype, freq=freq) @classmethod def _from_sequence(cls, data, dtype=_TD_DTYPE, copy=False, @@ -984,3 +1017,18 @@ def _generate_regular_range(start, end, periods, offset): data = np.arange(b, e, stride, dtype=np.int64) return data + + +def extract_values_freq(arr, freq): + # type: (TimedeltaArray, Offset) -> Tuple[ndarray, Offset, bool] + freq_infer = False + if freq is None: + freq = arr.freq + elif freq and arr.freq: + freq = to_offset(freq) + freq, freq_infer = dtl.validate_inferred_freq( + freq, arr.freq, + freq_infer=False + ) + values = arr._data + return values, freq, freq_infer diff --git a/pandas/core/base.py b/pandas/core/base.py index cc1bda620c215..c37ab48de7cb8 100644 --- a/pandas/core/base.py +++ 
b/pandas/core/base.py @@ -850,11 +850,9 @@ def array(self): """ result = self._values - if (is_datetime64_ns_dtype(result.dtype) or - is_datetime64tz_dtype(result.dtype)): + if is_datetime64_ns_dtype(result.dtype): from pandas.arrays import DatetimeArray result = DatetimeArray(result) - elif is_timedelta64_ns_dtype(result.dtype): from pandas.arrays import TimedeltaArray result = TimedeltaArray(result) @@ -950,14 +948,14 @@ def to_numpy(self, dtype=None, copy=False): array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'], dtype='datetime64[ns]') """ - if (is_extension_array_dtype(self.dtype) or - is_datetime64tz_dtype(self.dtype)): - # TODO(DatetimeArray): remove the second clause. - # TODO(GH-24345): Avoid potential double copy - result = np.asarray(self._values, dtype=dtype) - else: - result = self._values + if is_datetime64tz_dtype(self.dtype) and dtype is None: + # note: this is going to change very soon. + # I have a WIP PR making this unnecessary, but it's + # a bit out of scope for the DatetimeArray PR. + dtype = "object" + result = np.asarray(self._values, dtype=dtype) + # TODO(GH-24345): Avoid potential double copy if copy: result = result.copy() return result diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index eae9eb97f35fe..8f26f7ac209b1 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -20,7 +20,9 @@ pandas_dtype) from .dtypes import ( DatetimeTZDtype, ExtensionDtype, PandasExtensionDtype, PeriodDtype) -from .generic import ABCDatetimeIndex, ABCPeriodIndex, ABCSeries +from .generic import ( + ABCDatetimeArray, ABCDatetimeIndex, ABCPeriodArray, ABCPeriodIndex, + ABCSeries) from .inference import is_list_like from .missing import isna, notna @@ -860,7 +862,9 @@ def maybe_infer_to_datetimelike(value, convert_dates=False): """ - if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex)): + # TODO: why not timedelta? 
+ if isinstance(value, (ABCDatetimeIndex, ABCPeriodIndex, + ABCDatetimeArray, ABCPeriodArray)): return value elif isinstance(value, ABCSeries): if isinstance(value._values, ABCDatetimeIndex): diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index e1141c6b6b3a8..293ce7d8e4aca 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -10,8 +10,7 @@ from pandas.core.dtypes.dtypes import ( CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, ExtensionDtype, - IntervalDtype, PandasExtensionDtype, PeriodDtype, _pandas_registry, - registry) + IntervalDtype, PandasExtensionDtype, PeriodDtype, registry) from pandas.core.dtypes.generic import ( ABCCategorical, ABCCategoricalIndex, ABCDateOffset, ABCDatetimeIndex, ABCIndexClass, ABCPeriodArray, ABCPeriodIndex, ABCSeries, ABCSparseArray, @@ -1984,7 +1983,7 @@ def pandas_dtype(dtype): return dtype # registered extension types - result = _pandas_registry.find(dtype) or registry.find(dtype) + result = registry.find(dtype) if result is not None: return result diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index 0501889d743d4..e6967ed2a4d3d 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -12,8 +12,8 @@ is_extension_array_dtype, is_interval_dtype, is_object_dtype, is_period_dtype, is_sparse, is_timedelta64_dtype) from pandas.core.dtypes.generic import ( - ABCDatetimeIndex, ABCPeriodIndex, ABCRangeIndex, ABCSparseDataFrame, - ABCTimedeltaIndex) + ABCDatetimeArray, ABCDatetimeIndex, ABCIndexClass, ABCPeriodIndex, + ABCRangeIndex, ABCSparseDataFrame, ABCTimedeltaIndex) from pandas import compat @@ -471,7 +471,15 @@ def _concat_datetimetz(to_concat, name=None): all inputs must be DatetimeIndex it is used in DatetimeIndex.append also """ - return to_concat[0]._concat_same_dtype(to_concat, name=name) + # Right now, internals will pass a List[DatetimeArray] here + # for reductions like quantile. 
I would like to disentangle + # all this before we get here. + sample = to_concat[0] + + if isinstance(sample, ABCIndexClass): + return sample._concat_same_dtype(to_concat, name=name) + elif isinstance(sample, ABCDatetimeArray): + return sample._concat_same_type(to_concat) def _concat_index_same_dtype(indexes, klass=None): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index e0d0cf3393dd5..9e2564c4f825b 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -479,7 +479,8 @@ def _is_boolean(self): return is_bool_dtype(self.categories) -class DatetimeTZDtype(PandasExtensionDtype): +@register_extension_dtype +class DatetimeTZDtype(PandasExtensionDtype, ExtensionDtype): """ A np.dtype duck-typed class, suitable for holding a custom datetime with tz @@ -493,6 +494,7 @@ class DatetimeTZDtype(PandasExtensionDtype): str = '|M8[ns]' num = 101 base = np.dtype('M8[ns]') + na_value = NaT _metadata = ('unit', 'tz') _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]") _cache = {} @@ -570,8 +572,8 @@ def construct_array_type(cls): ------- type """ - from pandas import DatetimeIndex - return DatetimeIndex + from pandas.core.arrays import DatetimeArrayMixin + return DatetimeArrayMixin @classmethod def construct_from_string(cls, string): @@ -885,10 +887,3 @@ def is_dtype(cls, dtype): else: return False return super(IntervalDtype, cls).is_dtype(dtype) - - -# TODO(Extension): remove the second registry once all internal extension -# dtypes are real extension dtypes. 
-_pandas_registry = Registry() - -_pandas_registry.register(DatetimeTZDtype) diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index bbc447d6fa0da..134ec95729833 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -67,7 +67,8 @@ def _check(cls, inst): ("extension", "categorical", "periodarray", - "npy_extension", + "datetimearray", + "timedeltaarray", )) ABCPandasArray = create_pandas_abc_type("ABCPandasArray", "_typ", diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ab5e2a14c7783..d6aa3117570af 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -78,6 +78,9 @@ from pandas.core import ops from pandas.core.accessor import CachedAccessor from pandas.core.arrays import Categorical, ExtensionArray +from pandas.core.arrays.datetimelike import ( + DatetimeLikeArrayMixin as DatetimeLikeArray +) from pandas.core.config import get_option from pandas.core.generic import NDFrame, _shared_docs from pandas.core.index import (Index, MultiIndex, ensure_index, @@ -4356,9 +4359,25 @@ def _maybe_casted_values(index, labels=None): values.fill(np.nan) else: values = values.take(labels) + + # TODO(https://github.com/pandas-dev/pandas/issues/24206) + # Push this into maybe_upcast_putmask? + # We can't pass EAs there right now. Looks a bit + # complicated. + # So we unbox the ndarray_values, op, re-box. 
+ values_type = type(values) + values_dtype = values.dtype + + if issubclass(values_type, DatetimeLikeArray): + values = values._data + if mask.any(): values, changed = maybe_upcast_putmask( values, mask, np.nan) + + if issubclass(values_type, DatetimeLikeArray): + values = values_type(values, dtype=values_dtype) + return values new_index = ibase.default_index(len(new_obj)) @@ -5314,7 +5333,6 @@ def extract_values(arr): arr = arr._values if needs_i8_conversion(arr): - # TODO(DatetimelikeArray): just use .asi8 if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 1380c5caed1c9..a26daba49f5d1 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -25,7 +25,7 @@ is_signed_integer_dtype, is_timedelta64_dtype, is_unsigned_integer_dtype) import pandas.core.dtypes.concat as _concat from pandas.core.dtypes.generic import ( - ABCDataFrame, ABCDateOffset, ABCDatetimeIndex, ABCIndexClass, + ABCDataFrame, ABCDateOffset, ABCDatetimeArray, ABCIndexClass, ABCMultiIndex, ABCPandasArray, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray, ABCTimedeltaIndex) from pandas.core.dtypes.missing import array_equivalent, isna @@ -568,9 +568,9 @@ def _shallow_copy(self, values=None, **kwargs): if not len(values) and 'dtype' not in kwargs: attributes['dtype'] = self.dtype - # _simple_new expects an ndarray - values = getattr(values, 'values', values) - if isinstance(values, ABCDatetimeIndex): + # _simple_new expects an the type of self._data + values = getattr(values, '_values', values) + if isinstance(values, ABCDatetimeArray): # `self.values` returns `self` for tz-aware, so we need to unwrap # more specifically values = values.asi8 diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 50b2413167b32..daca4b5116027 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -71,6 +71,10 @@ class 
DatetimeIndexOpsMixin(ExtensionOpsMixin): _maybe_mask_results = ea_passthrough("_maybe_mask_results") __iter__ = ea_passthrough("__iter__") + @property + def _eadata(self): + return self._data + @property def freq(self): """ @@ -166,12 +170,15 @@ def _join_i8_wrapper(joinf, dtype, with_indexers=True): """ Create the join wrapper methods. """ + from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin @staticmethod def wrapper(left, right): - if isinstance(left, (np.ndarray, ABCIndex, ABCSeries)): + if isinstance(left, (np.ndarray, ABCIndex, ABCSeries, + DatetimeLikeArrayMixin)): left = left.view('i8') - if isinstance(right, (np.ndarray, ABCIndex, ABCSeries)): + if isinstance(right, (np.ndarray, ABCIndex, ABCSeries, + DatetimeLikeArrayMixin)): right = right.view('i8') results = joinf(left, right) if with_indexers: @@ -195,25 +202,27 @@ def _evaluate_compare(self, other, op): def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise', from_utc=False): # See DatetimeLikeArrayMixin._ensure_localized.__doc__ - if getattr(self, 'tz', None): # ensure_localized is only relevant for tz-aware DTI - from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray - dtarr = DatetimeArray(self) - result = dtarr._ensure_localized(arg, - ambiguous=ambiguous, - nonexistent=nonexistent, - from_utc=from_utc) - return type(self)(result, name=self.name) + result = self._data._ensure_localized(arg, + ambiguous=ambiguous, + nonexistent=nonexistent, + from_utc=from_utc) + return type(self)._simple_new(result, name=self.name) return arg def _box_values_as_index(self): """ Return object Index which contains boxed values. 
""" + # XXX: this is broken (not called) for PeriodIndex, which doesn't + # define _box_values AFAICT from pandas.core.index import Index return Index(self._box_values(self.asi8), name=self.name, dtype=object) + def _box_values(self, values): + return self._data._box_values(values) + @Appender(_index_shared_docs['contains'] % _index_doc_kwargs) def __contains__(self, key): try: @@ -600,14 +609,8 @@ def _concat_same_dtype(self, to_concat, name): if not is_period_dtype(self): # reset freq attribs['freq'] = None - # TODO(DatetimeArray) - # - remove the .asi8 here - # - remove the _maybe_box_as_values - # - combine with the `else` block - new_data = self._eadata._concat_same_type(to_concat).asi8 - else: - new_data = type(self._values)._concat_same_type(to_concat) + new_data = type(self._values)._concat_same_type(to_concat).asi8 return self._simple_new(new_data, **attribs) def _maybe_box_as_values(self, values, **attribs): @@ -631,11 +634,6 @@ def astype(self, dtype, copy=True): return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False) - @Appender(DatetimeLikeArrayMixin._time_shift.__doc__) - def _time_shift(self, periods, freq=None): - result = self._eadata._time_shift(periods, freq=freq) - return type(self)(result, name=self.name) - @deprecate_kwarg(old_arg_name='n', new_arg_name='periods') def shift(self, periods, freq=None): """ diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index 690a3db28fe83..a6a910f66359c 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -17,11 +17,12 @@ _NS_DTYPE, ensure_int64, is_float, is_integer, is_list_like, is_scalar, is_string_like) import pandas.core.dtypes.concat as _concat +from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas.core.accessor import delegate_names from pandas.core.arrays.datetimes import ( - DatetimeArrayMixin as DatetimeArray, _to_m8) + DatetimeArrayMixin as DatetimeArray, 
_to_m8, validate_tz_from_dtype) from pandas.core.base import _shared_docs import pandas.core.common as com from pandas.core.indexes.base import Index @@ -40,10 +41,6 @@ def _new_DatetimeIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have arguments and breaks __new__ """ - # data are already in UTC - # so need to localize - tz = d.pop('tz', None) - if "data" in d and not isinstance(d["data"], DatetimeIndex): # Avoid need to verify integrity by calling simple_new directly data = d.pop("data") @@ -56,8 +53,6 @@ def _new_DatetimeIndex(cls, d): warnings.simplefilter("ignore") result = cls.__new__(cls, verify_integrity=False, **d) - if tz is not None: - result = result.tz_localize('UTC').tz_convert(tz) return result @@ -306,7 +301,7 @@ def __new__(cls, data=None, data, dtype=dtype, copy=copy, tz=tz, freq=freq, dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) - subarr = cls._simple_new(dtarr._data, name=name, + subarr = cls._simple_new(dtarr, name=name, freq=dtarr.freq, tz=dtarr.tz) return subarr @@ -317,20 +312,28 @@ def _simple_new(cls, values, name=None, freq=None, tz=None, dtype=None): if we are passed a non-dtype compat, then coerce using the constructor """ if isinstance(values, DatetimeArray): - values = DatetimeArray(values, freq=freq, tz=tz, dtype=dtype) + if tz: + tz = validate_tz_from_dtype(dtype, tz) + dtype = DatetimeTZDtype(tz=tz) + elif dtype is None: + dtype = _NS_DTYPE + + values = DatetimeArray(values, freq=freq, dtype=dtype) tz = values.tz freq = values.freq values = values._data # DatetimeArray._simple_new will accept either i8 or M8[ns] dtypes - assert isinstance(values, np.ndarray), type(values) - + if isinstance(values, DatetimeIndex): + values = values._data dtarr = DatetimeArray._simple_new(values, freq=freq, tz=tz) + assert isinstance(dtarr, DatetimeArray) + result = object.__new__(cls) - result._eadata = dtarr + result._data = dtarr result.name = name # For groupby perf. 
See note in indexes/base about _index_data - result._index_data = result._data + result._index_data = dtarr._data result._reset_identity() return result @@ -341,13 +344,17 @@ def dtype(self): return self._eadata.dtype @property - def _values(self): - # tz-naive -> ndarray - # tz-aware -> DatetimeIndex - if self.tz is not None: - return self._eadata - else: - return self.values + def tz(self): + # GH 18595 + return self._eadata.tz + + @tz.setter + def tz(self, value): + # GH 3746: Prevent localizing or converting the index by setting tz + raise AttributeError("Cannot directly set timezone. Use tz_localize() " + "or tz_convert() as appropriate") + + tzinfo = tz @property def size(self): @@ -418,7 +425,7 @@ def __setstate__(self, state): np.ndarray.__setstate__(data, state) dtarr = DatetimeArray(data) - self._eadata = dtarr + self._data = dtarr self._reset_identity() else: @@ -596,8 +603,6 @@ def _fast_union(self, other): def _wrap_setop_result(self, other, result): name = get_op_result_name(self, other) - if not timezones.tz_compare(self.tz, other.tz): - raise ValueError('Passed item and index have different timezone') return self._shallow_copy(result, name=name, freq=None, tz=self.tz) def intersection(self, other): @@ -1128,22 +1133,6 @@ def slice_indexer(self, start=None, end=None, step=None, kind=None): # -------------------------------------------------------------------- # Wrapping DatetimeArray - @property - def _data(self): - return self._eadata._data - - @property - def tz(self): - # GH#18595 - return self._eadata.tz - - @tz.setter - def tz(self, value): - # GH#3746; DatetimeArray will raise to disallow setting - self._eadata.tz = value - - tzinfo = tz - # Compat for frequency inference, see GH#23789 _is_monotonic_increasing = Index.is_monotonic_increasing _is_monotonic_decreasing = Index.is_monotonic_decreasing @@ -1536,7 +1525,7 @@ def date_range(start=None, end=None, periods=None, freq=None, tz=None, freq=freq, tz=tz, normalize=normalize, closed=closed, 
**kwargs) return DatetimeIndex._simple_new( - dtarr._data, tz=dtarr.tz, freq=dtarr.freq, name=name) + dtarr, tz=dtarr.tz, freq=dtarr.freq, name=name) def bdate_range(start=None, end=None, periods=None, freq='B', tz=None, diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 4bd8f7407500b..5bc76ed210edb 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -282,10 +282,6 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs): # ------------------------------------------------------------------------ # Data - @property - def _eadata(self): - return self._data - @property def values(self): return np.asarray(self) @@ -877,12 +873,6 @@ def __setstate__(self, state): _unpickle_compat = __setstate__ - def view(self, dtype=None, type=None): - # TODO(DatetimeArray): remove - if dtype is None or dtype is __builtins__['type'](self): - return self - return self._ndarray_values.view(dtype=dtype) - @property def flags(self): """ return the ndarray.flags for the underlying data """ diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 0798dd6eee0c9..3a3b9ed97c8fe 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -209,7 +209,13 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, 'collection of some kind, {data} was passed' .format(cls=cls.__name__, data=repr(data))) - if isinstance(data, TimedeltaIndex) and freq is None and name is None: + if isinstance(data, TimedeltaArray): + if copy: + data = data.copy() + return cls._simple_new(data, name=name, freq=freq) + + if (isinstance(data, TimedeltaIndex) and + freq is None and name is None): if copy: return data.copy() else: @@ -225,17 +231,17 @@ def __new__(cls, data=None, unit=None, freq=None, start=None, end=None, def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE): # `dtype` is passed by _shallow_copy in corner cases, should always # be timedelta64[ns] if 
present - assert dtype == _TD_DTYPE - - assert isinstance(values, np.ndarray), type(values) - if values.dtype == 'i8': - values = values.view('m8[ns]') + if not isinstance(values, TimedeltaArray): + values = TimedeltaArray._simple_new(values, dtype=dtype, + freq=freq) + assert isinstance(values, TimedeltaArray), type(values) + assert dtype == _TD_DTYPE, dtype assert values.dtype == 'm8[ns]', values.dtype freq = to_offset(freq) tdarr = TimedeltaArray._simple_new(values, freq=freq) result = object.__new__(cls) - result._eadata = tdarr + result._data = tdarr result.name = name # For groupby perf. See note in indexes/base about _index_data result._index_data = tdarr._data @@ -278,10 +284,6 @@ def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs): # ------------------------------------------------------------------- # Wrapping TimedeltaArray - @property - def _data(self): - return self._eadata._data - __mul__ = _make_wrapped_arith_op("__mul__") __rmul__ = _make_wrapped_arith_op("__rmul__") __floordiv__ = _make_wrapped_arith_op("__floordiv__") diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py index 346f56968c963..7845a62bb7edb 100644 --- a/pandas/core/internals/blocks.py +++ b/pandas/core/internals/blocks.py @@ -35,11 +35,11 @@ import pandas.core.algorithms as algos from pandas.core.arrays import ( - Categorical, DatetimeArrayMixin as DatetimeArray, ExtensionArray) + Categorical, DatetimeArrayMixin as DatetimeArray, ExtensionArray, + TimedeltaArrayMixin as TimedeltaArray) from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.indexes.datetimes import DatetimeIndex -from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexing import check_setitem_lengths from pandas.core.internals.arrays import extract_array import pandas.core.missing as missing @@ -2169,7 +2169,7 @@ class DatetimeLikeBlockMixin(object): @property def _holder(self): - return DatetimeIndex + return 
DatetimeArray @property def _na_value(self): @@ -2179,15 +2179,32 @@ def _na_value(self): def fill_value(self): return tslibs.iNaT + def to_dense(self): + # TODO(DatetimeBlock): remove + return np.asarray(self.values) + def get_values(self, dtype=None): """ return object dtype as boxed values, such as Timestamps/Timedelta """ if is_object_dtype(dtype): - return lib.map_infer(self.values.ravel(), - self._box_func).reshape(self.values.shape) + values = self.values + + if self.ndim > 1: + values = values.ravel() + + values = lib.map_infer(values, self._box_func) + + if self.ndim > 1: + values = values.reshape(self.values.shape) + + return values return self.values + @property + def asi8(self): + return self.values.view('i8') + class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): __slots__ = () @@ -2198,13 +2215,15 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): def __init__(self, values, placement, ndim=None): if values.dtype != _TD_DTYPE: values = conversion.ensure_timedelta64ns(values) - + if isinstance(values, TimedeltaArray): + values = values._data + assert isinstance(values, np.ndarray), type(values) super(TimeDeltaBlock, self).__init__(values, placement=placement, ndim=ndim) @property def _holder(self): - return TimedeltaIndex + return TimedeltaArray @property def _box_func(self): @@ -2299,6 +2318,9 @@ def to_native_types(self, slicer=None, na_rep=None, quoting=None, dtype=object) return rvalues + def external_values(self, dtype=None): + return np.asarray(self.values.astype("timedelta64[ns]", copy=False)) + class BoolBlock(NumericBlock): __slots__ = () @@ -2771,6 +2793,11 @@ def _maybe_coerce_values(self, values): """ if values.dtype != _NS_DTYPE: values = conversion.ensure_datetime64ns(values) + + if isinstance(values, DatetimeArray): + values = values._data + + assert isinstance(values, np.ndarray), type(values) return values def _astype(self, dtype, **kwargs): @@ -2887,12 +2914,15 @@ def set(self, locs, values, check=False): self.values[locs] = 
values + def external_values(self): + return np.asarray(self.values.astype('datetime64[ns]', copy=False)) + -class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): +class DatetimeTZBlock(ExtensionBlock, DatetimeBlock): """ implement a datetime64 block with a tz attribute """ __slots__ = () - _concatenator = staticmethod(_concat._concat_datetime) is_datetimetz = True + is_extension = True def __init__(self, values, placement, ndim=2, dtype=None): # XXX: This will end up calling _maybe_coerce_values twice @@ -2907,6 +2937,10 @@ def __init__(self, values, placement, ndim=2, dtype=None): super(DatetimeTZBlock, self).__init__(values, placement=placement, ndim=ndim) + @property + def _holder(self): + return DatetimeArray + def _maybe_coerce_values(self, values, dtype=None): """Input validation for values passed to __init__. Ensure that we have datetime64TZ, coercing if necessary. @@ -2928,7 +2962,7 @@ def _maybe_coerce_values(self, values, dtype=None): if dtype is not None: if isinstance(dtype, compat.string_types): dtype = DatetimeTZDtype.construct_from_string(dtype) - values = values._shallow_copy(tz=dtype.tz) + values = type(values)(values, dtype=dtype) if values.tz is None: raise ValueError("cannot create a DatetimeTZBlock without a tz") @@ -2939,7 +2973,7 @@ def _maybe_coerce_values(self, values, dtype=None): def is_view(self): """ return a boolean if I am possibly a view """ # check the ndarray values of the DatetimeIndex values - return self.values.values.base is not None + return self.values._data.base is not None def copy(self, deep=True): """ copy constructor """ @@ -2948,18 +2982,39 @@ def copy(self, deep=True): values = values.copy(deep=True) return self.make_block_same_class(values) - def external_values(self): - """ we internally represent the data as a DatetimeIndex, but for - external compat with ndarray, export as a ndarray of Timestamps + def get_values(self, dtype=None): """ - return self.values.astype('datetime64[ns]').values + Returns an 
ndarray of values. - def get_values(self, dtype=None): - # return object dtype as Timestamps with the zones + Parameters + ---------- + dtype : np.dtype + Only `object`-like dtypes are respected here (not sure + why). + + Returns + ------- + values : ndarray + When ``dtype=object``, then and object-dtype ndarray of + boxed values is returned. Otherwise, an M8[ns] ndarray + is returned. + + DatetimeArray is always 1-d. ``get_values`` will reshape + the return value to be the same dimensionality as the + block. + """ + values = self.values if is_object_dtype(dtype): - return lib.map_infer( - self.values.ravel(), self._box_func).reshape(self.values.shape) - return self.values + values = values._box_values(values._data) + + values = np.asarray(values) + + if self.ndim == 2: + # Ensure that our shape is correct for DataFrame. + # ExtensionArrays are always 1-D, even in a DataFrame when + # the analogous NumPy-backed column would be a 2-D ndarray. + values = values.reshape(1, -1) + return values def _slice(self, slicer): """ return a slice of my values """ @@ -2984,13 +3039,17 @@ def _try_coerce_args(self, values, other): base-type values, base-type other """ # asi8 is a view, needs copy - values = _block_shape(values.asi8, ndim=self.ndim) + values = _block_shape(values.view("i8"), ndim=self.ndim) if isinstance(other, ABCSeries): other = self._holder(other) if isinstance(other, bool): raise TypeError + elif is_datetime64_dtype(other): + # add the tz back + other = self._holder(other, dtype=self.dtype) + elif (is_null_datelike_scalar(other) or (lib.is_scalar(other) and isna(other))): other = tslibs.iNaT @@ -3021,11 +3080,12 @@ def _try_coerce_result(self, result): result = tslibs.Timestamp(result, tz=self.values.tz) if isinstance(result, np.ndarray): # allow passing of > 1dim if its trivial + if result.ndim > 1: result = result.reshape(np.prod(result.shape)) - # GH#24096 new values invalidates a frequency - result = self.values._shallow_copy(result, freq=None) + result = 
self._holder._simple_new(result, freq=None, + tz=self.values.tz) return result @@ -3033,32 +3093,6 @@ def _try_coerce_result(self, result): def _box_func(self): return lambda x: tslibs.Timestamp(x, tz=self.dtype.tz) - def shift(self, periods, axis=0, fill_value=None): - """ shift the block by periods """ - - # think about moving this to the DatetimeIndex. This is a non-freq - # (number of periods) shift ### - - N = len(self) - indexer = np.zeros(N, dtype=int) - if periods > 0: - indexer[periods:] = np.arange(N - periods) - else: - indexer[:periods] = np.arange(-periods, N) - - new_values = self.values.asi8.take(indexer) - - if isna(fill_value): - fill_value = tslibs.iNaT - if periods > 0: - new_values[:periods] = fill_value - else: - new_values[periods:] = fill_value - - new_values = self.values._shallow_copy(new_values) - return [self.make_block_same_class(new_values, - placement=self.mgr_locs)] - def diff(self, n, axis=0): """1st discrete difference @@ -3088,14 +3122,44 @@ def diff(self, n, axis=0): return [TimeDeltaBlock(new_values, placement=self.mgr_locs.indexer)] def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. - """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - # not using self.make_block_same_class as values can be non-tz dtype - return make_block( - values, placement=placement or slice(0, len(values), 1)) + # need to handle concat([tz1, tz2]) here, since DatetimeArray + # only handles cases where all the tzs are the same. + # Instead of placing the condition here, it could also go into the + # is_uniform_join_units check, but I'm not sure what is better. 
+ if len({x.dtype for x in to_concat}) > 1: + values = _concat._concat_datetime([x.values for x in to_concat]) + placement = placement or slice(0, len(values), 1) + + if self.ndim > 1: + values = np.atleast_2d(values) + return ObjectBlock(values, ndim=self.ndim, placement=placement) + return super(DatetimeTZBlock, self).concat_same_type(to_concat, + placement) + + def fillna(self, value, limit=None, inplace=False, downcast=None): + # We support filling a DatetimeTZ with a `value` whose timezone + # is different by coercing to object. + try: + return super(DatetimeTZBlock, self).fillna( + value, limit, inplace, downcast + ) + except (ValueError, TypeError): + # different timezones, or a non-tz + return self.astype(object).fillna( + value, limit=limit, inplace=inplace, downcast=downcast + ) + + def setitem(self, indexer, value): + # https://github.com/pandas-dev/pandas/issues/24020 + # Need a dedicated setitem until #24020 (type promotion in setitem + # for extension arrays) is designed and implemented. 
+ try: + return super(DatetimeTZBlock, self).setitem(indexer, value) + except (ValueError, TypeError): + newb = make_block(self.values.astype(object), + placement=self.mgr_locs, + klass=ObjectBlock,) + return newb.setitem(indexer, value) # ----------------------------------------------------------------- diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py index 2441c64518d59..067b95f9d8847 100644 --- a/pandas/core/internals/concat.py +++ b/pandas/core/internals/concat.py @@ -183,8 +183,8 @@ def get_reindexed_values(self, empty_dtype, upcasted_na): is_datetime64tz_dtype(empty_dtype)): if self.block is None: array = empty_dtype.construct_array_type() - missing_arr = array([fill_value], dtype=empty_dtype) - return missing_arr.repeat(self.shape[1]) + return array(np.full(self.shape[1], fill_value), + dtype=empty_dtype) pass elif getattr(self.block, 'is_categorical', False): pass diff --git a/pandas/core/internals/construction.py b/pandas/core/internals/construction.py index b18b966406bbb..b3c893c7d84be 100644 --- a/pandas/core/internals/construction.py +++ b/pandas/core/internals/construction.py @@ -589,7 +589,7 @@ def sanitize_array(data, index, dtype=None, copy=False, # everything else in this block must also handle ndarray's, # becuase we've unwrapped PandasArray into an ndarray. - if dtype is not None and not data.dtype.is_dtype(dtype): + if dtype is not None: subarr = data.astype(dtype) if copy: diff --git a/pandas/core/internals/managers.py b/pandas/core/internals/managers.py index f1372a1fe2f51..d50f9c3e65ebd 100644 --- a/pandas/core/internals/managers.py +++ b/pandas/core/internals/managers.py @@ -750,8 +750,13 @@ def as_array(self, transpose=False, items=None): else: mgr = self - if self._is_single_block or not self.is_mixed_type: - arr = mgr.blocks[0].get_values() + if self._is_single_block and mgr.blocks[0].is_datetimetz: + # TODO(Block.get_values): Make DatetimeTZBlock.get_values + # always be object dtype. 
Some callers seem to want the + # DatetimeArray (previously DTI) + arr = mgr.blocks[0].get_values(dtype=object) + elif self._is_single_block or not self.is_mixed_type: + arr = np.asarray(mgr.blocks[0].get_values()) else: arr = mgr._interleave() diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index a894b8788f8d8..cec594a13b3d3 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2713,10 +2713,11 @@ def write_array(self, key, value, items=None): raise NotImplementedError('Cannot store a category dtype in ' 'a HDF5 dataset that uses format=' '"fixed". Use format="table".') - if not empty_array: - value = value.T - transposed = True + if hasattr(value, 'T'): + # ExtensionArrays (1d) may not have transpose. + value = value.T + transposed = True if self._filters is not None: atom = None diff --git a/pandas/tests/arithmetic/test_timedelta64.py b/pandas/tests/arithmetic/test_timedelta64.py index dea4940eb3180..12ed174d6cc53 100644 --- a/pandas/tests/arithmetic/test_timedelta64.py +++ b/pandas/tests/arithmetic/test_timedelta64.py @@ -1476,7 +1476,7 @@ def test_tdi_rmul_arraylike(self, other, box_with_array): tdi = TimedeltaIndex(['1 Day'] * 10) expected = timedelta_range('1 days', '10 days') - expected._eadata._freq = None + expected._eadata.freq = None tdi = tm.box_expected(tdi, box) expected = tm.box_expected(expected, xbox) diff --git a/pandas/tests/arrays/test_datetimes.py b/pandas/tests/arrays/test_datetimes.py index 48b64c2968219..1375969c961fd 100644 --- a/pandas/tests/arrays/test_datetimes.py +++ b/pandas/tests/arrays/test_datetimes.py @@ -28,13 +28,26 @@ def test_mismatched_timezone_raises(self): arr = DatetimeArray(np.array(['2000-01-01T06:00:00'], dtype='M8[ns]'), dtype=DatetimeTZDtype(tz='US/Central')) dtype = DatetimeTZDtype(tz='US/Eastern') - with pytest.raises(TypeError, match='data is already tz-aware'): + with pytest.raises(TypeError, match='Timezone of the array'): DatetimeArray(arr, dtype=dtype) + def test_non_array_raises(self): 
+ with pytest.raises(ValueError, match='list'): + DatetimeArray([1, 2, 3]) + + def test_other_type_raises(self): + with pytest.raises(ValueError, + match="The dtype of 'values' is incorrect.*bool"): + DatetimeArray(np.array([1, 2, 3], dtype='bool')) + def test_incorrect_dtype_raises(self): with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): DatetimeArray(np.array([1, 2, 3], dtype='i8'), dtype='category') + def test_freq_infer_raises(self): + with pytest.raises(ValueError, match='Frequency inference'): + DatetimeArray(np.array([1, 2, 3], dtype='i8'), freq="infer") + def test_copy(self): data = np.array([1, 2, 3], dtype='M8[ns]') arr = DatetimeArray(data, copy=False) @@ -128,7 +141,7 @@ def test_repeat_preserves_tz(self): repeated = arr.repeat([1, 1]) # preserves tz and values, but not freq - expected = DatetimeArray(arr.asi8, freq=None, tz=arr.tz) + expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype) tm.assert_equal(repeated, expected) def test_value_counts_preserves_tz(self): @@ -153,8 +166,10 @@ def test_fillna_preserves_tz(self, method): arr[2] = pd.NaT fill_val = dti[1] if method == 'pad' else dti[3] - expected = DatetimeArray([dti[0], dti[1], fill_val, dti[3], dti[4]], - freq=None, tz='US/Central') + expected = DatetimeArray._from_sequence( + [dti[0], dti[1], fill_val, dti[3], dti[4]], + freq=None, tz='US/Central' + ) result = arr.fillna(method=method) tm.assert_extension_array_equal(result, expected) diff --git a/pandas/tests/arrays/test_timedeltas.py b/pandas/tests/arrays/test_timedeltas.py index 1221d920f2e91..08ef27297cca5 100644 --- a/pandas/tests/arrays/test_timedeltas.py +++ b/pandas/tests/arrays/test_timedeltas.py @@ -9,6 +9,10 @@ class TestTimedeltaArrayConstructor(object): + def test_non_array_raises(self): + with pytest.raises(ValueError, match='list'): + TimedeltaArray([1, 2, 3]) + def test_other_type_raises(self): with pytest.raises(TypeError, match="dtype bool cannot be converted"): @@ -17,13 +21,15 @@ def 
test_other_type_raises(self): def test_incorrect_dtype_raises(self): # TODO: why TypeError for 'category' but ValueError for i8? with pytest.raises(TypeError, - match='data type "category" not understood'): + match=r'category cannot be converted ' + r'to timedelta64\[ns\]'): TimedeltaArray(np.array([1, 2, 3], dtype='i8'), dtype='category') - with pytest.raises(ValueError, - match=r"Only timedelta64\[ns\] dtype is valid"): + with pytest.raises(TypeError, + match=r"dtype int64 cannot be converted " + r"to timedelta64\[ns\]"): TimedeltaArray(np.array([1, 2, 3], dtype='i8'), - dtype=np.dtype(int)) + dtype=np.dtype("int64")) def test_copy(self): data = np.array([1, 2, 3], dtype='m8[ns]') @@ -40,8 +46,6 @@ def test_from_sequence_dtype(self): msg = r"Only timedelta64\[ns\] dtype is valid" with pytest.raises(ValueError, match=msg): TimedeltaArray._from_sequence([], dtype=object) - with pytest.raises(ValueError, match=msg): - TimedeltaArray([], dtype=object) def test_abs(self): vals = np.array([-3600 * 10**9, 'NaT', 7200 * 10**9], dtype='m8[ns]') diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 77dc04e9453a9..aa29473ddf130 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -9,7 +9,7 @@ from pandas.core.dtypes.dtypes import ( DatetimeTZDtype, PeriodDtype, - IntervalDtype, CategoricalDtype, registry, _pandas_registry) + IntervalDtype, CategoricalDtype, registry) from pandas.core.dtypes.common import ( is_categorical_dtype, is_categorical, is_datetime64tz_dtype, is_datetimetz, @@ -798,20 +798,13 @@ def test_update_dtype_errors(self, bad_dtype): @pytest.mark.parametrize('dtype', [ CategoricalDtype, IntervalDtype, + DatetimeTZDtype, PeriodDtype, ]) def test_registry(dtype): assert dtype in registry.dtypes -@pytest.mark.parametrize('dtype', [ - DatetimeTZDtype, -]) -def test_pandas_registry(dtype): - assert dtype not in registry.dtypes - assert dtype in _pandas_registry.dtypes - - 
@pytest.mark.parametrize('dtype, expected', [ ('int64', None), ('interval', IntervalDtype()), @@ -819,18 +812,12 @@ def test_pandas_registry(dtype): ('interval[datetime64[ns]]', IntervalDtype('datetime64[ns]')), ('period[D]', PeriodDtype('D')), ('category', CategoricalDtype()), + ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern')), ]) def test_registry_find(dtype, expected): assert registry.find(dtype) == expected -@pytest.mark.parametrize('dtype, expected', [ - ('datetime64[ns, US/Eastern]', DatetimeTZDtype('ns', 'US/Eastern')), -]) -def test_pandas_registry_find(dtype, expected): - assert _pandas_registry.find(dtype) == expected - - @pytest.mark.parametrize('dtype, expected', [ (str, False), (int, False), diff --git a/pandas/tests/extension/arrow/bool.py b/pandas/tests/extension/arrow/bool.py index f8e357e162232..025c4cacd8fa1 100644 --- a/pandas/tests/extension/arrow/bool.py +++ b/pandas/tests/extension/arrow/bool.py @@ -76,6 +76,14 @@ def __getitem__(self, item): def __len__(self): return len(self._data) + def astype(self, dtype, copy=True): + # needed to fix this astype for the Series constructor. 
+ if isinstance(dtype, type(self.dtype)) and dtype == self.dtype: + if copy: + return self.copy() + return self + return super(ArrowBoolArray, self).astype(dtype, copy) + @property def dtype(self): return self._dtype @@ -102,9 +110,9 @@ def take(self, indices, allow_fill=False, fill_value=None): def copy(self, deep=False): if deep: - return copy.deepcopy(self._data) + return type(self)(copy.deepcopy(self._data)) else: - return copy.copy(self._data) + return type(self)(copy.copy(self._data)) def _concat_same_type(cls, to_concat): chunks = list(itertools.chain.from_iterable(x._data.chunks diff --git a/pandas/tests/extension/json/array.py b/pandas/tests/extension/json/array.py index d58b7ddf29123..bd50584406312 100644 --- a/pandas/tests/extension/json/array.py +++ b/pandas/tests/extension/json/array.py @@ -157,6 +157,12 @@ def astype(self, dtype, copy=True): # NumPy has issues when all the dicts are the same length. # np.array([UserDict(...), UserDict(...)]) fails, # but np.array([{...}, {...}]) works, so cast. 
+ + # needed to add this check for the Series constructor + if isinstance(dtype, type(self.dtype)) and dtype == self.dtype: + if copy: + return self.copy() + return self return np.array([dict(x) for x in self], dtype=dtype, copy=copy) def unique(self): diff --git a/pandas/tests/extension/test_common.py b/pandas/tests/extension/test_common.py index 2bc4bf5df2298..db3f3b80bca6b 100644 --- a/pandas/tests/extension/test_common.py +++ b/pandas/tests/extension/test_common.py @@ -77,14 +77,6 @@ def test_astype_no_copy(): assert arr is not result -@pytest.mark.parametrize('dtype', [ - dtypes.DatetimeTZDtype('ns', 'US/Central'), -]) -def test_is_not_extension_array_dtype(dtype): - assert not isinstance(dtype, dtypes.ExtensionDtype) - assert not is_extension_array_dtype(dtype) - - @pytest.mark.parametrize('dtype', [ dtypes.CategoricalDtype(), dtypes.IntervalDtype(), diff --git a/pandas/tests/extension/test_datetime.py b/pandas/tests/extension/test_datetime.py new file mode 100644 index 0000000000000..7c4491d6edbcf --- /dev/null +++ b/pandas/tests/extension/test_datetime.py @@ -0,0 +1,237 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas.core.arrays import DatetimeArrayMixin as DatetimeArray +from pandas.tests.extension import base + + +@pytest.fixture(params=["US/Central"]) +def dtype(request): + return DatetimeTZDtype(unit="ns", tz=request.param) + + +@pytest.fixture +def data(dtype): + data = DatetimeArray(pd.date_range("2000", periods=100, tz=dtype.tz), + dtype=dtype) + return data + + +@pytest.fixture +def data_missing(dtype): + return DatetimeArray( + np.array(['NaT', '2000-01-01'], dtype='datetime64[ns]'), + dtype=dtype + ) + + +@pytest.fixture +def data_for_sorting(dtype): + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + c = pd.Timestamp('2000-01-03') + return DatetimeArray(np.array([b, c, a], dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def 
data_missing_for_sorting(dtype): + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + return DatetimeArray(np.array([b, 'NaT', a], dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def data_for_grouping(dtype): + """ + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + a = pd.Timestamp('2000-01-01') + b = pd.Timestamp('2000-01-02') + c = pd.Timestamp('2000-01-03') + na = 'NaT' + return DatetimeArray(np.array([b, b, na, na, a, a, b, c], + dtype='datetime64[ns]'), + dtype=dtype) + + +@pytest.fixture +def na_cmp(): + def cmp(a, b): + return a is pd.NaT and a is b + return cmp + + +@pytest.fixture +def na_value(): + return pd.NaT + + +# ---------------------------------------------------------------------------- +class BaseDatetimeTests(object): + pass + + +# ---------------------------------------------------------------------------- +# Tests +class TestDatetimeDtype(BaseDatetimeTests, base.BaseDtypeTests): + pass + + +class TestConstructors(BaseDatetimeTests, base.BaseConstructorsTests): + pass + + +class TestGetitem(BaseDatetimeTests, base.BaseGetitemTests): + pass + + +class TestMethods(BaseDatetimeTests, base.BaseMethodsTests): + @pytest.mark.skip(reason="Incorrect expected") + def test_value_counts(self, all_data, dropna): + pass + + def test_combine_add(self, data_repeated): + # Timestamp.__add__(Timestamp) not defined + pass + + +class TestInterface(BaseDatetimeTests, base.BaseInterfaceTests): + + def test_array_interface(self, data): + if data.tz: + # np.asarray(DTA) is currently always tz-naive. 
+ pytest.skip("GH-23569") + else: + super(TestInterface, self).test_array_interface(data) + + +class TestArithmeticOps(BaseDatetimeTests, base.BaseArithmeticOpsTests): + implements = {'__sub__', '__rsub__'} + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + if all_arithmetic_operators in self.implements: + s = pd.Series(data) + self.check_opname(s, all_arithmetic_operators, s.iloc[0], + exc=None) + else: + # ... but not the rest. + super(TestArithmeticOps, self).test_arith_series_with_scalar( + data, all_arithmetic_operators + ) + + def test_add_series_with_extension_array(self, data): + # Datetime + Datetime not implemented + s = pd.Series(data) + msg = 'cannot add DatetimeArray(Mixin)? and DatetimeArray(Mixin)?' + with pytest.raises(TypeError, match=msg): + s + data + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + if all_arithmetic_operators in self.implements: + s = pd.Series(data) + self.check_opname(s, all_arithmetic_operators, s.iloc[0], + exc=None) + else: + # ... but not the rest. + super(TestArithmeticOps, self).test_arith_series_with_scalar( + data, all_arithmetic_operators + ) + + def test_error(self, data, all_arithmetic_operators): + pass + + @pytest.mark.xfail(reason="different implementation", strict=False) + def test_direct_arith_with_series_returns_not_implemented(self, data): + # Right now, we have trouble with this. Returning NotImplemented + # fails other tests like + # tests/arithmetic/test_datetime64::TestTimestampSeriesArithmetic:: + # test_dt64_seris_add_intlike + return super( + TestArithmeticOps, + self + ).test_direct_arith_with_series_returns_not_implemented(data) + + +class TestCasting(BaseDatetimeTests, base.BaseCastingTests): + pass + + +class TestComparisonOps(BaseDatetimeTests, base.BaseComparisonOpsTests): + + def _compare_other(self, s, data, op_name, other): + # the base test is not appropriate for us. We raise on comparison + # with (some) integers, depending on the value. 
+ pass + + @pytest.mark.xfail(reason="different implementation", strict=False) + def test_direct_arith_with_series_returns_not_implemented(self, data): + return super( + TestComparisonOps, + self + ).test_direct_arith_with_series_returns_not_implemented(data) + + +class TestMissing(BaseDatetimeTests, base.BaseMissingTests): + pass + + +class TestReshaping(BaseDatetimeTests, base.BaseReshapingTests): + + @pytest.mark.skip(reason="We have DatetimeTZBlock") + def test_concat(self, data, in_frame): + pass + + def test_concat_mixed_dtypes(self, data): + # concat(Series[datetimetz], Series[category]) uses a + # plain np.array(values) on the DatetimeArray, which + # drops the tz. + super(TestReshaping, self).test_concat_mixed_dtypes(data) + + @pytest.mark.parametrize("obj", ["series", "frame"]) + def test_unstack(self, obj): + # GH-13287: can't use base test, since building the expected fails. + data = DatetimeArray._from_sequence(['2000', '2001', '2002', '2003'], + tz='US/Central') + index = pd.MultiIndex.from_product(([['A', 'B'], ['a', 'b']]), + names=['a', 'b']) + + if obj == "series": + ser = pd.Series(data, index=index) + expected = pd.DataFrame({ + "A": data.take([0, 1]), + "B": data.take([2, 3]) + }, index=pd.Index(['a', 'b'], name='b')) + expected.columns.name = 'a' + + else: + ser = pd.DataFrame({"A": data, "B": data}, index=index) + expected = pd.DataFrame( + {("A", "A"): data.take([0, 1]), + ("A", "B"): data.take([2, 3]), + ("B", "A"): data.take([0, 1]), + ("B", "B"): data.take([2, 3])}, + index=pd.Index(['a', 'b'], name='b') + ) + expected.columns.names = [None, 'a'] + + result = ser.unstack(0) + self.assert_equal(result, expected) + + +class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests): + pass + + +class TestGroupby(BaseDatetimeTests, base.BaseGroupbyTests): + pass + + +class TestPrinting(BaseDatetimeTests, base.BasePrintingTests): + pass diff --git a/pandas/tests/frame/test_indexing.py b/pandas/tests/frame/test_indexing.py index 
6e006c1707604..418046e42d581 100644 --- a/pandas/tests/frame/test_indexing.py +++ b/pandas/tests/frame/test_indexing.py @@ -3244,10 +3244,10 @@ def test_setitem(self): # are copies) b1 = df._data.blocks[1] b2 = df._data.blocks[2] - assert b1.values.equals(b2.values) - if b1.values.values.base is not None: + tm.assert_extension_array_equal(b1.values, b2.values) + if b1.values._data.base is not None: # base being None suffices to assure a copy was made - assert id(b1.values.values.base) != id(b2.values.values.base) + assert id(b1.values._data.base) != id(b2.values._data.base) # with nan df2 = df.copy() diff --git a/pandas/tests/frame/test_timezones.py b/pandas/tests/frame/test_timezones.py index cd93f3a1148dd..f124a4c3f3570 100644 --- a/pandas/tests/frame/test_timezones.py +++ b/pandas/tests/frame/test_timezones.py @@ -12,12 +12,47 @@ from pandas.core.dtypes.dtypes import DatetimeTZDtype +import pandas as pd from pandas import DataFrame, Series from pandas.core.indexes.datetimes import date_range import pandas.util.testing as tm class TestDataFrameTimezones(object): + + def test_frame_values_with_tz(self): + tz = "US/Central" + df = DataFrame({"A": date_range('2000', periods=4, tz=tz)}) + result = df.values + expected = np.array([ + [pd.Timestamp('2000-01-01', tz=tz)], + [pd.Timestamp('2000-01-02', tz=tz)], + [pd.Timestamp('2000-01-03', tz=tz)], + [pd.Timestamp('2000-01-04', tz=tz)], + ]) + tm.assert_numpy_array_equal(result, expected) + + # two columns, homogenous + + df = df.assign(B=df.A) + result = df.values + expected = np.concatenate([expected, expected], axis=1) + tm.assert_numpy_array_equal(result, expected) + + # three columns, heterogenous + est = "US/Eastern" + df = df.assign(C=df.A.dt.tz_convert(est)) + + new = np.array([ + [pd.Timestamp('2000-01-01T01:00:00', tz=est)], + [pd.Timestamp('2000-01-02T01:00:00', tz=est)], + [pd.Timestamp('2000-01-03T01:00:00', tz=est)], + [pd.Timestamp('2000-01-04T01:00:00', tz=est)], + ]) + expected = 
np.concatenate([expected, new], axis=1) + result = df.values + tm.assert_numpy_array_equal(result, expected) + def test_frame_from_records_utc(self): rec = {'datum': 1.5, 'begin_time': datetime(2006, 4, 27, tzinfo=pytz.utc)} diff --git a/pandas/tests/indexes/timedeltas/test_construction.py b/pandas/tests/indexes/timedeltas/test_construction.py index a4e925f6611f9..b9bbfaff06215 100644 --- a/pandas/tests/indexes/timedeltas/test_construction.py +++ b/pandas/tests/indexes/timedeltas/test_construction.py @@ -26,7 +26,7 @@ def test_int64_nocopy(self): # and copy=False arr = np.arange(10, dtype=np.int64) tdi = TimedeltaIndex(arr, copy=False) - assert tdi._data.base is arr + assert tdi._data._data.base is arr def test_infer_from_tdi(self): # GH#23539 diff --git a/pandas/tests/internals/test_internals.py b/pandas/tests/internals/test_internals.py index 26cd39c4b807c..b9196971d2e53 100644 --- a/pandas/tests/internals/test_internals.py +++ b/pandas/tests/internals/test_internals.py @@ -11,8 +11,13 @@ from distutils.version import LooseVersion import itertools from pandas import (Index, MultiIndex, DataFrame, DatetimeIndex, - Series, Categorical, TimedeltaIndex, SparseArray) + Series, Categorical, SparseArray) + from pandas.compat import OrderedDict, lrange +from pandas.core.arrays import ( + DatetimeArrayMixin as DatetimeArray, + TimedeltaArrayMixin as TimedeltaArray, +) from pandas.core.internals import (SingleBlockManager, make_block, BlockManager) import pandas.core.algorithms as algos @@ -290,7 +295,7 @@ def test_make_block_same_class(self): block = create_block('M8[ns, US/Eastern]', [3]) with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False): - block.make_block_same_class(block.values.values, + block.make_block_same_class(block.values, dtype=block.values.dtype) @@ -451,7 +456,7 @@ def test_copy(self, mgr): assert cp_blk.values.base is blk.values.base else: # DatetimeTZBlock has DatetimeIndex values - assert cp_blk.values.values.base is 
blk.values.values.base + assert cp_blk.values._data.base is blk.values._data.base cp = mgr.copy(deep=True) for blk, cp_blk in zip(mgr.blocks, cp.blocks): @@ -460,7 +465,7 @@ def test_copy(self, mgr): # some blocks it is an array (e.g. datetimetz), but was copied assert cp_blk.equals(blk) if not isinstance(cp_blk.values, np.ndarray): - assert cp_blk.values.values.base is not blk.values.values.base + assert cp_blk.values._data.base is not blk.values._data.base else: assert cp_blk.values.base is None and blk.values.base is None @@ -1258,9 +1263,9 @@ def test_binop_other(self, op, value, dtype): @pytest.mark.parametrize('typestr, holder', [ ('category', Categorical), - ('M8[ns]', DatetimeIndex), - ('M8[ns, US/Central]', DatetimeIndex), - ('m8[ns]', TimedeltaIndex), + ('M8[ns]', DatetimeArray), + ('M8[ns, US/Central]', DatetimeArray), + ('m8[ns]', TimedeltaArray), ('sparse', SparseArray), ]) def test_holder(typestr, holder): diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 9034b964033ed..d985ca4eb67ea 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -8,6 +8,7 @@ import pytest from pandas.compat import PY3 +import pandas.util._test_decorators as td import pandas as pd from pandas.util import testing as tm @@ -469,6 +470,7 @@ def test_partition_cols_supported(self, pa, df_full): class TestParquetFastParquet(Base): + @td.skip_if_no('fastparquet', min_version="0.2.1") def test_basic(self, fp, df_full): df = df_full diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 051462c5e9fc6..3997aade16892 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1015,13 +1015,17 @@ def test_append_empty_frame_to_series_with_dateutil_tz(self): s = Series({'date': date, 'a': 1.0, 'b': 2.0}) df = DataFrame(columns=['c', 'd']) result = df.append(s, ignore_index=True) + # n.b. it's not clear to me that expected is correct here. 
+ # It's possible that the `date` column should have + # datetime64[ns, tz] dtype for both result and expected. + # that would be more consistent with new columns having + # their own dtype (float for a and b, datetime64ns, tz for date). expected = DataFrame([[np.nan, np.nan, 1., 2., date]], - columns=['c', 'd', 'a', 'b', 'date']) + columns=['c', 'd', 'a', 'b', 'date'], + dtype=object) # These columns get cast to object after append - object_cols = ['c', 'd', 'date'] - expected.loc[:, object_cols] = expected.loc[:, object_cols].astype( - object - ) + expected['a'] = expected['a'].astype(float) + expected['b'] = expected['b'].astype(float) assert_frame_equal(result, expected) diff --git a/pandas/tests/series/test_block_internals.py b/pandas/tests/series/test_block_internals.py index ccfb169cc2f8d..e74b32181ce0f 100644 --- a/pandas/tests/series/test_block_internals.py +++ b/pandas/tests/series/test_block_internals.py @@ -16,14 +16,14 @@ def test_setitem_invalidates_datetime_index_freq(self): ts = dti[1] ser = pd.Series(dti) assert ser._values is not dti - assert ser._values._data.base is not dti._data.base + assert ser._values._data.base is not dti._data._data.base assert dti.freq == 'D' ser.iloc[1] = pd.NaT assert ser._values.freq is None # check that the DatetimeIndex was not altered in place assert ser._values is not dti - assert ser._values._data.base is not dti._data.base + assert ser._values._data.base is not dti._data._data.base assert dti[1] == ts assert dti.freq == 'D' @@ -33,9 +33,10 @@ def test_dt64tz_setitem_does_not_mutate_dti(self): ts = dti[0] ser = pd.Series(dti) assert ser._values is not dti - assert ser._values._data.base is not dti._data.base + assert ser._values._data.base is not dti._data._data.base assert ser._data.blocks[0].values is not dti - assert ser._data.blocks[0].values._data.base is not dti._data.base + assert (ser._data.blocks[0].values._data.base + is not dti._data._data.base) ser[::3] = pd.NaT assert ser[0] is pd.NaT diff --git 
a/pandas/tests/test_base.py b/pandas/tests/test_base.py index 85650a9b0df0d..dbdbb0bc238a9 100644 --- a/pandas/tests/test_base.py +++ b/pandas/tests/test_base.py @@ -16,6 +16,7 @@ from pandas.core.dtypes.common import ( is_datetime64_dtype, is_datetime64tz_dtype, is_object_dtype, is_timedelta64_dtype, needs_i8_conversion) +from pandas.core.dtypes.dtypes import DatetimeTZDtype import pandas as pd from pandas import ( @@ -365,7 +366,9 @@ def test_value_counts_unique_nunique(self): else: expected_index = Index(values[::-1]) idx = o.index.repeat(range(1, len(o) + 1)) - rep = np.repeat(values, range(1, len(o) + 1)) + # take-based repeat + indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1)) + rep = values.take(indices) o = klass(rep, index=idx, name='a') # check values has the same dtype as the original @@ -1154,14 +1157,32 @@ def test_iter_box(self): (np.array([0, 1], dtype=np.int64), np.ndarray, 'int64'), (np.array(['a', 'b']), np.ndarray, 'object'), (pd.Categorical(['a', 'b']), pd.Categorical, 'category'), - (pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'), (pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), DatetimeArray, 'datetime64[ns, US/Central]'), - (pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'), + (pd.PeriodIndex([2018, 2019], freq='A'), pd.core.arrays.PeriodArray, pd.core.dtypes.dtypes.PeriodDtype("A-DEC")), (pd.IntervalIndex.from_breaks([0, 1, 2]), pd.core.arrays.IntervalArray, 'interval'), + + # This test is currently failing for datetime64[ns] and timedelta64[ns]. + # The NumPy type system is sufficient for representing these types, so + # we just use NumPy for Series / DataFrame columns of these types (so + # we get consolidation and so on). + # However, DatetimeIndex and TimedeltaIndex use the DateLikeArray + # abstraction to for code reuse. 
+ # At the moment, we've judged that allowing this test to fail is more + # practical that overriding Series._values to special case + # Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray. + pytest.param( + pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]', + marks=[pytest.mark.xfail(reason="datetime _values", strict=True)] + ), + pytest.param( + pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]', + marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)] + ), + ]) def test_values_consistent(array, expected_type, dtype): l_values = pd.Series(array)._values @@ -1218,7 +1239,13 @@ def test_numpy_array_all_dtypes(any_numpy_dtype): (pd.core.arrays.integer_array([0, np.nan]), '_data'), (pd.core.arrays.IntervalArray.from_breaks([0, 1]), '_left'), (pd.SparseArray([0, 1]), '_sparse_values'), - # TODO: DatetimeArray(add) + (DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"), + # tz-aware Datetime + (DatetimeArray(np.array(['2000-01-01T12:00:00', + '2000-01-02T12:00:00'], + dtype='M8[ns]'), + dtype=DatetimeTZDtype(tz="US/Central")), + '_data'), ]) @pytest.mark.parametrize('box', [pd.Series, pd.Index]) def test_array(array, attr, box): @@ -1249,7 +1276,22 @@ def test_array_multiindex_raises(): (pd.core.arrays.IntervalArray.from_breaks([0, 1, 2]), np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object)), (pd.SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)), - # TODO: DatetimeArray(add) + + # tz-naive datetime + (DatetimeArray(np.array(['2000', '2001'], dtype='M8[ns]')), + np.array(['2000', '2001'], dtype='M8[ns]')), + + # tz-aware stays tz`-aware + (DatetimeArray(np.array(['2000-01-01T06:00:00', + '2000-01-02T06:00:00'], + dtype='M8[ns]'), + dtype=DatetimeTZDtype(tz='US/Central')), + np.array([pd.Timestamp('2000-01-01', tz='US/Central'), + pd.Timestamp('2000-01-02', tz='US/Central')])), + + # Timedelta + (TimedeltaArray(np.array([0, 3600000000000], dtype='i8'), freq='H'), + np.array([0, 3600000000000], 
dtype='m8[ns]')), ]) @pytest.mark.parametrize('box', [pd.Series, pd.Index]) def test_to_numpy(array, expected, box): @@ -1290,13 +1332,18 @@ def test_to_numpy_dtype(as_series): obj = pd.DatetimeIndex(['2000', '2001'], tz=tz) if as_series: obj = pd.Series(obj) - result = obj.to_numpy(dtype=object) + + # preserve tz by default + result = obj.to_numpy() expected = np.array([pd.Timestamp('2000', tz=tz), pd.Timestamp('2001', tz=tz)], dtype=object) tm.assert_numpy_array_equal(result, expected) - result = obj.to_numpy() + result = obj.to_numpy(dtype="object") + tm.assert_numpy_array_equal(result, expected) + + result = obj.to_numpy(dtype="M8[ns]") expected = np.array(['2000-01-01T05', '2001-01-01T05'], dtype='M8[ns]') tm.assert_numpy_array_equal(result, expected) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 5539778e1d187..ba0ad72e624f7 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -469,7 +469,8 @@ def test_delitem_and_pop(self): def test_setitem(self): lp = self.panel.filter(['ItemA', 'ItemB']).to_frame() - with pytest.raises(ValueError): + + with pytest.raises(TypeError): self.panel['ItemE'] = lp # DataFrame diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 3c902ce7dc0d8..a6ba62bbdea1e 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -1313,6 +1313,13 @@ def assert_series_equal(left, right, check_dtype=True, elif is_interval_dtype(left) or is_interval_dtype(right): assert_interval_array_equal(left.array, right.array) + elif (is_extension_array_dtype(left.dtype) and + is_datetime64tz_dtype(left.dtype)): + # .values is an ndarray, but ._values is the ExtensionArray. 
+ # TODO: Use .array + assert is_extension_array_dtype(right.dtype) + return assert_extension_array_equal(left._values, right._values) + elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and is_extension_array_dtype(right) and not is_categorical_dtype(right)): return assert_extension_array_equal(left.array, right.array)
TODO: - [x] DatetimeLikeArrayMixin.astype(int) ignores sign and size? - [ ] _from_sequence too permissive? - [x] OK with DatetimeArray.view? - [x] tz vs. dtype in `DatetimeArray.__init__`: https://github.com/pandas-dev/pandas/pull/24024#discussion_r241445450 - [ ] Index._simple_new consistency: https://github.com/pandas-dev/pandas/pull/24024#discussion_r241458796 --- n.b.: right now I have a few other PRs in this branch (#23601, #23990, #24023). The eventual diff is at https://github.com/TomAugspurger/pandas/compare/disown-tz-only-target...TomAugspurger:disown-tz-only-rebased. Also, I would say this isn't *quite* ready for review yet, but I've been promising it for a while. I'm going to try to split off a bit more into separate PRs. --- This implements DatetimeArray and TimedeltaArray, arrays for storing datetime (tz naive or aware) and timedelta data. ### High-level Design: Everything here applies equally to TimedeltaIndex / TimedeltaArray, other than references to DatetimeTZDtype. We don't have an ExtensionType for Timedelta. DatetimeIndex's data is now a DatetimeArray, rather than an ndarray, and its dtype is `dtype::Union['datetime64[ns]', DatetimeTZDtype]`. **Class Hierarchy**: We've split DatetimeIndex to compose a DatetimeArray, rather than inherit it. Old - DatetimeIndex: ![dti_master](https://user-images.githubusercontent.com/1312546/49312507-03319980-f4aa-11e8-8ae9-2b85fb5c23f2.png) New - DatetimeIndex: ![dti_pr](https://user-images.githubusercontent.com/1312546/49312508-03319980-f4aa-11e8-8c31-1025e64bf981.png) New - DatetimeArray: ![dta_pr](https://user-images.githubusercontent.com/1312546/49312506-03319980-f4aa-11e8-99ae-a41e1eaad210.png) **Internals** I've mostly given up here... Things are *basically* as they were before, with the exception that DatetimeTZBlock now extends ExtensionBlock. **In particular** tz-naive data is still stored in a DatetimeBlock, and is still consolidatable. This isn't elegant, but we've lived with it for this long. 
Future releases can clean it up, possibly when we rewrite / cythonize the block manager, possibly before. ### Types of changes 1. Some changes from `x.view('i8')` to `x.astype('i8', copy=False)` to support getting i8 values from {ndarray, DatetimeIndex, DatetimeArray}. We could revert those if we implemented DatetimeArray.view, but I don't think we should. However, this is slower than `.view` for ndarrays, so I'm going to find another way. 2. Many places where we did `series.values` instead of `series._values`. 3. Moves from PeriodArray / PeriodIndex to a base class. I've annotated these with `NB: Moved from...` 4. Test skips / xfails. I intended to resolve all of these over the next few days. --- TODO: - [x] Split out DatelikeOps and TimelikeOps changes (#24038) - [x] independent tests for DatelikeOps & TimelikeOps methods on arrays. - [x] re-clean up the constructors. I may have some cruft from rebasing - [x] identify shared code in PeriodArray / PeriodIndex that can be moved up - [ ] all the xfailed tests - [ ] identify code that was special casing Datetime-dtyps (like `is_extension_type`). - [ ] API: Series[datetimetz].unique: ndarray[object] or DatetimeArray? Closes #23185 Closes #23932 closes #24465
https://api.github.com/repos/pandas-dev/pandas/pulls/24024
2018-11-30T20:23:49Z
2019-01-02T18:20:35Z
2019-01-02T18:20:35Z
2019-01-04T16:04:24Z
PERF: Use Categorical.equals in CategoricalIndex.equals
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index f888648a9363e..bd474092a0b78 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1213,6 +1213,7 @@ Performance Improvements The speed increase is both when indexing by label (using .loc) and position(.iloc) (:issue:`20395`) Slicing a monotonically increasing :class:`CategoricalIndex` itself (i.e. ``ci[1000:2000]``) shows similar speed improvements as above (:issue:`21659`) +- Improved performance of :meth:`CategoricalIndex.equals` when comparing to another :class:`CategoricalIndex` (:issue:`24023`) - Improved performance of :func:`Series.describe` in case of numeric dtpyes (:issue:`21274`) - Improved performance of :func:`pandas.core.groupby.GroupBy.rank` when dealing with tied rankings (:issue:`21237`) - Improved performance of :func:`DataFrame.set_index` with columns consisting of :class:`Period` objects (:issue:`21582`, :issue:`21606`) diff --git a/pandas/core/indexes/category.py b/pandas/core/indexes/category.py index 6b84e8deea493..91c7648d5cf2e 100644 --- a/pandas/core/indexes/category.py +++ b/pandas/core/indexes/category.py @@ -13,7 +13,7 @@ is_scalar) from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.generic import ABCCategorical, ABCSeries -from pandas.core.dtypes.missing import array_equivalent, isna +from pandas.core.dtypes.missing import isna from pandas.core import accessor from pandas.core.algorithms import take_1d @@ -283,7 +283,9 @@ def equals(self, other): try: other = self._is_dtype_compat(other) - return array_equivalent(self._data, other) + if isinstance(other, type(self)): + other = other._data + return self._data.equals(other) except (TypeError, ValueError): pass
Avoids a materialization of the full array. ```python import pandas as pd import numpy as np import string a = pd.CategoricalIndex(np.random.choice(list(string.ascii_letters[:10]), 100_000)) b = a.copy() c = pd.CategoricalIndex(np.random.choice(list(string.ascii_letters[:10]), 100_000)) print("eq-index") %timeit a.equals(b) print("ne-index") %timeit a.equals(c) ``` timings case | master | pr --------- | ------- | -- eq-index | 3.35 ms | 420 µs ne-index | 1.29 ms | 451 µs
https://api.github.com/repos/pandas-dev/pandas/pulls/24023
2018-11-30T19:11:14Z
2018-11-30T21:32:49Z
2018-11-30T21:32:49Z
2018-11-30T21:32:51Z
BUG: GH24011 - Rich comparisons of Timestamps now return NotImplemented
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst index 3a539199acd6f..61cff302e5f4d 100644 --- a/doc/source/whatsnew/v0.25.0.rst +++ b/doc/source/whatsnew/v0.25.0.rst @@ -44,6 +44,7 @@ Backwards incompatible API changes .. _whatsnew_0250.api_breaking.utc_offset_indexing: + Indexing with date strings with UTC offsets ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -215,6 +216,7 @@ Other API Changes - :class:`DatetimeTZDtype` will now standardize pytz timezones to a common timezone instance (:issue:`24713`) - ``Timestamp`` and ``Timedelta`` scalars now implement the :meth:`to_numpy` method as aliases to :meth:`Timestamp.to_datetime64` and :meth:`Timedelta.to_timedelta64`, respectively. (:issue:`24653`) - :meth:`Timestamp.strptime` will now rise a ``NotImplementedError`` (:issue:`25016`) +- Comparing :class:`Timestamp` with unsupported objects now returns :py:obj:`NotImplemented` instead of raising ``TypeError``. This implies that unsupported rich comparisons are delegated to the other object, and are now consistent with Python 3 behavior for ``datetime`` objects (:issue:`24011`) - Bug in :meth:`DatetimeIndex.snap` which didn't preserving the ``name`` of the input :class:`Index` (:issue:`25575`) .. 
_whatsnew_0250.deprecations: diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index bd59da3f38612..3b7eb37a0b74a 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -227,26 +227,13 @@ cdef class _Timestamp(datetime): if is_datetime64_object(other): other = Timestamp(other) else: - if op == Py_EQ: - return False - elif op == Py_NE: - return True - - # only allow ==, != ops - raise TypeError('Cannot compare type %r with type %r' % - (type(self).__name__, - type(other).__name__)) + return NotImplemented elif is_array(other): # avoid recursion error GH#15183 return PyObject_RichCompare(np.array([self]), other, op) return PyObject_RichCompare(other, self, reverse_ops[op]) else: - if op == Py_EQ: - return False - elif op == Py_NE: - return True - raise TypeError('Cannot compare type %r with type %r' % - (type(self).__name__, type(other).__name__)) + return NotImplemented self._assert_tzawareness_compat(other) return cmp_scalar(self.value, ots.value, op) diff --git a/pandas/tests/scalar/timestamp/test_comparisons.py b/pandas/tests/scalar/timestamp/test_comparisons.py index b0b723e66c649..2821c0a578752 100644 --- a/pandas/tests/scalar/timestamp/test_comparisons.py +++ b/pandas/tests/scalar/timestamp/test_comparisons.py @@ -156,3 +156,33 @@ def test_timestamp_compare_with_early_datetime(self): assert stamp >= datetime(1600, 1, 1) assert stamp < datetime(2700, 1, 1) assert stamp <= datetime(2700, 1, 1) + + +def test_rich_comparison_with_unsupported_type(): + # Comparisons with unsupported objects should return NotImplemented + # (it previously raised TypeError, see #24011) + + class Inf(object): + def __lt__(self, o): + return False + + def __le__(self, o): + return isinstance(o, Inf) + + def __gt__(self, o): + return not isinstance(o, Inf) + + def __ge__(self, o): + return True + + def __eq__(self, o): + return isinstance(o, Inf) + + inf = Inf() + timestamp = Timestamp('2018-11-30') + + for left, 
right in [(inf, timestamp), (timestamp, inf)]: + assert left > right or left < right + assert left >= right or left <= right + assert not (left == right) + assert left != right
- [x] closes #24011 - [x] tests added / passed (see https://github.com/AlexandreDecan/pandas/blob/fix-24011/pandas/tests/tslibs/test_tslib.py#L26) - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24021
2018-11-30T17:49:28Z
2019-04-05T00:40:48Z
2019-04-05T00:40:47Z
2019-04-05T00:40:52Z
DEPS: Pinning versions of some dependencies to speed up environment creation
diff --git a/environment.yml b/environment.yml index fc35f1290f1b1..aa3f1fe15820e 100644 --- a/environment.yml +++ b/environment.yml @@ -4,22 +4,21 @@ channels: - conda-forge dependencies: # required - - NumPy + - numpy>=1.15 - python=3 - python-dateutil>=2.5.0 - pytz # development - - Cython>=0.28.2 + - cython>=0.28.2 - flake8 - flake8-comprehensions - flake8-rst=0.4.2 - gitpython - - hypothesis>=3.58.0 + - hypothesis>=3.82 - isort - moto - - pytest>=3.6 - - setuptools>=24.2.0 + - pytest>=4.0 - sphinx - sphinxcontrib-spelling @@ -28,7 +27,6 @@ dependencies: - blosc - bottleneck>=1.2.0 - fastparquet>=0.1.2 - - gcsfs - html5lib - ipython>=5.6.0 - ipykernel @@ -36,15 +34,13 @@ dependencies: - lxml - matplotlib>=2.0.0 - nbsphinx - - numexpr>=2.6.1 + - numexpr>=2.6.8 - openpyxl - pyarrow>=0.7.0 - - pymysql - pytables>=3.4.2 - pytest-cov - pytest-xdist - - s3fs - - scipy>=0.18.1 + - scipy>=1.1 - seaborn - sqlalchemy - statsmodels diff --git a/requirements-dev.txt b/requirements-dev.txt index d01a21ac5fed5..d24baf3c73356 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,23 +1,21 @@ -NumPy +numpy>=1.15 python-dateutil>=2.5.0 pytz -Cython>=0.28.2 +cython>=0.28.2 flake8 flake8-comprehensions flake8-rst==0.4.2 gitpython -hypothesis>=3.58.0 +hypothesis>=3.82 isort moto -pytest>=3.6 -setuptools>=24.2.0 +pytest>=4.0 sphinx sphinxcontrib-spelling beautifulsoup4>=4.2.1 blosc bottleneck>=1.2.0 fastparquet>=0.1.2 -gcsfs html5lib ipython>=5.6.0 ipykernel @@ -25,15 +23,13 @@ jinja2 lxml matplotlib>=2.0.0 nbsphinx -numexpr>=2.6.1 +numexpr>=2.6.8 openpyxl pyarrow>=0.7.0 -pymysql tables>=3.4.2 pytest-cov pytest-xdist -s3fs -scipy>=0.18.1 +scipy>=1.1 seaborn sqlalchemy statsmodels
- [X] closes #24012 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Conda is taking 45 minutes to create an environment with our `environment.yml`. While this is a bug in conda, it won't be fixed until the next version. So, a workaround for now is to pin the verion of some of the packages. I'm also removing couple of packages that I don't think are useful in a local environment, or in the CI build that will use the file (for the linting and to build the docs).
https://api.github.com/repos/pandas-dev/pandas/pulls/24015
2018-11-30T14:40:39Z
2018-12-01T12:09:44Z
2018-12-01T12:09:44Z
2018-12-01T12:09:48Z
TST/CLN: parametrize tests\resample\test_time_grouper.py
diff --git a/pandas/tests/resample/conftest.py b/pandas/tests/resample/conftest.py new file mode 100644 index 0000000000000..2130bd635b180 --- /dev/null +++ b/pandas/tests/resample/conftest.py @@ -0,0 +1,22 @@ +import pytest + +from pandas.tests.resample.test_base import ( + downsample_methods, resample_methods, upsample_methods) + + +@pytest.fixture(params=downsample_methods) +def downsample_method(request): + """Fixture for parametrization of Grouper downsample methods.""" + return request.param + + +@pytest.fixture(params=upsample_methods) +def upsample_method(request): + """Fixture for parametrization of Grouper upsample methods.""" + return request.param + + +@pytest.fixture(params=resample_methods) +def resample_method(request): + """Fixture for parametrization of Grouper resample methods.""" + return request.param diff --git a/pandas/tests/resample/test_base.py b/pandas/tests/resample/test_base.py index 8d710289aecc1..db2162e9357e2 100644 --- a/pandas/tests/resample/test_base.py +++ b/pandas/tests/resample/test_base.py @@ -26,7 +26,7 @@ # The various methods we support downsample_methods = ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem', - 'median', 'prod', 'var', 'ohlc', 'quantile'] + 'median', 'prod', 'var', 'std', 'ohlc', 'quantile'] upsample_methods = ['count', 'size'] series_methods = ['nunique'] resample_methods = downsample_methods + upsample_methods + series_methods diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py index 927060609822e..ec29b55ac9d67 100644 --- a/pandas/tests/resample/test_time_grouper.py +++ b/pandas/tests/resample/test_time_grouper.py @@ -1,13 +1,9 @@ -# pylint: disable=E1101 - from datetime import datetime from operator import methodcaller import numpy as np import pytest -from pandas.compat import zip - import pandas as pd from pandas import DataFrame, Panel, Series from pandas.core.indexes.datetimes import date_range @@ -104,20 +100,21 @@ def f(x): 
tm.assert_panel_equal(result, binagg) -def test_fails_on_no_datetime_index(): - index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex') - index_funcs = (tm.makeIntIndex, - tm.makeUnicodeIndex, tm.makeFloatIndex, - lambda m: tm.makeCustomIndex(m, 2)) +@pytest.mark.parametrize('name, func', [ + ('Int64Index', tm.makeIntIndex), + ('Index', tm.makeUnicodeIndex), + ('Float64Index', tm.makeFloatIndex), + ('MultiIndex', lambda m: tm.makeCustomIndex(m, 2)) +]) +def test_fails_on_no_datetime_index(name, func): n = 2 - for name, func in zip(index_names, index_funcs): - index = func(n) - df = DataFrame({'a': np.random.randn(n)}, index=index) + index = func(n) + df = DataFrame({'a': np.random.randn(n)}, index=index) - msg = ("Only valid with DatetimeIndex, TimedeltaIndex " - "or PeriodIndex, but got an instance of %r" % name) - with pytest.raises(TypeError, match=msg): - df.groupby(TimeGrouper('D')) + msg = ("Only valid with DatetimeIndex, TimedeltaIndex " + "or PeriodIndex, but got an instance of %r" % name) + with pytest.raises(TypeError, match=msg): + df.groupby(TimeGrouper('D')) def test_aaa_group_order(): @@ -143,11 +140,13 @@ def test_aaa_group_order(): df[4::5]) -def test_aggregate_normal(): - # check TimeGrouper's aggregation is identical as normal groupby +def test_aggregate_normal(resample_method): + """Check TimeGrouper's aggregation is identical as normal groupby.""" - n = 20 - data = np.random.randn(n, 4) + if resample_method == 'ohlc': + pytest.xfail(reason='DataError: No numeric types to aggregate') + + data = np.random.randn(20, 4) normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D']) normal_df['key'] = [1, 2, 3, 4, 5] * 4 @@ -159,35 +158,11 @@ def test_aggregate_normal(): normal_grouped = normal_df.groupby('key') dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D')) - for func in ['min', 'max', 'prod', 'var', 'std', 'mean']: - expected = getattr(normal_grouped, func)() - dt_result = getattr(dt_grouped, func)() - expected.index = 
date_range(start='2013-01-01', freq='D', - periods=5, name='key') - assert_frame_equal(expected, dt_result) - - for func in ['count', 'sum']: - expected = getattr(normal_grouped, func)() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) - - # GH 7453 - for func in ['size']: - expected = getattr(normal_grouped, func)() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_series_equal(expected, dt_result) - - # GH 7453 - for func in ['first', 'last']: - expected = getattr(normal_grouped, func)() - expected.index = date_range(start='2013-01-01', freq='D', - periods=5, name='key') - dt_result = getattr(dt_grouped, func)() - assert_frame_equal(expected, dt_result) + expected = getattr(normal_grouped, resample_method)() + dt_result = getattr(dt_grouped, resample_method)() + expected.index = date_range(start='2013-01-01', freq='D', + periods=5, name='key') + tm.assert_equal(expected, dt_result) # if TimeGrouper is used included, 'nth' doesn't work yet @@ -201,34 +176,23 @@ def test_aggregate_normal(): """ -@pytest.mark.parametrize('method, unit', [ - ('sum', 0), - ('prod', 1), +@pytest.mark.parametrize('method, method_args, unit', [ + ('sum', dict(), 0), + ('sum', dict(min_count=0), 0), + ('sum', dict(min_count=1), np.nan), + ('prod', dict(), 1), + ('prod', dict(min_count=0), 1), + ('prod', dict(min_count=1), np.nan) ]) -def test_resample_entirly_nat_window(method, unit): +def test_resample_entirly_nat_window(method, method_args, unit): s = pd.Series([0] * 2 + [np.nan] * 2, index=pd.date_range('2017', periods=4)) - # 0 / 1 by default - result = methodcaller(method)(s.resample("2d")) - expected = pd.Series([0.0, unit], - index=pd.to_datetime(['2017-01-01', - '2017-01-03'])) - tm.assert_series_equal(result, expected) - - # min_count=0 - result = methodcaller(method, 
min_count=0)(s.resample("2d")) + result = methodcaller(method, **method_args)(s.resample("2d")) expected = pd.Series([0.0, unit], index=pd.to_datetime(['2017-01-01', '2017-01-03'])) tm.assert_series_equal(result, expected) - # min_count=1 - result = methodcaller(method, min_count=1)(s.resample("2d")) - expected = pd.Series([0.0, np.nan], - index=pd.to_datetime(['2017-01-01', - '2017-01-03'])) - tm.assert_series_equal(result, expected) - @pytest.mark.parametrize('func, fill_value', [ ('min', np.nan), @@ -302,33 +266,22 @@ def test_repr(): assert result == expected -@pytest.mark.parametrize('method, unit', [ - ('sum', 0), - ('prod', 1), +@pytest.mark.parametrize('method, method_args, expected_values', [ + ('sum', dict(), [1, 0, 1]), + ('sum', dict(min_count=0), [1, 0, 1]), + ('sum', dict(min_count=1), [1, np.nan, 1]), + ('sum', dict(min_count=2), [np.nan, np.nan, np.nan]), + ('prod', dict(), [1, 1, 1]), + ('prod', dict(min_count=0), [1, 1, 1]), + ('prod', dict(min_count=1), [1, np.nan, 1]), + ('prod', dict(min_count=2), [np.nan, np.nan, np.nan]), ]) -def test_upsample_sum(method, unit): +def test_upsample_sum(method, method_args, expected_values): s = pd.Series(1, index=pd.date_range("2017", periods=2, freq="H")) resampled = s.resample("30T") index = pd.to_datetime(['2017-01-01T00:00:00', '2017-01-01T00:30:00', '2017-01-01T01:00:00']) - - # 0 / 1 by default - result = methodcaller(method)(resampled) - expected = pd.Series([1, unit, 1], index=index) - tm.assert_series_equal(result, expected) - - # min_count=0 - result = methodcaller(method, min_count=0)(resampled) - expected = pd.Series([1, unit, 1], index=index) - tm.assert_series_equal(result, expected) - - # min_count=1 - result = methodcaller(method, min_count=1)(resampled) - expected = pd.Series([1, np.nan, 1], index=index) - tm.assert_series_equal(result, expected) - - # min_count>1 - result = methodcaller(method, min_count=2)(resampled) - expected = pd.Series([np.nan, np.nan, np.nan], index=index) + result = 
methodcaller(method, **method_args)(resampled) + expected = pd.Series(expected_values, index=index) tm.assert_series_equal(result, expected)
- [n/a] xref #17806 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [n/a] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24013
2018-11-30T12:38:06Z
2018-12-03T12:30:53Z
2018-12-03T12:30:53Z
2018-12-03T13:14:18Z
REF: Convert test_hashing to pytest idiom
diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index 9f5b4f7b90d9f..84bc1863aadd9 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -10,272 +10,319 @@ import pandas.util.testing as tm -class TestHashing(object): - - @pytest.fixture(params=[ - Series([1, 2, 3] * 3, dtype='int32'), - Series([None, 2.5, 3.5] * 3, dtype='float32'), - Series(['a', 'b', 'c'] * 3, dtype='category'), - Series(['d', 'e', 'f'] * 3), - Series([True, False, True] * 3), - Series(pd.date_range('20130101', periods=9)), - Series(pd.date_range('20130101', periods=9, tz='US/Eastern')), - Series(pd.timedelta_range('2000', periods=9))]) - def series(self, request): - return request.param - - def test_consistency(self): - # check that our hash doesn't change because of a mistake - # in the actual code; this is the ground truth - result = hash_pandas_object(Index(['foo', 'bar', 'baz'])) - expected = Series(np.array([3600424527151052760, 1374399572096150070, - 477881037637427054], dtype='uint64'), - index=['foo', 'bar', 'baz']) - tm.assert_series_equal(result, expected) - - def test_hash_array(self, series): - a = series.values - tm.assert_numpy_array_equal(hash_array(a), hash_array(a)) - - def test_hash_array_mixed(self): - result1 = hash_array(np.array([3, 4, 'All'])) - result2 = hash_array(np.array(['3', '4', 'All'])) - result3 = hash_array(np.array([3, 4, 'All'], dtype=object)) - tm.assert_numpy_array_equal(result1, result2) - tm.assert_numpy_array_equal(result1, result3) - - @pytest.mark.parametrize('val', [5, 'foo', pd.Timestamp('20130101')]) - def test_hash_array_errors(self, val): - msg = 'must pass a ndarray-like' - with pytest.raises(TypeError, match=msg): - hash_array(val) - - def check_equal(self, obj, **kwargs): - a = hash_pandas_object(obj, **kwargs) - b = hash_pandas_object(obj, **kwargs) - tm.assert_series_equal(a, b) - - kwargs.pop('index', None) - a = hash_pandas_object(obj, **kwargs) - b = 
hash_pandas_object(obj, **kwargs) - tm.assert_series_equal(a, b) - - def check_not_equal_with_index(self, obj): - - # check that we are not hashing the same if - # we include the index - if not isinstance(obj, Index): - a = hash_pandas_object(obj, index=True) - b = hash_pandas_object(obj, index=False) - if len(obj): - assert not (a == b).all() - - def test_hash_tuples(self): - tups = [(1, 'one'), (1, 'two'), (2, 'one')] - result = hash_tuples(tups) - expected = hash_pandas_object(MultiIndex.from_tuples(tups)).values - tm.assert_numpy_array_equal(result, expected) - - result = hash_tuples(tups[0]) - assert result == expected[0] - - @pytest.mark.parametrize('tup', [ - (1, 'one'), (1, np.nan), (1.0, pd.NaT, 'A'), - ('A', pd.Timestamp("2012-01-01"))]) - def test_hash_tuple(self, tup): - # test equivalence between hash_tuples and hash_tuple - result = hash_tuple(tup) - expected = hash_tuples([tup])[0] - assert result == expected - - @pytest.mark.parametrize('val', [ - 1, 1.4, 'A', b'A', u'A', pd.Timestamp("2012-01-01"), - pd.Timestamp("2012-01-01", tz='Europe/Brussels'), - datetime.datetime(2012, 1, 1), - pd.Timestamp("2012-01-01", tz='EST').to_pydatetime(), - pd.Timedelta('1 days'), datetime.timedelta(1), - pd.Period('2012-01-01', freq='D'), pd.Interval(0, 1), - np.nan, pd.NaT, None]) - def test_hash_scalar(self, val): - result = _hash_scalar(val) - expected = hash_array(np.array([val], dtype=object), categorize=True) - assert result[0] == expected[0] - - @pytest.mark.parametrize('val', [5, 'foo', pd.Timestamp('20130101')]) - def test_hash_tuples_err(self, val): - msg = 'must be convertible to a list-of-tuples' - with pytest.raises(TypeError, match=msg): - hash_tuples(val) - - def test_multiindex_unique(self): - mi = MultiIndex.from_tuples([(118, 472), (236, 118), - (51, 204), (102, 51)]) - assert mi.is_unique is True - result = hash_pandas_object(mi) - assert result.is_unique is True - - def test_multiindex_objects(self): - mi = MultiIndex(levels=[['b', 'd', 'a'], [1, 
2, 3]], - labels=[[0, 1, 0, 2], [2, 0, 0, 1]], - names=['col1', 'col2']) - recons = mi._sort_levels_monotonic() - - # these are equal - assert mi.equals(recons) - assert Index(mi.values).equals(Index(recons.values)) - - # _hashed_values and hash_pandas_object(..., index=False) - # equivalency - expected = hash_pandas_object( - mi, index=False).values - result = mi._hashed_values - tm.assert_numpy_array_equal(result, expected) - - expected = hash_pandas_object( - recons, index=False).values - result = recons._hashed_values - tm.assert_numpy_array_equal(result, expected) - - expected = mi._hashed_values - result = recons._hashed_values - - # values should match, but in different order - tm.assert_numpy_array_equal(np.sort(result), - np.sort(expected)) - - @pytest.mark.parametrize('obj', [ - Series([1, 2, 3]), - Series([1.0, 1.5, 3.2]), - Series([1.0, 1.5, np.nan]), - Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]), - Series(['a', 'b', 'c']), - Series(['a', np.nan, 'c']), - Series(['a', None, 'c']), - Series([True, False, True]), - Series(), - Index([1, 2, 3]), - Index([True, False, True]), - DataFrame({'x': ['a', 'b', 'c'], 'y': [1, 2, 3]}), - DataFrame(), - tm.makeMissingDataframe(), - tm.makeMixedDataFrame(), - tm.makeTimeDataFrame(), - tm.makeTimeSeries(), - tm.makeTimedeltaIndex(), - tm.makePeriodIndex(), - Series(tm.makePeriodIndex()), - Series(pd.date_range('20130101', periods=3, tz='US/Eastern')), - MultiIndex.from_product([range(5), ['foo', 'bar', 'baz'], - pd.date_range('20130101', periods=2)]), - MultiIndex.from_product([pd.CategoricalIndex(list('aabc')), range(3)]) - ]) - def test_hash_pandas_object(self, obj): - self.check_equal(obj) - self.check_not_equal_with_index(obj) - - def test_hash_pandas_object2(self, series): - self.check_equal(series) - self.check_not_equal_with_index(series) - - @pytest.mark.parametrize('obj', [ - Series([], dtype='float64'), Series([], dtype='object'), Index([])]) - def test_hash_pandas_empty_object(self, obj): - # these are 
by-definition the same with - # or w/o the index as the data is empty - self.check_equal(obj) - - @pytest.mark.parametrize('s1', [ - Series(['a', 'b', 'c', 'd']), - Series([1000, 2000, 3000, 4000]), - Series(pd.date_range(0, periods=4))]) - @pytest.mark.parametrize('categorize', [True, False]) - def test_categorical_consistency(self, s1, categorize): - # GH15143 - # Check that categoricals hash consistent with their values, not codes - # This should work for categoricals of any dtype - s2 = s1.astype('category').cat.set_categories(s1) - s3 = s2.cat.set_categories(list(reversed(s1))) - - # These should all hash identically - h1 = hash_pandas_object(s1, categorize=categorize) - h2 = hash_pandas_object(s2, categorize=categorize) - h3 = hash_pandas_object(s3, categorize=categorize) - tm.assert_series_equal(h1, h2) - tm.assert_series_equal(h1, h3) - - def test_categorical_with_nan_consistency(self): - c = pd.Categorical.from_codes( - [-1, 0, 1, 2, 3, 4], - categories=pd.date_range('2012-01-01', periods=5, name='B')) - expected = hash_array(c, categorize=False) - c = pd.Categorical.from_codes( - [-1, 0], - categories=[pd.Timestamp('2012-01-01')]) - result = hash_array(c, categorize=False) - assert result[0] in expected - assert result[1] in expected - - @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") - def test_pandas_errors(self): - with pytest.raises(TypeError): - hash_pandas_object(pd.Timestamp('20130101')) - - obj = tm.makePanel() - - with pytest.raises(TypeError): - hash_pandas_object(obj) - - def test_hash_keys(self): - # using different hash keys, should have different hashes - # for the same data - - # this only matters for object dtypes - obj = Series(list('abc')) - a = hash_pandas_object(obj, hash_key='9876543210123456') - b = hash_pandas_object(obj, hash_key='9876543210123465') - assert (a != b).all() - - def test_invalid_key(self): - # this only matters for object dtypes - msg = 'key should be a 16-byte string encoded' - with 
pytest.raises(ValueError, match=msg): - hash_pandas_object(Series(list('abc')), hash_key='foo') - - def test_alread_encoded(self): - # if already encoded then ok - - obj = Series(list('abc')).str.encode('utf8') - self.check_equal(obj) - - def test_alternate_encoding(self): - - obj = Series(list('abc')) - self.check_equal(obj, encoding='ascii') - - @pytest.mark.parametrize('l_exp', range(8)) - @pytest.mark.parametrize('l_add', [0, 1]) - def test_same_len_hash_collisions(self, l_exp, l_add): - length = 2**(l_exp + 8) + l_add - s = tm.rands_array(length, 2) - result = hash_array(s, 'utf8') - assert not result[0] == result[1] - - def test_hash_collisions(self): - - # hash collisions are bad - # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726 - L = ['Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9', # noqa - 'Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe'] # noqa - - # these should be different! 
- result1 = hash_array(np.asarray(L[0:1], dtype=object), 'utf8') - expected1 = np.array([14963968704024874985], dtype=np.uint64) - tm.assert_numpy_array_equal(result1, expected1) - - result2 = hash_array(np.asarray(L[1:2], dtype=object), 'utf8') - expected2 = np.array([16428432627716348016], dtype=np.uint64) - tm.assert_numpy_array_equal(result2, expected2) - - result = hash_array(np.asarray(L, dtype=object), 'utf8') - tm.assert_numpy_array_equal( - result, np.concatenate([expected1, expected2], axis=0)) +@pytest.fixture(params=[ + Series([1, 2, 3] * 3, dtype="int32"), + Series([None, 2.5, 3.5] * 3, dtype="float32"), + Series(["a", "b", "c"] * 3, dtype="category"), + Series(["d", "e", "f"] * 3), + Series([True, False, True] * 3), + Series(pd.date_range("20130101", periods=9)), + Series(pd.date_range("20130101", periods=9, tz="US/Eastern")), + Series(pd.timedelta_range("2000", periods=9))]) +def series(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def index(request): + return request.param + + +def _check_equal(obj, **kwargs): + """ + Check that hashing an objects produces the same value each time. + + Parameters + ---------- + obj : object + The object to hash. + kwargs : kwargs + Keyword arguments to pass to the hashing function. + """ + a = hash_pandas_object(obj, **kwargs) + b = hash_pandas_object(obj, **kwargs) + tm.assert_series_equal(a, b) + + +def _check_not_equal_with_index(obj): + """ + Check the hash of an object with and without its index is not the same. + + Parameters + ---------- + obj : object + The object to hash. + """ + if not isinstance(obj, Index): + a = hash_pandas_object(obj, index=True) + b = hash_pandas_object(obj, index=False) + + if len(obj): + assert not (a == b).all() + + +def test_consistency(): + # Check that our hash doesn't change because of a mistake + # in the actual code; this is the ground truth. 
+ result = hash_pandas_object(Index(["foo", "bar", "baz"])) + expected = Series(np.array([3600424527151052760, 1374399572096150070, + 477881037637427054], dtype="uint64"), + index=["foo", "bar", "baz"]) + tm.assert_series_equal(result, expected) + + +def test_hash_array(series): + arr = series.values + tm.assert_numpy_array_equal(hash_array(arr), hash_array(arr)) + + +@pytest.mark.parametrize("arr2", [ + np.array([3, 4, "All"]), + np.array([3, 4, "All"], dtype=object), +]) +def test_hash_array_mixed(arr2): + result1 = hash_array(np.array(["3", "4", "All"])) + result2 = hash_array(arr2) + + tm.assert_numpy_array_equal(result1, result2) + + +@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")]) +def test_hash_array_errors(val): + msg = "must pass a ndarray-like" + with pytest.raises(TypeError, match=msg): + hash_array(val) + + +def test_hash_tuples(): + tuples = [(1, "one"), (1, "two"), (2, "one")] + result = hash_tuples(tuples) + + expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values + tm.assert_numpy_array_equal(result, expected) + + result = hash_tuples(tuples[0]) + assert result == expected[0] + + +@pytest.mark.parametrize("tup", [ + (1, "one"), (1, np.nan), (1.0, pd.NaT, "A"), + ("A", pd.Timestamp("2012-01-01"))]) +def test_hash_tuple(tup): + # Test equivalence between + # hash_tuples and hash_tuple. 
+ result = hash_tuple(tup) + expected = hash_tuples([tup])[0] + + assert result == expected + + +@pytest.mark.parametrize("val", [ + 1, 1.4, "A", b"A", u"A", pd.Timestamp("2012-01-01"), + pd.Timestamp("2012-01-01", tz="Europe/Brussels"), + datetime.datetime(2012, 1, 1), + pd.Timestamp("2012-01-01", tz="EST").to_pydatetime(), + pd.Timedelta("1 days"), datetime.timedelta(1), + pd.Period("2012-01-01", freq="D"), pd.Interval(0, 1), + np.nan, pd.NaT, None]) +def test_hash_scalar(val): + result = _hash_scalar(val) + expected = hash_array(np.array([val], dtype=object), categorize=True) + + assert result[0] == expected[0] + + +@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")]) +def test_hash_tuples_err(val): + msg = "must be convertible to a list-of-tuples" + with pytest.raises(TypeError, match=msg): + hash_tuples(val) + + +def test_multiindex_unique(): + mi = MultiIndex.from_tuples([(118, 472), (236, 118), + (51, 204), (102, 51)]) + assert mi.is_unique is True + + result = hash_pandas_object(mi) + assert result.is_unique is True + + +def test_multiindex_objects(): + mi = MultiIndex(levels=[["b", "d", "a"], [1, 2, 3]], + labels=[[0, 1, 0, 2], [2, 0, 0, 1]], + names=["col1", "col2"]) + recons = mi._sort_levels_monotonic() + + # These are equal. + assert mi.equals(recons) + assert Index(mi.values).equals(Index(recons.values)) + + # _hashed_values and hash_pandas_object(..., index=False) equivalency. + expected = hash_pandas_object(mi, index=False).values + result = mi._hashed_values + + tm.assert_numpy_array_equal(result, expected) + + expected = hash_pandas_object(recons, index=False).values + result = recons._hashed_values + + tm.assert_numpy_array_equal(result, expected) + + expected = mi._hashed_values + result = recons._hashed_values + + # Values should match, but in different order. 
+ tm.assert_numpy_array_equal(np.sort(result), np.sort(expected)) + + +@pytest.mark.parametrize("obj", [ + Series([1, 2, 3]), + Series([1.0, 1.5, 3.2]), + Series([1.0, 1.5, np.nan]), + Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]), + Series(["a", "b", "c"]), + Series(["a", np.nan, "c"]), + Series(["a", None, "c"]), + Series([True, False, True]), + Series(), + Index([1, 2, 3]), + Index([True, False, True]), + DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}), + DataFrame(), + tm.makeMissingDataframe(), + tm.makeMixedDataFrame(), + tm.makeTimeDataFrame(), + tm.makeTimeSeries(), + tm.makeTimedeltaIndex(), + tm.makePeriodIndex(), + Series(tm.makePeriodIndex()), + Series(pd.date_range("20130101", periods=3, tz="US/Eastern")), + MultiIndex.from_product([range(5), ["foo", "bar", "baz"], + pd.date_range("20130101", periods=2)]), + MultiIndex.from_product([pd.CategoricalIndex(list("aabc")), range(3)]) +]) +def test_hash_pandas_object(obj, index): + _check_equal(obj, index=index) + _check_not_equal_with_index(obj) + + +def test_hash_pandas_object2(series, index): + _check_equal(series, index=index) + _check_not_equal_with_index(series) + + +@pytest.mark.parametrize("obj", [ + Series([], dtype="float64"), Series([], dtype="object"), Index([])]) +def test_hash_pandas_empty_object(obj, index): + # These are by-definition the same with + # or without the index as the data is empty. + _check_equal(obj, index=index) + + +@pytest.mark.parametrize("s1", [ + Series(["a", "b", "c", "d"]), + Series([1000, 2000, 3000, 4000]), + Series(pd.date_range(0, periods=4))]) +@pytest.mark.parametrize("categorize", [True, False]) +def test_categorical_consistency(s1, categorize): + # see gh-15143 + # + # Check that categoricals hash consistent with their values, + # not codes. This should work for categoricals of any dtype. + s2 = s1.astype("category").cat.set_categories(s1) + s3 = s2.cat.set_categories(list(reversed(s1))) + + # These should all hash identically. 
+ h1 = hash_pandas_object(s1, categorize=categorize) + h2 = hash_pandas_object(s2, categorize=categorize) + h3 = hash_pandas_object(s3, categorize=categorize) + + tm.assert_series_equal(h1, h2) + tm.assert_series_equal(h1, h3) + + +def test_categorical_with_nan_consistency(): + c = pd.Categorical.from_codes( + [-1, 0, 1, 2, 3, 4], + categories=pd.date_range("2012-01-01", periods=5, name="B")) + expected = hash_array(c, categorize=False) + + c = pd.Categorical.from_codes( + [-1, 0], + categories=[pd.Timestamp("2012-01-01")]) + result = hash_array(c, categorize=False) + + assert result[0] in expected + assert result[1] in expected + + +@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning") +@pytest.mark.parametrize("obj", [pd.Timestamp("20130101"), tm.makePanel()]) +def test_pandas_errors(obj): + msg = "Unexpected type for hashing" + with pytest.raises(TypeError, match=msg): + hash_pandas_object(obj) + + +def test_hash_keys(): + # Using different hash keys, should have + # different hashes for the same data. + # + # This only matters for object dtypes. + obj = Series(list("abc")) + + a = hash_pandas_object(obj, hash_key="9876543210123456") + b = hash_pandas_object(obj, hash_key="9876543210123465") + + assert (a != b).all() + + +def test_invalid_key(): + # This only matters for object dtypes. + msg = "key should be a 16-byte string encoded" + + with pytest.raises(ValueError, match=msg): + hash_pandas_object(Series(list("abc")), hash_key="foo") + + +def test_already_encoded(index): + # If already encoded, then ok. 
+ obj = Series(list("abc")).str.encode("utf8") + _check_equal(obj, index=index) + + +def test_alternate_encoding(index): + obj = Series(list("abc")) + _check_equal(obj, index=index, encoding="ascii") + + +@pytest.mark.parametrize("l_exp", range(8)) +@pytest.mark.parametrize("l_add", [0, 1]) +def test_same_len_hash_collisions(l_exp, l_add): + length = 2**(l_exp + 8) + l_add + s = tm.rands_array(length, 2) + + result = hash_array(s, "utf8") + assert not result[0] == result[1] + + +def test_hash_collisions(): + # Hash collisions are bad. + # + # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726 + hashes = ["Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9", # noqa + "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe"] # noqa + + # These should be different. 
+ result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8") + expected1 = np.array([14963968704024874985], dtype=np.uint64) + tm.assert_numpy_array_equal(result1, expected1) + + result2 = hash_array(np.asarray(hashes[1:2], dtype=object), "utf8") + expected2 = np.array([16428432627716348016], dtype=np.uint64) + tm.assert_numpy_array_equal(result2, expected2) + + result = hash_array(np.asarray(hashes, dtype=object), "utf8") + tm.assert_numpy_array_equal(result, np.concatenate([expected1, + expected2], axis=0))
https://api.github.com/repos/pandas-dev/pandas/pulls/24009
2018-11-30T06:09:51Z
2018-11-30T20:46:03Z
2018-11-30T20:46:03Z
2018-11-30T20:46:32Z
PERF: optimize NaT lookups in cython modules
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index 1205ebbe311e2..5528e183af6e6 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -40,8 +40,8 @@ from tslibs.conversion cimport (tz_convert_single, _TSObject, tz_convert_utc_to_tzlocal) # many modules still look for NaT and iNaT here despite them not being needed -from tslibs.nattype import nat_strings, NaT, iNaT # noqa:F821 -from tslibs.nattype cimport checknull_with_nat, NPY_NAT +from tslibs.nattype import nat_strings, iNaT # noqa:F821 +from tslibs.nattype cimport checknull_with_nat, NPY_NAT, c_NaT as NaT from tslibs.offsets cimport to_offset diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 4a34065fe471f..e6e7884f05b20 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -39,8 +39,8 @@ from timezones cimport (is_utc, is_tzlocal, is_fixed_offset, from timezones import UTC from parsing import parse_datetime_string -from nattype import nat_strings, NaT -from nattype cimport NPY_NAT, checknull_with_nat +from nattype import nat_strings +from nattype cimport NPY_NAT, checknull_with_nat, c_NaT as NaT # ---------------------------------------------------------------------- # Constants diff --git a/pandas/_libs/tslibs/nattype.pxd b/pandas/_libs/tslibs/nattype.pxd index 382ac9d323918..f649518e969be 100644 --- a/pandas/_libs/tslibs/nattype.pxd +++ b/pandas/_libs/tslibs/nattype.pxd @@ -1,9 +1,20 @@ # -*- coding: utf-8 -*- +from cpython.datetime cimport datetime + from numpy cimport int64_t cdef int64_t NPY_NAT cdef bint _nat_scalar_rules[6] + +cdef class _NaT(datetime): + cdef readonly: + int64_t value + object freq + +cdef _NaT c_NaT + + cdef bint checknull_with_nat(object val) cdef bint is_null_datetimelike(object val) diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx index 7b7f5f2e34c5f..42ec235992089 100644 --- a/pandas/_libs/tslibs/nattype.pyx +++ b/pandas/_libs/tslibs/nattype.pyx 
@@ -47,7 +47,7 @@ def _make_nan_func(func_name, doc): def _make_nat_func(func_name, doc): def f(*args, **kwargs): - return NaT + return c_NaT f.__name__ = func_name f.__doc__ = doc return f @@ -67,10 +67,10 @@ def _make_error_func(func_name, cls): cdef _nat_divide_op(self, other): - if PyDelta_Check(other) or is_timedelta64_object(other) or other is NaT: + if PyDelta_Check(other) or is_timedelta64_object(other) or other is c_NaT: return np.nan if is_integer_object(other) or is_float_object(other): - return NaT + return c_NaT return NotImplemented @@ -82,15 +82,15 @@ cdef _nat_rdivide_op(self, other): def __nat_unpickle(*args): # return constant defined in the module - return NaT + return c_NaT # ---------------------------------------------------------------------- cdef class _NaT(datetime): - cdef readonly: - int64_t value - object freq + # cdef readonly: + # int64_t value + # object freq def __hash__(_NaT self): # py3k needs this defined here @@ -116,18 +116,18 @@ cdef class _NaT(datetime): def __add__(self, other): if PyDateTime_Check(other): - return NaT + return c_NaT elif hasattr(other, 'delta'): # Timedelta, offsets.Tick, offsets.Week - return NaT + return c_NaT elif getattr(other, '_typ', None) in ['dateoffset', 'series', 'period', 'datetimeindex', 'timedeltaindex']: # Duplicate logic in _Timestamp.__add__ to avoid needing # to subclass; allows us to @final(_Timestamp.__add__) return NotImplemented - return NaT + return c_NaT def __sub__(self, other): # Duplicate some logic from _Timestamp.__sub__ to avoid needing @@ -184,19 +184,6 @@ cdef class _NaT(datetime): """ Returns a numpy.datetime64 object with 'ns' precision """ return np.datetime64('NaT', 'ns') - -class NaTType(_NaT): - """(N)ot-(A)-(T)ime, the time equivalent of NaN""" - - def __new__(cls): - cdef _NaT base - - base = _NaT.__new__(cls, 1, 1, 1) - base.value = NPY_NAT - base.freq = None - - return base - def __repr__(self): return 'NaT' @@ -216,20 +203,11 @@ class NaTType(_NaT): def 
__long__(self): return NPY_NAT - def __reduce_ex__(self, protocol): - # python 3.6 compat - # http://bugs.python.org/issue28730 - # now __reduce_ex__ is defined and higher priority than __reduce__ - return self.__reduce__() - - def __reduce__(self): - return (__nat_unpickle, (None, )) - def total_seconds(self): """ Total duration of timedelta in seconds (to ns precision) """ - # GH 10939 + # GH#10939 return np.nan @property @@ -260,6 +238,28 @@ class NaTType(_NaT): def is_year_end(self): return False + +class NaTType(_NaT): + """(N)ot-(A)-(T)ime, the time equivalent of NaN""" + + def __new__(cls): + cdef _NaT base + + base = _NaT.__new__(cls, 1, 1, 1) + base.value = NPY_NAT + base.freq = None + + return base + + def __reduce_ex__(self, protocol): + # python 3.6 compat + # http://bugs.python.org/issue28730 + # now __reduce_ex__ is defined and higher priority than __reduce__ + return self.__reduce__() + + def __reduce__(self): + return (__nat_unpickle, (None, )) + def __rdiv__(self, other): return _nat_rdivide_op(self, other) @@ -271,7 +271,7 @@ class NaTType(_NaT): def __rmul__(self, other): if is_integer_object(other) or is_float_object(other): - return NaT + return c_NaT return NotImplemented # ---------------------------------------------------------------------- @@ -659,14 +659,15 @@ class NaTType(_NaT): """) -NaT = NaTType() +c_NaT = NaTType() # C-visible +NaT = c_NaT # Python-visible # ---------------------------------------------------------------------- cdef inline bint checknull_with_nat(object val): """ utility to check if a value is a nat or not """ - return val is None or util.is_nan(val) or val is NaT + return val is None or util.is_nan(val) or val is c_NaT cdef inline bint is_null_datetimelike(object val): @@ -683,7 +684,7 @@ cdef inline bint is_null_datetimelike(object val): """ if val is None or util.is_nan(val): return True - elif val is NaT: + elif val is c_NaT: return True elif util.is_timedelta64_object(val): return val.view('int64') == NPY_NAT 
diff --git a/pandas/_libs/tslibs/period.pyx b/pandas/_libs/tslibs/period.pyx index e02e493c32a00..dfbf24cf177f6 100644 --- a/pandas/_libs/tslibs/period.pyx +++ b/pandas/_libs/tslibs/period.pyx @@ -46,8 +46,9 @@ from frequencies cimport (get_freq_code, get_base_alias, get_rule_month) from parsing import parse_time_string from resolution import Resolution -from nattype import nat_strings, NaT -from nattype cimport _nat_scalar_rules, NPY_NAT, is_null_datetimelike +from nattype import nat_strings +from nattype cimport ( + _nat_scalar_rules, NPY_NAT, is_null_datetimelike, c_NaT as NaT) from offsets cimport to_offset from offsets import _Tick diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 4d612a6f43107..b0bead2f66ce4 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -33,8 +33,8 @@ from ccalendar import DAY_SECONDS from np_datetime cimport (cmp_scalar, reverse_ops, td64_to_tdstruct, pandas_timedeltastruct) -from nattype import nat_strings, NaT -from nattype cimport checknull_with_nat, NPY_NAT +from nattype import nat_strings +from nattype cimport checknull_with_nat, NPY_NAT, c_NaT as NaT from offsets cimport to_offset # ---------------------------------------------------------------------- diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx index f414cd161e562..b4862a5f3b02f 100644 --- a/pandas/_libs/tslibs/timestamps.pyx +++ b/pandas/_libs/tslibs/timestamps.pyx @@ -26,8 +26,7 @@ from conversion import tz_localize_to_utc, normalize_i8_timestamps from conversion cimport (tz_convert_single, _TSObject, convert_to_tsobject, convert_datetime_to_tsobject) from fields import get_start_end_field, get_date_name_field -from nattype import NaT -from nattype cimport NPY_NAT +from nattype cimport NPY_NAT, c_NaT as NaT from np_datetime import OutOfBoundsDatetime from np_datetime cimport (reverse_ops, cmp_scalar, check_dts_bounds, npy_datetimestruct, dt64_to_dtstruct) 
diff --git a/pandas/_libs/tslibs/timezones.pyx b/pandas/_libs/tslibs/timezones.pyx index 5fa8a45af3083..9f8922b274abd 100644 --- a/pandas/_libs/tslibs/timezones.pyx +++ b/pandas/_libs/tslibs/timezones.pyx @@ -2,6 +2,8 @@ from cython import Py_ssize_t +from cpython.datetime cimport tzinfo + # dateutil compat from dateutil.tz import ( tzutc as _dateutil_tzutc, diff --git a/pandas/core/arrays/datetimes.py b/pandas/core/arrays/datetimes.py index 050442c530314..2341187c40a9e 100644 --- a/pandas/core/arrays/datetimes.py +++ b/pandas/core/arrays/datetimes.py @@ -6,10 +6,9 @@ from pytz import utc from pandas._libs import lib, tslib -from pandas._libs.tslib import NaT, Timestamp, iNaT from pandas._libs.tslibs import ( - ccalendar, conversion, fields, normalize_date, resolution as libresolution, - timezones) + NaT, Timestamp, ccalendar, conversion, fields, iNaT, normalize_date, + resolution as libresolution, timezones) import pandas.compat as compat from pandas.errors import PerformanceWarning from pandas.util._decorators import Appender, cache_readonly diff --git a/pandas/core/arrays/period.py b/pandas/core/arrays/period.py index 53629dca4d391..bb009d9370dec 100644 --- a/pandas/core/arrays/period.py +++ b/pandas/core/arrays/period.py @@ -4,8 +4,7 @@ import numpy as np -from pandas._libs.tslib import NaT, iNaT -from pandas._libs.tslibs import period as libperiod +from pandas._libs.tslibs import NaT, iNaT, period as libperiod from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.period import ( DIFFERENT_FREQ_INDEX, IncompatibleFrequency, Period, get_period_field_arr, diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index ddf3984744114..d2a31de5c0938 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -4,7 +4,7 @@ import pytest import pytz -from pandas._libs.tslib import iNaT +from pandas._libs.tslibs import iNaT from pandas import ( DatetimeIndex, Index, NaT, Period, Series, Timedelta, 
TimedeltaIndex, diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py index 82cfdb9e0751e..477c8aa4c3b0d 100644 --- a/pandas/tests/scalar/timedelta/test_timedelta.py +++ b/pandas/tests/scalar/timedelta/test_timedelta.py @@ -4,7 +4,7 @@ import numpy as np import pytest -from pandas._libs.tslib import NaT, iNaT +from pandas._libs.tslibs import NaT, iNaT import pandas.compat as compat import pandas as pd diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 269b017fa2141..030887ac731f3 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -5,13 +5,11 @@ import pytest import pytz -import pandas._libs.tslib as tslib -from pandas._libs.tslib import NaT, Timestamp -from pandas._libs.tslibs import conversion, timezones +from pandas._libs.tslibs import ( + NaT, OutOfBoundsDatetime, Timedelta, Timestamp, conversion, timezones) from pandas._libs.tslibs.frequencies import ( INVALID_FREQ_ERR_MSG, get_freq_code, get_freq_str) import pandas._libs.tslibs.offsets as liboffsets -from pandas._libs.tslibs.timedeltas import Timedelta import pandas.compat as compat from pandas.compat import range from pandas.compat.numpy import np_datetime64_compat @@ -124,7 +122,7 @@ def test_apply_out_of_range(self, tz_naive_fixture): assert isinstance(result, datetime) assert t.tzinfo == result.tzinfo - except tslib.OutOfBoundsDatetime: + except OutOfBoundsDatetime: raise except (ValueError, KeyError): # we are creating an invalid offset
By making `NaT` a cdef'd object that we can cimport, we take a module-level lookup out of each check of `if obj is NaT`. Since we tend to do this check a lot, avoiding these global lookups can get us some mileage: ``` In [3]: vals = np.array([pd.NaT for _ in range(10**6)]) In [4]: %timeit pd.to_datetime(vals) ``` master: ``` 10 loops, best of 3: 33.1 ms per loop 100 loops, best of 3: 29.9 ms per loop 10 loops, best of 3: 25.8 ms per loop 10 loops, best of 3: 20 ms per loop 10 loops, best of 3: 31 ms per loop 10 loops, best of 3: 19.8 ms per loop 10 loops, best of 3: 26.6 ms per loop 10 loops, best of 3: 32.2 ms per loop ``` PR ``` 10 loops, best of 3: 13.6 ms per loop 10 loops, best of 3: 20.6 ms per loop 10 loops, best of 3: 14.1 ms per loop 100 loops, best of 3: 14.7 ms per loop 10 loops, best of 3: 14.4 ms per loop 10 loops, best of 3: 20.9 ms per loop 10 loops, best of 3: 20.8 ms per loop 100 loops, best of 3: 16.6 ms per loop ```
https://api.github.com/repos/pandas-dev/pandas/pulls/24008
2018-11-30T05:24:15Z
2018-12-02T23:06:55Z
2018-12-02T23:06:55Z
2018-12-02T23:10:25Z
API: fix str-accessor on CategoricalIndex
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index f2cb6a3389a6d..cf2945d8544a5 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1256,6 +1256,7 @@ Categorical - Bug in :meth:`Categorical.take` with a user-provided ``fill_value`` not encoding the ``fill_value``, which could result in a ``ValueError``, incorrect results, or a segmentation fault (:issue:`23296`). - In meth:`Series.unstack`, specifying a ``fill_value`` not present in the categories now raises a ``TypeError`` rather than ignoring the ``fill_value`` (:issue:`23284`) - Bug when resampling :meth:`Dataframe.resample()` and aggregating on categorical data, the categorical dtype was getting lost. (:issue:`23227`) +- Bug in many methods of the ``.str``-accessor, which always failed on calling the ``CategoricalIndex.str`` constructor (:issue:`23555`, :issue:`23556`) Datetimelike ^^^^^^^^^^^^ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 0b791f6f91aa3..fd76293b4a0ca 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -15,7 +15,7 @@ from pandas.core.dtypes.common import ( ensure_object, is_bool_dtype, is_categorical_dtype, is_integer, is_list_like, is_object_dtype, is_re, is_scalar, is_string_like) -from pandas.core.dtypes.generic import ABCIndex, ABCSeries +from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core.algorithms import take_1d @@ -931,7 +931,7 @@ def str_extractall(arr, pat, flags=0): if regex.groups == 0: raise ValueError("pattern contains no capture groups") - if isinstance(arr, ABCIndex): + if isinstance(arr, ABCIndexClass): arr = arr.to_series().reset_index(drop=True) names = dict(zip(regex.groupindex.values(), regex.groupindex.keys())) @@ -1854,7 +1854,7 @@ def __iter__(self): def _wrap_result(self, result, use_codes=True, name=None, expand=None, fill_value=np.nan): - from pandas.core.index import Index, MultiIndex + from 
pandas import Index, Series, MultiIndex # for category, we do the stuff on the categories, so blow it up # to the full series again @@ -1862,7 +1862,8 @@ def _wrap_result(self, result, use_codes=True, # so make it possible to skip this step as the method already did this # before the transformation... if use_codes and self._is_categorical: - result = take_1d(result, self._orig.cat.codes, + # if self._orig is a CategoricalIndex, there is no .cat-accessor + result = take_1d(result, Series(self._orig, copy=False).cat.codes, fill_value=fill_value) if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'): diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 117984ce89743..f3a7753b8bd48 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -245,9 +245,6 @@ def test_api_per_method(self, box, dtype, and inferred_dtype in ['boolean', 'date', 'time']): pytest.xfail(reason='Inferring incorrectly because of NaNs; ' 'solved by GH 23167') - if box == Index and dtype == 'category': - pytest.xfail(reason='Broken methods on CategoricalIndex; ' - 'see GH 23556') t = box(values, dtype=dtype) # explicit dtype to avoid casting method = getattr(t.str, method_name) @@ -264,6 +261,7 @@ def test_api_per_method(self, box, dtype, + ['mixed', 'mixed-integer'] * mixed_allowed) if inferred_dtype in allowed_types: + # xref GH 23555, GH 23556 method(*args, **kwargs) # works! else: # GH 23011, GH 23163
closes #23555 closes #23556 split off from #23167 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/24005
2018-11-29T23:37:46Z
2018-12-02T23:06:09Z
2018-12-02T23:06:09Z
2018-12-02T23:14:26Z
CLEAN: Removing outdated and unused CI files
diff --git a/ci/README.txt b/ci/README.txt deleted file mode 100644 index bb71dc25d6093..0000000000000 --- a/ci/README.txt +++ /dev/null @@ -1,17 +0,0 @@ -Travis is a ci service that's well-integrated with GitHub. -The following types of breakage should be detected -by Travis builds: - -1) Failing tests on any supported version of Python. -2) Pandas should install and the tests should run if no optional deps are installed. -That also means tests which rely on optional deps need to raise SkipTest() -if the dep is missing. -3) unicode related fails when running under exotic locales. - -We tried running the vbench suite for a while, but with varying load -on Travis machines, that wasn't useful. - -Travis currently (4/2013) has a 5-job concurrency limit. Exceeding it -basically doubles the total runtime for a commit through travis, and -since dep+pandas installation is already quite long, this should become -a hard limit on concurrent travis runs. diff --git a/ci/print_versions.py b/ci/print_versions.py deleted file mode 100755 index a2c93748b0388..0000000000000 --- a/ci/print_versions.py +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env python - - -def show_versions(as_json=False): - import imp - import os - fn = __file__ - this_dir = os.path.dirname(fn) - pandas_dir = os.path.abspath(os.path.join(this_dir, "..")) - sv_path = os.path.join(pandas_dir, 'pandas', 'util') - mod = imp.load_module( - 'pvmod', *imp.find_module('print_versions', [sv_path])) - return mod.show_versions(as_json) - - -if __name__ == '__main__': - # optparse is 2.6-safe - from optparse import OptionParser - parser = OptionParser() - parser.add_option("-j", "--json", metavar="FILE", nargs=1, - help="Save output as JSON into file, " - "pass in '-' to output to stdout") - - (options, args) = parser.parse_args() - - if options.json == "-": - options.json = True - - show_versions(as_json=options.json)
Removing outdated and unused files. - `ci/print_versions.py` doesn't work, as the used path has changed. It's not used anywhere in the code, and when needed, `python -c "import pandas; pandas.show_versions();"` is used instead. - `ci/README.txt` it's outdated and incomplete. After updating the CI and making it more compact, I'll try to add a section to the contributing page in the docs, about what we have. This should be more useful than this file.
https://api.github.com/repos/pandas-dev/pandas/pulls/24004
2018-11-29T23:26:55Z
2018-11-30T21:34:12Z
2018-11-30T21:34:12Z
2018-11-30T21:34:14Z
Fix PEP-8 issues in contributing.rst
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index 6fdb5bdbb6b1d..c55452cf27309 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -670,6 +670,8 @@ Otherwise, you need to do it manually: .. code-block:: python + import warnings + def old_func(): """Summary of the function. @@ -679,6 +681,9 @@ Otherwise, you need to do it manually: warnings.warn('Use new_func instead.', FutureWarning, stacklevel=2) new_func() + def new_func(): + pass + You'll also need to 1. write a new test that asserts a warning is issued when calling with the deprecated argument @@ -933,6 +938,8 @@ If your change involves checking that a warning is actually emitted, use .. code-block:: python + df = pd.DataFrame() + with tm.assert_produces_warning(FutureWarning): df.some_operation() @@ -963,7 +970,7 @@ a single test. .. code-block:: python - with warch.catch_warnings(): + with warnings.catch_warnings(): warnings.simplefilter("ignore", FutureWarning) # Or use warnings.filterwarnings(...) diff --git a/setup.cfg b/setup.cfg index cc7393e5a09b9..b9994e9ea0b2c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -73,7 +73,6 @@ exclude = doc/source/comparison_with_sql.rst doc/source/comparison_with_stata.rst doc/source/computation.rst - doc/source/contributing.rst doc/source/contributing_docstring.rst doc/source/dsintro.rst doc/source/enhancingperf.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/24003
2018-11-29T22:54:20Z
2018-12-02T22:00:37Z
2018-12-02T22:00:37Z
2018-12-03T19:09:36Z
DOC: Fix PEP-8 issues in computation.rst and comparison_*.rst
diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst index eecacde8ad14e..704b0c4d80537 100644 --- a/doc/source/comparison_with_r.rst +++ b/doc/source/comparison_with_r.rst @@ -6,7 +6,7 @@ import pandas as pd import numpy as np - pd.options.display.max_rows=15 + pd.options.display.max_rows = 15 Comparison with R / R libraries ******************************* @@ -165,16 +165,15 @@ function. .. ipython:: python - df = pd.DataFrame({ - 'v1': [1,3,5,7,8,3,5,np.nan,4,5,7,9], - 'v2': [11,33,55,77,88,33,55,np.nan,44,55,77,99], - 'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12], - 'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99, np.nan, - np.nan] - }) + df = pd.DataFrame( + {'v1': [1, 3, 5, 7, 8, 3, 5, np.nan, 4, 5, 7, 9], + 'v2': [11, 33, 55, 77, 88, 33, 55, np.nan, 44, 55, 77, 99], + 'by1': ["red", "blue", 1, 2, np.nan, "big", 1, 2, "red", 1, np.nan, 12], + 'by2': ["wet", "dry", 99, 95, np.nan, "damp", 95, 99, "red", 99, np.nan, + np.nan]}) - g = df.groupby(['by1','by2']) - g[['v1','v2']].mean() + g = df.groupby(['by1', 'by2']) + g[['v1', 'v2']].mean() For more details and examples see :ref:`the groupby documentation <groupby.split>`. @@ -195,7 +194,7 @@ The :meth:`~pandas.DataFrame.isin` method is similar to R ``%in%`` operator: .. 
ipython:: python - s = pd.Series(np.arange(5),dtype=np.float32) + s = pd.Series(np.arange(5), dtype=np.float32) s.isin([2, 4]) The ``match`` function returns a vector of the positions of matches @@ -234,11 +233,11 @@ In ``pandas`` we may use :meth:`~pandas.pivot_table` method to handle this: import random import string - baseball = pd.DataFrame({ - 'team': ["team %d" % (x+1) for x in range(5)]*5, - 'player': random.sample(list(string.ascii_lowercase),25), - 'batting avg': np.random.uniform(.200, .400, 25) - }) + baseball = pd.DataFrame( + {'team': ["team %d" % (x + 1) for x in range(5)] * 5, + 'player': random.sample(list(string.ascii_lowercase), 25), + 'batting avg': np.random.uniform(.200, .400, 25)}) + baseball.pivot_table(values='batting avg', columns='team', aggfunc=np.max) For more details and examples see :ref:`the reshaping documentation @@ -341,15 +340,13 @@ In ``pandas`` the equivalent expression, using the .. ipython:: python - df = pd.DataFrame({ - 'x': np.random.uniform(1., 168., 120), - 'y': np.random.uniform(7., 334., 120), - 'z': np.random.uniform(1.7, 20.7, 120), - 'month': [5,6,7,8]*30, - 'week': np.random.randint(1,4, 120) - }) + df = pd.DataFrame({'x': np.random.uniform(1., 168., 120), + 'y': np.random.uniform(7., 334., 120), + 'z': np.random.uniform(1.7, 20.7, 120), + 'month': [5, 6, 7, 8] * 30, + 'week': np.random.randint(1, 4, 120)}) - grouped = df.groupby(['month','week']) + grouped = df.groupby(['month', 'week']) grouped['x'].agg([np.mean, np.std]) @@ -374,8 +371,8 @@ In Python, since ``a`` is a list, you can simply use list comprehension. .. ipython:: python - a = np.array(list(range(1,24))+[np.NAN]).reshape(2,3,4) - pd.DataFrame([tuple(list(x)+[val]) for x, val in np.ndenumerate(a)]) + a = np.array(list(range(1, 24)) + [np.NAN]).reshape(2, 3, 4) + pd.DataFrame([tuple(list(x) + [val]) for x, val in np.ndenumerate(a)]) |meltlist|_ ~~~~~~~~~~~~ @@ -393,7 +390,7 @@ In Python, this list would be a list of tuples, so .. 
ipython:: python - a = list(enumerate(list(range(1,5))+[np.NAN])) + a = list(enumerate(list(range(1, 5)) + [np.NAN])) pd.DataFrame(a) For more details and examples see :ref:`the Into to Data Structures @@ -419,12 +416,13 @@ In Python, the :meth:`~pandas.melt` method is the R equivalent: .. ipython:: python - cheese = pd.DataFrame({'first' : ['John', 'Mary'], - 'last' : ['Doe', 'Bo'], - 'height' : [5.5, 6.0], - 'weight' : [130, 150]}) + cheese = pd.DataFrame({'first': ['John', 'Mary'], + 'last': ['Doe', 'Bo'], + 'height': [5.5, 6.0], + 'weight': [130, 150]}) + pd.melt(cheese, id_vars=['first', 'last']) - cheese.set_index(['first', 'last']).stack() # alternative way + cheese.set_index(['first', 'last']).stack() # alternative way For more details and examples see :ref:`the reshaping documentation <reshaping.melt>`. @@ -452,16 +450,15 @@ In Python the best way is to make use of :meth:`~pandas.pivot_table`: .. ipython:: python - df = pd.DataFrame({ - 'x': np.random.uniform(1., 168., 12), - 'y': np.random.uniform(7., 334., 12), - 'z': np.random.uniform(1.7, 20.7, 12), - 'month': [5,6,7]*4, - 'week': [1,2]*6 - }) + df = pd.DataFrame({'x': np.random.uniform(1., 168., 12), + 'y': np.random.uniform(7., 334., 12), + 'z': np.random.uniform(1.7, 20.7, 12), + 'month': [5, 6, 7] * 4, + 'week': [1, 2] * 6}) + mdf = pd.melt(df, id_vars=['month', 'week']) - pd.pivot_table(mdf, values='value', index=['variable','week'], - columns=['month'], aggfunc=np.mean) + pd.pivot_table(mdf, values='value', index=['variable', 'week'], + columns=['month'], aggfunc=np.mean) Similarly for ``dcast`` which uses a data.frame called ``df`` in R to aggregate information based on ``Animal`` and ``FeedType``: @@ -491,13 +488,14 @@ using :meth:`~pandas.pivot_table`: 'Amount': [10, 7, 4, 2, 5, 6, 2], }) - df.pivot_table(values='Amount', index='Animal', columns='FeedType', aggfunc='sum') + df.pivot_table(values='Amount', index='Animal', columns='FeedType', + aggfunc='sum') The second approach is to use the 
:meth:`~pandas.DataFrame.groupby` method: .. ipython:: python - df.groupby(['Animal','FeedType'])['Amount'].sum() + df.groupby(['Animal', 'FeedType'])['Amount'].sum() For more details and examples see :ref:`the reshaping documentation <reshaping.pivot>` or :ref:`the groupby documentation<groupby.split>`. @@ -516,8 +514,8 @@ In pandas this is accomplished with ``pd.cut`` and ``astype("category")``: .. ipython:: python - pd.cut(pd.Series([1,2,3,4,5,6]), 3) - pd.Series([1,2,3,2,2,3]).astype("category") + pd.cut(pd.Series([1, 2, 3, 4, 5, 6]), 3) + pd.Series([1, 2, 3, 2, 2, 3]).astype("category") For more details and examples see :ref:`categorical introduction <categorical>` and the :ref:`API documentation <api.categorical>`. There is also a documentation regarding the diff --git a/doc/source/comparison_with_sql.rst b/doc/source/comparison_with_sql.rst index db143cd586441..021f37eb5c66f 100644 --- a/doc/source/comparison_with_sql.rst +++ b/doc/source/comparison_with_sql.rst @@ -23,7 +23,8 @@ structure. .. ipython:: python - url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv' + url = ('https://raw.github.com/pandas-dev' + '/pandas/master/pandas/tests/data/tips.csv') tips = pd.read_csv(url) tips.head() @@ -387,7 +388,7 @@ Top N rows with offset .. ipython:: python - tips.nlargest(10+5, columns='tip').tail(10) + tips.nlargest(10 + 5, columns='tip').tail(10) Top N rows per group ~~~~~~~~~~~~~~~~~~~~ @@ -411,8 +412,7 @@ Top N rows per group .groupby(['day']) .cumcount() + 1) .query('rn < 3') - .sort_values(['day','rn']) - ) + .sort_values(['day', 'rn'])) the same using `rank(method='first')` function @@ -421,8 +421,7 @@ the same using `rank(method='first')` function (tips.assign(rnk=tips.groupby(['day'])['total_bill'] .rank(method='first', ascending=False)) .query('rnk < 3') - .sort_values(['day','rnk']) - ) + .sort_values(['day', 'rnk'])) .. code-block:: sql @@ -445,11 +444,10 @@ Notice that when using ``rank(method='min')`` function .. 
ipython:: python (tips[tips['tip'] < 2] - .assign(rnk_min=tips.groupby(['sex'])['tip'] - .rank(method='min')) - .query('rnk_min < 3') - .sort_values(['sex','rnk_min']) - ) + .assign(rnk_min=tips.groupby(['sex'])['tip'] + .rank(method='min')) + .query('rnk_min < 3') + .sort_values(['sex', 'rnk_min'])) UPDATE diff --git a/doc/source/comparison_with_stata.rst b/doc/source/comparison_with_stata.rst index 6c518983d5904..e039843b22065 100644 --- a/doc/source/comparison_with_stata.rst +++ b/doc/source/comparison_with_stata.rst @@ -102,9 +102,7 @@ and the values are the data. .. ipython:: python - df = pd.DataFrame({ - 'x': [1, 3, 5], - 'y': [2, 4, 6]}) + df = pd.DataFrame({'x': [1, 3, 5], 'y': [2, 4, 6]}) df @@ -128,7 +126,8 @@ the data set if presented with a url. .. ipython:: python - url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv' + url = ('https://raw.github.com/pandas-dev' + '/pandas/master/pandas/tests/data/tips.csv') tips = pd.read_csv(url) tips.head() @@ -278,17 +277,17 @@ see the :ref:`timeseries documentation<timeseries>` for more details. tips['date1_year'] = tips['date1'].dt.year tips['date2_month'] = tips['date2'].dt.month tips['date1_next'] = tips['date1'] + pd.offsets.MonthBegin() - tips['months_between'] = (tips['date2'].dt.to_period('M') - - tips['date1'].dt.to_period('M')) + tips['months_between'] = (tips['date2'].dt.to_period('M') + - tips['date1'].dt.to_period('M')) - tips[['date1','date2','date1_year','date2_month', - 'date1_next','months_between']].head() + tips[['date1', 'date2', 'date1_year', 'date2_month', 'date1_next', + 'months_between']].head() .. 
ipython:: python :suppress: - tips = tips.drop(['date1','date2','date1_year', - 'date2_month','date1_next','months_between'], axis=1) + tips = tips.drop(['date1', 'date2', 'date1_year', 'date2_month', + 'date1_next', 'months_between'], axis=1) Selection of Columns ~~~~~~~~~~~~~~~~~~~~ @@ -472,7 +471,7 @@ The following tables will be used in the merge examples 'value': np.random.randn(4)}) df1 df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'], - 'value': np.random.randn(4)}) + 'value': np.random.randn(4)}) df2 In Stata, to perform a merge, one data set must be in memory @@ -661,7 +660,7 @@ In pandas this would be written as: .. ipython:: python - tips.groupby(['sex','smoker']).first() + tips.groupby(['sex', 'smoker']).first() Other Considerations diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 0d2021de8f88e..251dce5141ea5 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -4,14 +4,15 @@ :suppress: import numpy as np + import matplotlib.pyplot as plt + + import pandas as pd + np.random.seed(123456) np.set_printoptions(precision=4, suppress=True) - import pandas as pd - import matplotlib - # matplotlib.style.use('default') - import matplotlib.pyplot as plt + pd.options.display.max_rows = 15 + plt.close('all') - pd.options.display.max_rows=15 .. _computation: @@ -75,7 +76,8 @@ series in the DataFrame, also excluding NA/null values. .. ipython:: python - frame = pd.DataFrame(np.random.randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e']) + frame = pd.DataFrame(np.random.randn(1000, 5), + columns=['a', 'b', 'c', 'd', 'e']) frame.cov() ``DataFrame.cov`` also supports an optional ``min_periods`` keyword that @@ -127,7 +129,8 @@ Wikipedia has articles covering the above correlation coefficients: .. 
ipython:: python - frame = pd.DataFrame(np.random.randn(1000, 5), columns=['a', 'b', 'c', 'd', 'e']) + frame = pd.DataFrame(np.random.randn(1000, 5), + columns=['a', 'b', 'c', 'd', 'e']) frame.iloc[::2] = np.nan # Series with Series @@ -163,9 +166,10 @@ compute the correlation based on histogram intersection: .. ipython:: python # histogram intersection - histogram_intersection = lambda a, b: np.minimum( - np.true_divide(a, a.sum()), np.true_divide(b, b.sum()) - ).sum() + def histogram_intersection(a, b): + return np.minimum(np.true_divide(a, a.sum()), + np.true_divide(b, b.sum())).sum() + frame.corr(method=histogram_intersection) A related method :meth:`~DataFrame.corrwith` is implemented on DataFrame to @@ -192,7 +196,7 @@ assigned the mean of the ranks (by default) for the group: .. ipython:: python s = pd.Series(np.random.np.random.randn(5), index=list('abcde')) - s['d'] = s['b'] # so there's a tie + s['d'] = s['b'] # so there's a tie s.rank() :meth:`~DataFrame.rank` is also a DataFrame method and can rank either the rows @@ -202,7 +206,7 @@ ranking. .. ipython:: python df = pd.DataFrame(np.random.np.random.randn(10, 6)) - df[4] = df[2][:5] # some ties + df[4] = df[2][:5] # some ties df df.rank(1) @@ -243,7 +247,8 @@ objects, :class:`~pandas.core.window.Rolling`, :class:`~pandas.core.window.Expan .. ipython:: python - s = pd.Series(np.random.randn(1000), index=pd.date_range('1/1/2000', periods=1000)) + s = pd.Series(np.random.randn(1000), + index=pd.date_range('1/1/2000', periods=1000)) s = s.cumsum() s @@ -258,7 +263,7 @@ These object provide tab-completion of the available methods and properties. .. code-block:: ipython - In [14]: r. + In [14]: r.<TAB> # noqa: E225, E999 r.agg r.apply r.count r.exclusions r.max r.median r.name r.skew r.sum r.aggregate r.corr r.cov r.kurt r.mean r.min r.quantile r.std r.var @@ -336,7 +341,9 @@ compute the mean absolute deviation on a rolling basis: .. 
ipython:: python - mad = lambda x: np.fabs(x - x.mean()).mean() + def mad(x): + return np.fabs(x - x.mean()).mean() + @savefig rolling_apply_ex.png s.rolling(window=60).apply(mad, raw=True).plot(style='k') @@ -376,7 +383,8 @@ The list of recognized types are the `scipy.signal window functions .. ipython:: python - ser = pd.Series(np.random.randn(10), index=pd.date_range('1/1/2000', periods=10)) + ser = pd.Series(np.random.randn(10), + index=pd.date_range('1/1/2000', periods=10)) ser.rolling(window=5, win_type='triang').mean() @@ -423,7 +431,9 @@ This can be particularly useful for a non-regular time frequency index. .. ipython:: python dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}, - index=pd.date_range('20130101 09:00:00', periods=5, freq='s')) + index=pd.date_range('20130101 09:00:00', + periods=5, + freq='s')) dft This is a regular frequency index. Using an integer window parameter works to roll along the window frequency. @@ -445,12 +455,12 @@ Using a non-regular, but still monotonic index, rolling with an integer window d .. ipython:: python dft = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]}, - index = pd.Index([pd.Timestamp('20130101 09:00:00'), - pd.Timestamp('20130101 09:00:02'), - pd.Timestamp('20130101 09:00:03'), - pd.Timestamp('20130101 09:00:05'), - pd.Timestamp('20130101 09:00:06')], - name='foo')) + index=pd.Index([pd.Timestamp('20130101 09:00:00'), + pd.Timestamp('20130101 09:00:02'), + pd.Timestamp('20130101 09:00:03'), + pd.Timestamp('20130101 09:00:05'), + pd.Timestamp('20130101 09:00:06')], + name='foo')) dft dft.rolling(2).sum() @@ -496,11 +506,11 @@ from present information back to past information. This allows the rolling windo .. 
ipython:: python df = pd.DataFrame({'x': 1}, - index = [pd.Timestamp('20130101 09:00:01'), - pd.Timestamp('20130101 09:00:02'), - pd.Timestamp('20130101 09:00:03'), - pd.Timestamp('20130101 09:00:04'), - pd.Timestamp('20130101 09:00:06')]) + index=[pd.Timestamp('20130101 09:00:01'), + pd.Timestamp('20130101 09:00:02'), + pd.Timestamp('20130101 09:00:03'), + pd.Timestamp('20130101 09:00:04'), + pd.Timestamp('20130101 09:00:06')]) df["right"] = df.rolling('2s', closed='right').x.sum() # default df["both"] = df.rolling('2s', closed='both').x.sum() @@ -601,7 +611,8 @@ can even be omitted: .. ipython:: python - covs = df[['B','C','D']].rolling(window=50).cov(df[['A','B','C']], pairwise=True) + covs = (df[['B', 'C', 'D']].rolling(window=50) + .cov(df[['A', 'B', 'C']], pairwise=True)) covs.loc['2002-09-22':] .. ipython:: python @@ -637,7 +648,7 @@ perform multiple computations on the data. These operations are similar to the : dfa = pd.DataFrame(np.random.randn(1000, 3), index=pd.date_range('1/1/2000', periods=1000), columns=['A', 'B', 'C']) - r = dfa.rolling(window=60,min_periods=1) + r = dfa.rolling(window=60, min_periods=1) r We can aggregate by passing a function to the entire DataFrame, or select a @@ -649,7 +660,7 @@ Series (or multiple Series) via standard ``__getitem__``. r['A'].aggregate(np.sum) - r[['A','B']].aggregate(np.sum) + r[['A', 'B']].aggregate(np.sum) As you can see, the result of the aggregation will have the selected columns, or all columns if none are selected. @@ -683,24 +694,21 @@ By passing a dict to ``aggregate`` you can apply a different aggregation to the columns of a ``DataFrame``: .. ipython:: python - :okexcept: - :okwarning: - r.agg({'A' : np.sum, - 'B' : lambda x: np.std(x, ddof=1)}) + r.agg({'A': np.sum, 'B': lambda x: np.std(x, ddof=1)}) The function names can also be strings. In order for a string to be valid it must be implemented on the windowed object .. 
ipython:: python - r.agg({'A' : 'sum', 'B' : 'std'}) + r.agg({'A': 'sum', 'B': 'std'}) Furthermore you can pass a nested dict to indicate different aggregations on different columns. .. ipython:: python - r.agg({'A' : ['sum','std'], 'B' : ['mean','std'] }) + r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']}) .. _stats.moments.expanding:
- Fix PEP-8 issues in comparison_*.rst - Fix PEP-8 issues in computation.rst
https://api.github.com/repos/pandas-dev/pandas/pulls/24002
2018-11-29T22:54:02Z
2018-12-02T17:22:24Z
2018-12-02T17:22:24Z
2018-12-03T19:10:37Z
REF: prelim for fixing array_to_datetime
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx index ddeaffbfb3cc0..609608a0948c5 100644 --- a/pandas/_libs/tslib.pyx +++ b/pandas/_libs/tslib.pyx @@ -463,8 +463,7 @@ def array_with_unit_to_datetime(ndarray values, object unit, @cython.boundscheck(False) cpdef array_to_datetime(ndarray[object] values, str errors='raise', bint dayfirst=False, bint yearfirst=False, - object format=None, object utc=None, - bint require_iso8601=False): + object utc=None, bint require_iso8601=False): """ Converts a 1D array of date-like values to a numpy array of either: 1) datetime64[ns] data @@ -488,8 +487,6 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise', dayfirst parsing behavior when encountering datetime strings yearfirst : bool, default False yearfirst parsing behavior when encountering datetime strings - format : str, default None - format of the string to parse utc : bool, default None indicator whether the dates should be UTC require_iso8601 : bool, default False diff --git a/pandas/core/tools/datetimes.py b/pandas/core/tools/datetimes.py index ee44a64514f4f..1266b57c098cd 100644 --- a/pandas/core/tools/datetimes.py +++ b/pandas/core/tools/datetimes.py @@ -231,9 +231,9 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, require_iso8601 = not infer_datetime_format format = None + tz_parsed = None + result = None try: - result = None - if format is not None: # shortcut formatting here if format == '%Y%m%d': @@ -267,7 +267,8 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, raise result = arg - if result is None and (format is None or infer_datetime_format): + if result is None: + assert format is None or infer_datetime_format result, tz_parsed = tslib.array_to_datetime( arg, errors=errors, @@ -276,36 +277,37 @@ def _convert_listlike_datetimes(arg, box, format, name=None, tz=None, yearfirst=yearfirst, require_iso8601=require_iso8601 ) - if tz_parsed is not None: - if box: - # We can take a shortcut 
since the datetime64 numpy array - # is in UTC - return DatetimeIndex._simple_new(result, name=name, - tz=tz_parsed) - else: - # Convert the datetime64 numpy array to an numpy array - # of datetime objects - result = [Timestamp(ts, tz=tz_parsed).to_pydatetime() - for ts in result] - return np.array(result, dtype=object) - - if box: - # Ensure we return an Index in all cases where box=True - if is_datetime64_dtype(result): - return DatetimeIndex(result, tz=tz, name=name) - elif is_object_dtype(result): - # e.g. an Index of datetime objects - from pandas import Index - return Index(result, name=name) - return result - except ValueError as e: + # Fallback to try to convert datetime objects try: values, tz = conversion.datetime_to_datetime64(arg) return DatetimeIndex._simple_new(values, name=name, tz=tz) except (ValueError, TypeError): raise e + if tz_parsed is not None: + if box: + # We can take a shortcut since the datetime64 numpy array + # is in UTC + return DatetimeIndex._simple_new(result, name=name, + tz=tz_parsed) + else: + # Convert the datetime64 numpy array to an numpy array + # of datetime objects + result = [Timestamp(ts, tz=tz_parsed).to_pydatetime() + for ts in result] + return np.array(result, dtype=object) + + if box: + # Ensure we return an Index in all cases where box=True + if is_datetime64_dtype(result): + return DatetimeIndex(result, tz=tz, name=name) + elif is_object_dtype(result): + # e.g. an Index of datetime objects + from pandas import Index + return Index(result, name=name) + return result + def _adjust_to_origin(arg, origin, unit): """
`tslib.array_to_datetime` has an unused `format` kwarg. This gets rid of it. `to_datetime` (or more specifically, `_convert_listlike_datetimes`) has a bunch of boxing logic inside a try/except that doesn't need to be, obscures what Exception we're trying to catch. This moves that code outside the try/except and de-indents it. This overlaps with #23675. xref #23722 (the conversation there less than the OP) cc @mroeschke
https://api.github.com/repos/pandas-dev/pandas/pulls/24000
2018-11-29T20:07:57Z
2018-11-29T22:01:14Z
2018-11-29T22:01:14Z
2018-11-29T22:29:04Z
BUG: pandas.cut should disallow overlapping IntervalIndex bins
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst index f888648a9363e..3c01a6d330071 100644 --- a/doc/source/whatsnew/v0.24.0.rst +++ b/doc/source/whatsnew/v0.24.0.rst @@ -1529,6 +1529,7 @@ Reshaping - Bug in :func:`pandas.melt` when passing column names that are not present in ``DataFrame`` (:issue:`23575`) - Bug in :meth:`DataFrame.append` with a :class:`Series` with a dateutil timezone would raise a ``TypeError`` (:issue:`23682`) - Bug in ``Series`` construction when passing no data and ``dtype=str`` (:issue:`22477`) +- Bug in :func:`cut` with ``bins`` as an overlapping ``IntervalIndex`` where multiple bins were returned per item instead of raising a ``ValueError`` (:issue:`23980`) .. _whatsnew_0240.bug_fixes.sparse: diff --git a/pandas/core/reshape/tile.py b/pandas/core/reshape/tile.py index 8ad2a48e8767c..5d5f6cf8102be 100644 --- a/pandas/core/reshape/tile.py +++ b/pandas/core/reshape/tile.py @@ -43,7 +43,8 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, and maximum values of `x`. * sequence of scalars : Defines the bin edges allowing for non-uniform width. No extension of the range of `x` is done. - * IntervalIndex : Defines the exact bins to be used. + * IntervalIndex : Defines the exact bins to be used. Note that + IntervalIndex for `bins` must be non-overlapping. right : bool, default True Indicates whether `bins` includes the rightmost edge or not. 
If @@ -217,7 +218,9 @@ def cut(x, bins, right=True, labels=None, retbins=False, precision=3, bins[-1] += adj elif isinstance(bins, IntervalIndex): - pass + if bins.is_overlapping: + raise ValueError('Overlapping IntervalIndex is not accepted.') + else: bins = np.asarray(bins) bins = _convert_bin_to_numeric_type(bins, dtype) diff --git a/pandas/tests/reshape/test_tile.py b/pandas/tests/reshape/test_tile.py index f04e9a55a6c8d..b0445f5a9e2d5 100644 --- a/pandas/tests/reshape/test_tile.py +++ b/pandas/tests/reshape/test_tile.py @@ -91,6 +91,12 @@ def test_bins_from_intervalindex(self): tm.assert_numpy_array_equal(result.codes, np.array([1, 1, 2], dtype='int8')) + def test_bins_not_overlapping_from_intervalindex(self): + # verify if issue 23980 is properly solved. + ii = IntervalIndex.from_tuples([(0, 10), (2, 12), (4, 14)]) + with pytest.raises(ValueError): + cut([5, 6], bins=ii) + def test_bins_not_monotonic(self): data = [.2, 1.4, 2.5, 6.2, 9.7, 2.1] pytest.raises(ValueError, cut, data, [0.1, 1.5, 1, 10])
- [ ] closes #23980 - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/23999
2018-11-29T18:41:06Z
2018-12-01T00:17:14Z
2018-12-01T00:17:13Z
2018-12-01T00:18:15Z
BUG: Index removes name when sliced
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2db899b180627..2d9b3649b6f22 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -336,6 +336,8 @@ pandas 0.12 - Fixed bug in plotting that wasn't raising on invalid colormap for matplotlib 1.1.1 (:issue:`4215`) - Fixed the legend displaying in ``DataFrame.plot(kind='kde')`` (:issue:`4216`) + - Fixed bug where Index slices weren't carrying the name attribute + (:issue:`4226`) pandas 0.11.0 ============= diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index fe491ffa0ee2a..f913ebce33082 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -469,7 +469,9 @@ Bug Fixes - Fixed bug in plotting that wasn't raising on invalid colormap for matplotlib 1.1.1 (:issue:`4215`) - Fixed the legend displaying in ``DataFrame.plot(kind='kde')`` (:issue:`4216`) - + - Fixed bug where Index slices weren't carrying the name attribute + (:issue:`4226`) + See the :ref:`full release notes <release>` or issue tracker on GitHub for a complete list. 
diff --git a/pandas/core/index.py b/pandas/core/index.py index 7b20d791c6593..cb90dc9cb0cbb 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -14,7 +14,6 @@ from pandas.util.decorators import cache_readonly from pandas.core.common import isnull import pandas.core.common as com -from pandas.util import py3compat from pandas.core.config import get_option @@ -908,7 +907,7 @@ def reindex(self, target, method=None, level=None, limit=None, if self.equals(target): indexer = None - + # to avoid aliasing an existing index if copy_if_needed and target.name != self.name and self.name is not None: if target.name is None: @@ -1215,7 +1214,7 @@ def slice_locs(self, start=None, end=None): else: try: start_slice = self.get_loc(start) - + if not is_unique: # get_loc will return a boolean array for non_uniques @@ -2794,6 +2793,7 @@ def _get_consensus_names(indexes): return list(list(consensus_names)[0]) return [None] * indexes[0].nlevels + def _maybe_box(idx): from pandas.tseries.api import DatetimeIndex, PeriodIndex klasses = DatetimeIndex, PeriodIndex diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index 33533104919db..250728dc59481 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -13,7 +13,6 @@ from pandas.core.index import Index, Int64Index, MultiIndex from pandas.util.testing import assert_almost_equal from pandas.util import py3compat -import pandas.core.common as com import pandas.util.testing as tm import pandas.core.config as cf @@ -551,6 +550,10 @@ def test_get_level_values(self): result = self.strIndex.get_level_values(0) self.assert_(result.equals(self.strIndex)) + def test_slice_keep_name(self): + idx = Index(['a', 'b'], name='asdf') + self.assertEqual(idx.name, idx[1:].name) + class TestInt64Index(unittest.TestCase): _multiprocess_can_split_ = True @@ -918,6 +921,10 @@ def test_bytestring_with_unicode(self): else: str(idx) + def test_slice_keep_name(self): + idx = Int64Index([1, 2], name='asdf') + 
self.assertEqual(idx.name, idx[1:].name) + class TestMultiIndex(unittest.TestCase): _multiprocess_can_split_ = True @@ -1537,7 +1544,7 @@ def test_diff(self): self.assert_(first.equals(result)) self.assertEqual(first.names, result.names) - # name from non-empty array + # name from non-empty array result = first.diff([('foo', 'one')]) expected = pd.MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'), ('foo', 'two'), ('qux', 'one'), ('qux', 'two')]) @@ -1789,14 +1796,18 @@ def test_bytestring_with_unicode(self): else: str(idx) + def test_slice_keep_name(self): + x = MultiIndex.from_tuples([('a', 'b'), (1, 2), ('c', 'd')], + names=['x', 'y']) + self.assertEqual(x[1:].names, x.names) + def test_get_combined_index(): from pandas.core.index import _get_combined_index result = _get_combined_index([]) assert(result.equals(Index([]))) + if __name__ == '__main__': - import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - # '--with-coverage', '--cover-package=pandas.core'], exit=False) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 7fdb6d9d2603d..b133939c2b404 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -1013,6 +1013,7 @@ def __array_finalize__(self, obj): self.offset = getattr(obj, 'offset', None) self.tz = getattr(obj, 'tz', None) + self.name = getattr(obj, 'name', None) def intersection(self, other): """ @@ -1069,7 +1070,7 @@ def intersection(self, other): return self._view_like(left_chunk) def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True): - + is_monotonic = self.is_monotonic if reso == 'year': @@ -1236,7 +1237,7 @@ def slice_locs(self, start=None, end=None): start_loc = self._get_string_slice(start).start else: start_loc = 0 - + if end: end_loc = self._get_string_slice(end).stop else: @@ -1254,7 +1255,7 @@ def slice_locs(self, start=None, end=None): start_loc = self._get_string_slice(start,use_rhs=False) else: start_loc = np.arange(len(self)) - + if end: end_loc = 
self._get_string_slice(end,use_lhs=False) else: diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index ac79fbd6bfb37..2db32b14e2eb3 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -1060,7 +1060,7 @@ def _format_native_types(self, na_rep=u'NaT', **kwargs): values = np.array(list(self),dtype=object) mask = isnull(self.values) values[mask] = na_rep - + imask = -mask values[imask] = np.array([ u'%s' % dt for dt in values[imask] ]) return values.tolist() @@ -1070,6 +1070,7 @@ def __array_finalize__(self, obj): return self.item() self.freq = getattr(obj, 'freq', None) + self.name = getattr(obj, 'name', None) def __repr__(self): output = str(self.__class__) + '\n' diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 95de08909a50a..01c984ec2b07d 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -27,6 +27,7 @@ from pandas import Series, TimeSeries, DataFrame from pandas.util.testing import assert_series_equal, assert_almost_equal import pandas.util.testing as tm +from numpy.testing import assert_array_equal class TestPeriodProperties(TestCase): @@ -464,6 +465,7 @@ def test_constructor_infer_freq(self): self.assertRaises(ValueError, Period, '2007-01-01 07:10:15.123456') + def noWrap(item): return item @@ -1723,11 +1725,11 @@ def test_badinput(self): def test_negative_ordinals(self): p = Period(ordinal=-1000, freq='A') - p = Period(ordinal=0, freq='A') - idx = PeriodIndex(ordinal=[-1, 0, 1], freq='A') - idx = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A') + idx1 = PeriodIndex(ordinal=[-1, 0, 1], freq='A') + idx2 = PeriodIndex(ordinal=np.array([-1, 0, 1]), freq='A') + assert_array_equal(idx1,idx2) def test_dti_to_period(self): dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M') @@ -1947,7 +1949,7 @@ def test_fields(self): end_intv = Period('2006-12-31', 'W') i1 = PeriodIndex(end=end_intv, periods=10) - self._check_all_fields(pi) + 
self._check_all_fields(i1) def _check_all_fields(self, periodindex): fields = ['year', 'month', 'day', 'hour', 'minute', @@ -2047,6 +2049,11 @@ def test_pickle_freq(self): new_prng = pickle.loads(pickle.dumps(prng)) self.assertEqual(new_prng.freq,'M') + def test_slice_keep_name(self): + idx = period_range('20010101', periods=10, freq='D', name='bob') + self.assertEqual(idx.name, idx[1:].name) + + def _permute(obj): return obj.take(np.random.permutation(len(obj))) @@ -2152,8 +2159,6 @@ def test_greaterEqual(self): def test_greaterEqual_Raises_Value(self): self.assertRaises(ValueError, self.january1.__ge__, self.day) - - def test_greaterEqual_Raises_Value(self): self.assertRaises(TypeError, self.january1.__ge__, 1) def test_smallerEqual(self): @@ -2179,6 +2184,7 @@ def test_sort(self): correctPeriods = [self.january1, self.february, self.march] self.assertEqual(sorted(periods), correctPeriods) + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index 07780b575fa95..f41d31d2afbd0 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -32,8 +32,10 @@ import pandas.index as _index import cPickle as pickle +from pandas import read_pickle import pandas.core.datetools as dt from numpy.random import rand +from numpy.testing import assert_array_equal from pandas.util.testing import assert_frame_equal import pandas.util.py3compat as py3compat from pandas.core.datetools import BDay @@ -260,11 +262,13 @@ def test_indexing(self): # this is a single date, so will raise self.assertRaises(KeyError, df.__getitem__, df.index[2],) + def assert_range_equal(left, right): assert(left.equals(right)) assert(left.freq == right.freq) assert(left.tz == right.tz) + class TestTimeSeries(unittest.TestCase): _multiprocess_can_split_ = True @@ -1295,7 +1299,6 @@ def test_to_period(self): def 
test_to_period_tz(self): _skip_if_no_pytz() from dateutil.tz import tzlocal - from pandas.tseries.period import period_range from pytz import utc as UTC xp = date_range('1/1/2000', '4/1/2000').to_period() @@ -1949,7 +1952,15 @@ def test_does_not_convert_mixed_integer(self): joined = cols.join(df.columns) self.assertEqual(cols.dtype, np.dtype('O')) self.assertEqual(cols.dtype, joined.dtype) - self.assert_(np.array_equal(cols.values, joined.values)) + assert_array_equal(cols.values, joined.values) + + def test_slice_keeps_name(self): + # GH4226 + st = pd.Timestamp('2013-07-01 00:00:00', tz='America/Los_Angeles') + et = pd.Timestamp('2013-07-02 00:00:00', tz='America/Los_Angeles') + dr = pd.date_range(st, et, freq='H', name='timebucket') + self.assertEqual(dr[1:].name, dr.name) + class TestLegacySupport(unittest.TestCase): _multiprocess_can_split_ = True @@ -1971,7 +1982,6 @@ def setUpClass(cls): def test_pass_offset_warn(self): from StringIO import StringIO - import sys buf = StringIO() sys.stderr = buf @@ -2064,7 +2074,7 @@ def test_unpickle_daterange(self): pth, _ = os.path.split(os.path.abspath(__file__)) filepath = os.path.join(pth, 'data', 'daterange_073.pickle') - rng = com.load(filepath) + rng = read_pickle(filepath) self.assert_(type(rng[0]) == datetime) self.assert_(isinstance(rng.offset, offsets.BDay)) self.assert_(rng.values.dtype == object) @@ -2935,6 +2945,7 @@ def test_hash_equivalent(self): stamp = Timestamp(datetime(2011, 1, 1)) self.assertEquals(d[stamp], 5) + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
closes #4226
https://api.github.com/repos/pandas-dev/pandas/pulls/4233
2013-07-13T18:05:44Z
2013-07-14T13:32:53Z
2013-07-14T13:32:53Z
2014-06-30T17:57:37Z
CLN/TST: clean up and raise on bs4 version and no tables
diff --git a/doc/source/release.rst b/doc/source/release.rst index 45335fa49aa23..f5d29c11e0589 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -106,6 +106,8 @@ pandas 0.12 - Added ``layout`` keyword to DataFrame.hist() for more customizable layout (:issue:`4050`) - Timestamp.min and Timestamp.max now represent valid Timestamp instances instead of the default datetime.min and datetime.max (respectively), thanks @SleepingPills + - ``read_html`` now raises when no tables are found and BeautifulSoup==4.2.0 + is detected (:issue:`4214`) **API Changes** diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index e1484c82ff165..f735803d9af33 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -344,6 +344,9 @@ Other Enhancements - Timestamp.min and Timestamp.max now represent valid Timestamp instances instead of the default datetime.min and datetime.max (respectively), thanks @SleepingPills + - ``read_html`` now raises when no tables are found and BeautifulSoup==4.2.0 + is detected (:issue:`4214`) + Experimental Features ~~~~~~~~~~~~~~~~~~~~~ diff --git a/pandas/io/html.py b/pandas/io/html.py index 08a9403cd18a7..64fba1cadc6c2 100644 --- a/pandas/io/html.py +++ b/pandas/io/html.py @@ -8,23 +8,18 @@ import numbers import urllib2 import urlparse -import contextlib import collections - -try: - from importlib import import_module -except ImportError: - import_module = __import__ +from distutils.version import LooseVersion import numpy as np from pandas import DataFrame, MultiIndex, isnull -from pandas.io.common import _is_url +from pandas.io.common import _is_url, urlopen try: - import_module('bs4') + import bs4 except ImportError: _HAS_BS4 = False else: @@ -32,7 +27,7 @@ try: - import_module('lxml') + import lxml except ImportError: _HAS_LXML = False else: @@ -40,7 +35,7 @@ try: - import_module('html5lib') + import html5lib except ImportError: _HAS_HTML5LIB = False else: @@ -119,7 +114,7 @@ def _read(io): """ if _is_url(io): try: - 
with contextlib.closing(urllib2.urlopen(io)) as url: + with urlopen(io) as url: raw_text = url.read() except urllib2.URLError: raise ValueError('Invalid URL: "{0}"'.format(io)) @@ -131,7 +126,8 @@ def _read(io): elif isinstance(io, basestring): raw_text = io else: - raise ValueError("Cannot read object of type '{0}'".format(type(io))) + raise TypeError("Cannot read object of type " + "'{0.__class__.__name__!r}'".format(io)) return raw_text @@ -414,6 +410,7 @@ def _parse_tables(self, doc, match, attrs): element_name = self._strainer.name tables = doc.find_all(element_name, attrs=attrs) if not tables: + # known sporadically working release raise AssertionError('No tables found') mts = [table.find(text=match) for table in tables] @@ -429,7 +426,8 @@ def _parse_tables(self, doc, match, attrs): def _setup_build_doc(self): raw_text = _read(self.io) if not raw_text: - raise AssertionError('No text parsed from document') + raise AssertionError('No text parsed from document: ' + '{0}'.format(self.io)) return raw_text def _build_doc(self): @@ -721,6 +719,14 @@ def _parser_dispatch(flavor): raise ImportError("html5lib not found please install it") if not _HAS_BS4: raise ImportError("bs4 not found please install it") + if bs4.__version__ == LooseVersion('4.2.0'): + raise AssertionError("You're using a version" + " of BeautifulSoup4 (4.2.0) that has been" + " known to cause problems on certain" + " operating systems such as Debian. 
" + "Please install a version of" + " BeautifulSoup4 != 4.2.0, both earlier" + " and later releases will work.") else: if not _HAS_LXML: raise ImportError("lxml not found please install it") diff --git a/pandas/io/tests/test_html.py b/pandas/io/tests/test_html.py index eaf06730a84c3..d7c46ea898b33 100644 --- a/pandas/io/tests/test_html.py +++ b/pandas/io/tests/test_html.py @@ -2,18 +2,24 @@ import re from cStringIO import StringIO from unittest import TestCase -from urllib2 import urlopen -from contextlib import closing import warnings +from distutils.version import LooseVersion import nose +from nose.tools import assert_raises import numpy as np from numpy.random import rand from numpy.testing.decorators import slow -from pandas.io.html import read_html, import_module -from pandas.io.html import _remove_whitespace +try: + from importlib import import_module +except ImportError: + import_module = __import__ + +from pandas.io.html import read_html +from pandas.io.common import urlopen + from pandas import DataFrame, MultiIndex, read_csv, Timestamp from pandas.util.testing import (assert_frame_equal, network, get_data_path) @@ -60,14 +66,26 @@ def assert_framelist_equal(list1, list2, *args, **kwargs): assert not frame_i.empty, 'frames are both empty' +def test_bs4_version_fails(): + _skip_if_no('bs4') + import bs4 + if bs4.__version__ == LooseVersion('4.2.0'): + assert_raises(AssertionError, read_html, os.path.join(DATA_PATH, + "spam.html"), + flavor='bs4') + + class TestReadHtmlBase(TestCase): def run_read_html(self, *args, **kwargs): - self.try_skip() kwargs['flavor'] = kwargs.get('flavor', self.flavor) return read_html(*args, **kwargs) def try_skip(self): _skip_if_none_of(('bs4', 'html5lib')) + import bs4 + if (bs4.__version__ == LooseVersion('4.2.0') and + self.flavor != ['lxml']): + raise nose.SkipTest def setup_data(self): self.spam_data = os.path.join(DATA_PATH, 'spam.html') @@ -77,6 +95,7 @@ def setup_flavor(self): self.flavor = 'bs4' def setUp(self): + 
self.try_skip() self.setup_data() self.setup_flavor() @@ -347,6 +366,7 @@ def test_pythonxy_plugins_table(self): @slow def test_banklist_header(self): + from pandas.io.html import _remove_whitespace def try_remove_ws(x): try: return _remove_whitespace(x) @@ -438,10 +458,9 @@ def test_invalid_flavor(): def get_elements_from_url(url, element='table', base_url="file://"): _skip_if_none_of(('bs4', 'html5lib')) url = "".join([base_url, url]) - from bs4 import BeautifulSoup, SoupStrainer - strainer = SoupStrainer(element) - with closing(urlopen(url)) as f: - soup = BeautifulSoup(f, features='html5lib', parse_only=strainer) + from bs4 import BeautifulSoup + with urlopen(url) as f: + soup = BeautifulSoup(f, features='html5lib') return soup.find_all(element)
closes #4214
https://api.github.com/repos/pandas-dev/pandas/pulls/4232
2013-07-13T16:54:53Z
2013-07-13T22:00:55Z
2013-07-13T22:00:55Z
2014-06-30T17:58:37Z
BUG: raise on invalid colormap for older mpl
diff --git a/doc/source/release.rst b/doc/source/release.rst index b1a4827c28e55..45335fa49aa23 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -331,6 +331,8 @@ pandas 0.12 - Fixed bug in ``Series.where`` where broadcasting a single element input vector to the length of the series resulted in multiplying the value inside the input (:issue:`4192`) + - Fixed bug in plotting that wasn't raising on invalid colormap for + matplotlib 1.1.1 (:issue:`4215`) pandas 0.11.0 ============= diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index 33228d99dd097..e1484c82ff165 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -463,6 +463,8 @@ Bug Fixes argument in ``to_datetime`` (:issue:`4152`) - Fixed bug in ``PandasAutoDateLocator`` where ``invert_xaxis`` triggered incorrectly ``MilliSecondLocator`` (:issue:`3990`) + - Fixed bug in plotting that wasn't raising on invalid colormap for + matplotlib 1.1.1 (:issue:`4215`) See the :ref:`full release notes <release>` or issue tracker diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index f1e4ef1106080..41297e3e17972 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -990,8 +990,7 @@ def test_option_mpl_style(self): pass def test_invalid_colormap(self): - df = DataFrame(np.random.randn(500, 2), columns=['A', 'B']) - + df = DataFrame(np.random.randn(3, 2), columns=['A', 'B']) self.assertRaises(ValueError, df.plot, colormap='invalid_colormap') diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index 0052ed1cecbe4..ce4ab3de09613 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -91,14 +91,17 @@ def _get_standard_kind(kind): return {'density': 'kde'}.get(kind, kind) -def _get_standard_colors(num_colors=None, colormap=None, - color_type='default', color=None): +def _get_standard_colors(num_colors=None, colormap=None, color_type='default', + color=None): import matplotlib.pyplot as plt if color is None 
and colormap is not None: if isinstance(colormap, basestring): import matplotlib.cm as cm + cmap = colormap colormap = cm.get_cmap(colormap) + if colormap is None: + raise ValueError("Colormap {0} is not recognized".format(cmap)) colors = map(colormap, np.linspace(0, 1, num=num_colors)) elif color is not None: if colormap is not None:
closes #4215
https://api.github.com/repos/pandas-dev/pandas/pulls/4231
2013-07-13T15:58:46Z
2013-07-13T18:19:43Z
2013-07-13T18:19:43Z
2014-07-16T08:19:08Z
TST: little endian failure, GH4222
diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 603924ac6a292..9257338cd4913 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -327,8 +327,16 @@ def _read_header(self): typlist = [ord(self.path_or_buf.read(1)) for i in range(self.nvar)] else: typlist = [self.OLD_TYPE_MAPPING[self._decode_bytes(self.path_or_buf.read(1))] for i in range(self.nvar)] - self.typlist = [self.TYPE_MAP[typ] for typ in typlist] - self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] + + try: + self.typlist = [self.TYPE_MAP[typ] for typ in typlist] + except: + raise ValueError("cannot convert stata types [{0}]".format(','.join(typlist))) + try: + self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] + except: + raise ValueError("cannot convert stata dtypes [{0}]".format(','.join(typlist))) + if self.format_version > 108: self.varlist = [self._null_terminate(self.path_or_buf.read(33)) for i in range(self.nvar)] else: diff --git a/pandas/io/tests/test_stata.py b/pandas/io/tests/test_stata.py index 0e32fb91fc743..fa8bf6f80ad03 100644 --- a/pandas/io/tests/test_stata.py +++ b/pandas/io/tests/test_stata.py @@ -132,7 +132,7 @@ def test_read_dta4(self): def test_write_dta5(self): if not is_little_endian(): raise nose.SkipTest("known failure of test_write_dta5 on non-little endian") - + original = DataFrame([(np.nan, np.nan, np.nan, np.nan, np.nan)], columns=['float_miss', 'double_miss', 'byte_miss', 'int_miss', 'long_miss']) original.index.name = 'index' @@ -145,7 +145,7 @@ def test_write_dta5(self): def test_write_dta6(self): if not is_little_endian(): raise nose.SkipTest("known failure of test_write_dta6 on non-little endian") - + original = self.read_csv(self.csv3) original.index.name = 'index' @@ -191,6 +191,9 @@ def test_read_dta9(self): ) def test_read_dta10(self): + if not is_little_endian(): + raise nose.SkipTest("known failure of test_write_dta10 on non-little endian") + original = DataFrame( data= [
closes #4222
https://api.github.com/repos/pandas-dev/pandas/pulls/4228
2013-07-13T00:44:41Z
2013-07-13T01:27:34Z
2013-07-13T01:27:34Z
2014-07-16T08:19:04Z
BUG: Display the legend when calling plot with kind='density'. GH4216
diff --git a/doc/source/release.rst b/doc/source/release.rst index 45335fa49aa23..2fbaea95d975a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -333,6 +333,7 @@ pandas 0.12 inside the input (:issue:`4192`) - Fixed bug in plotting that wasn't raising on invalid colormap for matplotlib 1.1.1 (:issue:`4215`) + - Fixed the legend displaying in ``DataFrame.plot(kind='kde')`` (:issue:`4216`) pandas 0.11.0 ============= diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index e1484c82ff165..cd284a542ec1e 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -465,7 +465,8 @@ Bug Fixes incorrectly ``MilliSecondLocator`` (:issue:`3990`) - Fixed bug in plotting that wasn't raising on invalid colormap for matplotlib 1.1.1 (:issue:`4215`) - + - Fixed the legend displaying in ``DataFrame.plot(kind='kde')`` (:issue:`4216`) + See the :ref:`full release notes <release>` or issue tracker on GitHub for a complete list. diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 41297e3e17972..1b7052bf62824 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -543,6 +543,8 @@ def test_kde(self): df = DataFrame(np.random.randn(100, 4)) _check_plot_works(df.plot, kind='kde') _check_plot_works(df.plot, kind='kde', subplots=True) + ax = df.plot(kind='kde') + self.assert_(ax.get_legend() is not None) axes = df.plot(kind='kde', logy=True, subplots=True) for ax in axes: self.assert_(ax.get_yscale() == 'log') diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index ce4ab3de09613..ad305382dd8cc 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1172,7 +1172,7 @@ def _make_plot(self): ax.grid(self.grid) def _post_plot_logic(self): - if self.subplots and self.legend: + if self.legend: for ax in self.axes: ax.legend(loc='best')
See issue https://github.com/pydata/pandas/issues/4216
https://api.github.com/repos/pandas-dev/pandas/pulls/4221
2013-07-12T12:32:13Z
2013-07-13T22:59:56Z
2013-07-13T22:59:56Z
2014-07-06T07:46:21Z
ENH: Treat 'Inf' as infinity in text parser
diff --git a/doc/source/release.rst b/doc/source/release.rst index 478e7375b0b30..496294fd86b19 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -41,6 +41,9 @@ pandas 0.13 - ``read_excel`` now supports an integer in its ``sheetname`` argument giving the index of the sheet to read in (:issue:`4301`). - Added a test for ``read_clipboard()`` and ``to_clipboard()`` (:issue:`4282`) + - Text parser now treats anything that reads like inf ("inf", "Inf", "-Inf", + "iNf", etc.) to infinity. (:issue:`4220`, :issue:`4219`), affecting + ``read_table``, ``read_csv``, etc. **API Changes** diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index 24d1b30d470ee..5af972ad4253c 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -11,6 +11,9 @@ API changes - ``read_excel`` now supports an integer in its ``sheetname`` argument giving the index of the sheet to read in (:issue:`4301`). + - Text parser now treats anything that reads like inf ("inf", "Inf", "-Inf", + "iNf", etc.) to infinity. (:issue:`4220`, :issue:`4219`), affecting + ``read_table``, ``read_csv``, etc. 
Enhancements ~~~~~~~~~~~~ diff --git a/pandas/io/tests/test_parsers.py b/pandas/io/tests/test_parsers.py index 784d650a524a7..b88b1ab776ab4 100644 --- a/pandas/io/tests/test_parsers.py +++ b/pandas/io/tests/test_parsers.py @@ -167,9 +167,22 @@ def test_inf_parsing(self): data = """\ ,A a,inf -b,-inf""" +b,-inf +c,Inf +d,-Inf +e,INF +f,-INF +g,INf +h,-INf +i,inF +j,-inF""" + inf = float('inf') + expected = Series([inf, -inf] * 5) df = read_csv(StringIO(data), index_col=0) - self.assertTrue(np.isinf(np.abs(df['A'])).all()) + assert_almost_equal(df['A'].values, expected.values) + df = read_csv(StringIO(data), index_col=0, na_filter=False) + print df['A'].values + assert_almost_equal(df['A'].values, expected.values) def test_multiple_date_col(self): # Can use multiple date parsers diff --git a/pandas/parser.pyx b/pandas/parser.pyx index 185cf1a752803..71d2e1c1e5381 100644 --- a/pandas/parser.pyx +++ b/pandas/parser.pyx @@ -3,7 +3,7 @@ from libc.stdio cimport fopen, fclose from libc.stdlib cimport malloc, free -from libc.string cimport strncpy, strlen, strcmp +from libc.string cimport strncpy, strlen, strcmp, strcasecmp cimport libc.stdio as stdio from cpython cimport (PyObject, PyBytes_FromString, @@ -1399,9 +1399,9 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end, else: error = to_double(word, data, parser.sci, parser.decimal) if error != 1: - if strcmp(word, cinf) == 0: + if strcasecmp(word, cinf) == 0: data[0] = INF - elif strcmp(word, cneginf) == 0: + elif strcasecmp(word, cneginf) == 0: data[0] = NEGINF else: return None, None @@ -1415,9 +1415,9 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end, word = COLITER_NEXT(it) error = to_double(word, data, parser.sci, parser.decimal) if error != 1: - if strcmp(word, cinf) == 0: + if strcasecmp(word, cinf) == 0: data[0] = INF - elif strcmp(word, cneginf) == 0: + elif strcasecmp(word, cneginf) == 0: data[0] = NEGINF else: return None, None diff --git 
a/pandas/util/testing.py b/pandas/util/testing.py index 0dc80f59e4699..275853d4533c9 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -126,7 +126,7 @@ def assert_almost_equal(a, b, check_less_precise = False): return assert_dict_equal(a, b) if isinstance(a, basestring): - assert a == b, "%s != %s" % (a, b) + assert a == b, "%r != %r" % (a, b) return True if isiterable(a):
Makes 'inf' checks case insensitive using strcasecmp Fixes #4219.
https://api.github.com/repos/pandas-dev/pandas/pulls/4220
2013-07-12T00:54:27Z
2013-07-26T12:03:37Z
2013-07-26T12:03:37Z
2014-06-14T11:07:09Z
BLD: Use 'git describe' to generate the version string for dev versions
diff --git a/setup.py b/setup.py index ec7f1628aada2..d66ac345aa61a 100755 --- a/setup.py +++ b/setup.py @@ -189,7 +189,7 @@ def build_extensions(self): MAJOR = 0 MINOR = 12 MICRO = 0 -ISRELEASED = True +ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) QUALIFIER = '' @@ -199,19 +199,20 @@ def build_extensions(self): try: import subprocess try: - pipe = subprocess.Popen(["git", "rev-parse", "--short", "HEAD"], + pipe = subprocess.Popen(["git", "describe", "HEAD"], stdout=subprocess.PIPE).stdout except OSError: # msysgit compatibility pipe = subprocess.Popen( - ["git.cmd", "rev-parse", "--short", "HEAD"], + ["git.cmd", "describe", "HEAD"], stdout=subprocess.PIPE).stdout rev = pipe.read().strip() # makes distutils blow up on Python 2.7 if sys.version_info[0] >= 3: rev = rev.decode('ascii') - FULLVERSION += "-%s" % rev + FULLVERSION = rev.lstrip('v') + except: warnings.warn("WARNING: Couldn't get git revision") else:
closes https://github.com/pydata/pandas/issues/4046 ``` λ sudo python ./setup.py develop ... Installed /home/user1/src/pandas Processing dependencies for pandas==v0.12.0rc1-48-g9442eac Searching for numpy==1.7.1 Best match: numpy 1.7.1 Adding numpy 1.7.1 to easy-install.pth file λ python -c 'import pandas; print pandas.version.version' v0.12.0rc1-48-g9442eac ```
https://api.github.com/repos/pandas-dev/pandas/pulls/4218
2013-07-11T20:19:26Z
2013-07-25T01:37:31Z
2013-07-25T01:37:31Z
2014-06-16T22:16:17Z
DOC: remove savefig width parameter (#4203)
diff --git a/doc/source/10min.rst b/doc/source/10min.rst index af84efb93bb5e..e3cfcc765d7c3 100644 --- a/doc/source/10min.rst +++ b/doc/source/10min.rst @@ -626,7 +626,7 @@ Plotting ts = pd.Series(randn(1000), index=pd.date_range('1/1/2000', periods=1000)) ts = ts.cumsum() - @savefig series_plot_basic.png width=6in + @savefig series_plot_basic.png ts.plot() On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: @@ -637,7 +637,7 @@ On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: columns=['A', 'B', 'C', 'D']) df = df.cumsum() - @savefig frame_plot_basic.png width=6in + @savefig frame_plot_basic.png plt.figure(); df.plot(); plt.legend(loc='best') Getting Data In/Out diff --git a/doc/source/computation.rst b/doc/source/computation.rst index 9b10c0dc25e16..eca5bf902aa39 100644 --- a/doc/source/computation.rst +++ b/doc/source/computation.rst @@ -232,7 +232,7 @@ These functions can be applied to ndarrays or Series objects: ts.plot(style='k--') - @savefig rolling_mean_ex.png width=6in + @savefig rolling_mean_ex.png rolling_mean(ts, 60).plot(style='k') They can also be applied to DataFrame objects. This is really just syntactic @@ -249,7 +249,7 @@ sugar for applying the moving window operator to all of the DataFrame's columns: columns=['A', 'B', 'C', 'D']) df = df.cumsum() - @savefig rolling_mean_frame.png width=6in + @savefig rolling_mean_frame.png rolling_sum(df, 60).plot(subplots=True) The ``rolling_apply`` function takes an extra ``func`` argument and performs @@ -260,7 +260,7 @@ compute the mean absolute deviation on a rolling basis: .. ipython:: python mad = lambda x: np.fabs(x - x.mean()).mean() - @savefig rolling_apply_ex.png width=6in + @savefig rolling_apply_ex.png rolling_apply(ts, 60, mad).plot(style='k') The ``rolling_window`` function performs a generic rolling window computation @@ -363,7 +363,7 @@ columns using ``ix`` indexing: .. 
ipython:: python - @savefig rolling_corr_pairwise_ex.png width=6in + @savefig rolling_corr_pairwise_ex.png correls.ix[:, 'A', 'C'].plot() Expanding window moment functions @@ -436,7 +436,7 @@ relative impact of an individual data point. As an example, here is the ts.plot(style='k--') - @savefig expanding_mean_frame.png width=6in + @savefig expanding_mean_frame.png expanding_mean(ts).plot(style='k') Exponentially weighted moment functions @@ -487,7 +487,7 @@ Here are an example for a univariate time series: plt.close('all') ts.plot(style='k--') - @savefig ewma_ex.png width=6in + @savefig ewma_ex.png ewma(ts, span=20).plot(style='k') .. note:: diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 04492210137ee..68387ba9f873c 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -205,7 +205,7 @@ adopted to pandas's data structures. For example: rng = period_range('1987Q2', periods=10, freq='Q-DEC') data = Series(np.random.randn(10), index=rng) - @savefig skts_ts_plot.png width=6in + @savefig skts_ts_plot.png plt.figure(); data.plot() Converting to and from period format diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 88d7519c31f4e..90722bcf4b68b 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -450,7 +450,7 @@ We can also visually compare the original and transformed data sets. compare = DataFrame({'Original': ts, 'Transformed': transformed}) - @savefig groupby_transform_plot.png width=4in + @savefig groupby_transform_plot.png compare.plot() Another common data transform is to replace missing data with the group mean. diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index 5ff436f6d0d50..2d8ac5d953a21 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -258,7 +258,7 @@ interpolation assumes equally spaced points. 
ts.interpolate().head() - @savefig series_interpolate.png width=6in + @savefig series_interpolate.png ts.interpolate().plot() Index aware interpolation is available via the ``method`` keyword: diff --git a/doc/source/rplot.rst b/doc/source/rplot.rst index e9bae8502996f..f268bafc2aa07 100644 --- a/doc/source/rplot.rst +++ b/doc/source/rplot.rst @@ -54,7 +54,7 @@ RPlot is a flexible API for producing Trellis plots. These plots allow you to ar plot.add(rplot.TrellisGrid(['sex', 'smoker'])) plot.add(rplot.GeomHistogram()) - @savefig rplot1_tips.png width=8in + @savefig rplot1_tips.png plot.render(plt.gcf()) In the example above, data from the tips data set is arranged by the attributes 'sex' and 'smoker'. Since both of those attributes can take on one of two values, the resulting grid has two columns and two rows. A histogram is displayed for each cell of the grid. @@ -67,7 +67,7 @@ In the example above, data from the tips data set is arranged by the attributes plot.add(rplot.TrellisGrid(['sex', 'smoker'])) plot.add(rplot.GeomDensity()) - @savefig rplot2_tips.png width=8in + @savefig rplot2_tips.png plot.render(plt.gcf()) Example above is the same as previous except the plot is set to kernel density estimation. This shows how easy it is to have different plots for the same Trellis structure. @@ -81,7 +81,7 @@ Example above is the same as previous except the plot is set to kernel density e plot.add(rplot.GeomScatter()) plot.add(rplot.GeomPolyFit(degree=2)) - @savefig rplot3_tips.png width=8in + @savefig rplot3_tips.png plot.render(plt.gcf()) The plot above shows that it is possible to have two or more plots for the same data displayed on the same Trellis grid cell. 
@@ -95,7 +95,7 @@ The plot above shows that it is possible to have two or more plots for the same plot.add(rplot.GeomScatter()) plot.add(rplot.GeomDensity2D()) - @savefig rplot4_tips.png width=8in + @savefig rplot4_tips.png plot.render(plt.gcf()) Above is a similar plot but with 2D kernel desnity estimation plot superimposed. @@ -108,7 +108,7 @@ Above is a similar plot but with 2D kernel desnity estimation plot superimposed. plot.add(rplot.TrellisGrid(['sex', '.'])) plot.add(rplot.GeomHistogram()) - @savefig rplot5_tips.png width=8in + @savefig rplot5_tips.png plot.render(plt.gcf()) It is possible to only use one attribute for grouping data. The example above only uses 'sex' attribute. If the second grouping attribute is not specified, the plots will be arranged in a column. @@ -121,7 +121,7 @@ It is possible to only use one attribute for grouping data. The example above on plot.add(rplot.TrellisGrid(['.', 'smoker'])) plot.add(rplot.GeomHistogram()) - @savefig rplot6_tips.png width=8in + @savefig rplot6_tips.png plot.render(plt.gcf()) If the first grouping attribute is not specified the plots will be arranged in a row. @@ -138,7 +138,7 @@ If the first grouping attribute is not specified the plots will be arranged in a plot.add(rplot.TrellisGrid(['sex', 'smoker'])) plot.add(rplot.GeomPoint(size=80.0, colour=rplot.ScaleRandomColour('day'), shape=rplot.ScaleShape('size'), alpha=1.0)) - @savefig rplot7_tips.png width=8in + @savefig rplot7_tips.png plot.render(plt.gcf()) As shown above, scatter plots are also possible. Scatter plots allow you to map various data attributes to graphical properties of the plot. In the example above the colour and shape of the scatter plot graphical objects is mapped to 'day' and 'size' attributes respectively. You use scale objects to specify these mappings. The list of scale classes is given below with initialization arguments for quick reference. 
diff --git a/doc/source/v0.8.0.txt b/doc/source/v0.8.0.txt index 243b7466d7dee..3b11582ac2a04 100644 --- a/doc/source/v0.8.0.txt +++ b/doc/source/v0.8.0.txt @@ -168,7 +168,7 @@ New plotting methods fx['FR'].plot(style='g') - @savefig whatsnew_secondary_y.png width=6in + @savefig whatsnew_secondary_y.png fx['IT'].plot(style='k--', secondary_y=True) Vytautas Jancauskas, the 2012 GSOC participant, has added many new plot @@ -180,7 +180,7 @@ types. For example, ``'kde'`` is a new option: np.random.randn(1000) * 0.5 + 3))) plt.figure() s.hist(normed=True, alpha=0.2) - @savefig whatsnew_kde.png width=6in + @savefig whatsnew_kde.png s.plot(kind='kde') See :ref:`the plotting page <visualization.other>` for much more. diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst index f1a9880047691..a3a02e1a978af 100644 --- a/doc/source/visualization.rst +++ b/doc/source/visualization.rst @@ -44,7 +44,7 @@ The ``plot`` method on Series and DataFrame is just a simple wrapper around ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) ts = ts.cumsum() - @savefig series_plot_basic.png width=6in + @savefig series_plot_basic.png ts.plot() If the index consists of dates, it calls ``gcf().autofmt_xdate()`` to try to @@ -53,7 +53,7 @@ for controlling the look of the plot: .. ipython:: python - @savefig series_plot_basic2.png width=6in + @savefig series_plot_basic2.png plt.figure(); ts.plot(style='k--', label='Series'); plt.legend() On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: @@ -63,7 +63,7 @@ On DataFrame, ``plot`` is a convenience to plot all of the columns with labels: df = DataFrame(randn(1000, 4), index=ts.index, columns=list('ABCD')) df = df.cumsum() - @savefig frame_plot_basic.png width=6in + @savefig frame_plot_basic.png plt.figure(); df.plot(); plt.legend(loc='best') You may set the ``legend`` argument to ``False`` to hide the legend, which is @@ -71,14 +71,14 @@ shown by default. .. 
ipython:: python - @savefig frame_plot_basic_noleg.png width=6in + @savefig frame_plot_basic_noleg.png df.plot(legend=False) Some other options are available, like plotting each Series on a different axis: .. ipython:: python - @savefig frame_plot_subplots.png width=6in + @savefig frame_plot_subplots.png df.plot(subplots=True, figsize=(6, 6)); plt.legend(loc='best') You may pass ``logy`` to get a log-scale Y axis. @@ -90,7 +90,7 @@ You may pass ``logy`` to get a log-scale Y axis. ts = Series(randn(1000), index=date_range('1/1/2000', periods=1000)) ts = np.exp(ts.cumsum()) - @savefig series_plot_logy.png width=6in + @savefig series_plot_logy.png ts.plot(logy=True) You can plot one column versus another using the `x` and `y` keywords in @@ -103,7 +103,7 @@ You can plot one column versus another using the `x` and `y` keywords in df3 = DataFrame(randn(1000, 2), columns=['B', 'C']).cumsum() df3['A'] = Series(range(len(df))) - @savefig df_plot_xy.png width=6in + @savefig df_plot_xy.png df3.plot(x='A', y='B') @@ -118,7 +118,7 @@ To plot data on a secondary y-axis, use the ``secondary_y`` keyword: df.A.plot() - @savefig series_plot_secondary_y.png width=6in + @savefig series_plot_secondary_y.png df.B.plot(secondary_y=True, style='g') @@ -133,7 +133,7 @@ keyword: plt.figure() ax = df.plot(secondary_y=['A', 'B']) ax.set_ylabel('CD scale') - @savefig frame_plot_secondary_y.png width=6in + @savefig frame_plot_secondary_y.png ax.right_ax.set_ylabel('AB scale') @@ -146,7 +146,7 @@ with "(right)" in the legend. 
To turn off the automatic marking, use the plt.figure() - @savefig frame_plot_secondary_y_no_right.png width=6in + @savefig frame_plot_secondary_y_no_right.png df.plot(secondary_y=['A', 'B'], mark_right=False) @@ -164,7 +164,7 @@ Here is the default behavior, notice how the x-axis tick labelling is performed: plt.figure() - @savefig ser_plot_suppress.png width=6in + @savefig ser_plot_suppress.png df.A.plot() @@ -174,7 +174,7 @@ Using the ``x_compat`` parameter, you can suppress this behavior: plt.figure() - @savefig ser_plot_suppress_parm.png width=6in + @savefig ser_plot_suppress_parm.png df.A.plot(x_compat=True) @@ -187,7 +187,7 @@ in ``pandas.plot_params`` can be used in a `with statement`: plt.figure() - @savefig ser_plot_suppress_context.png width=6in + @savefig ser_plot_suppress_context.png with pd.plot_params.use('x_compat', True): df.A.plot(color='r') df.B.plot(color='g') @@ -215,7 +215,7 @@ You can pass an ``ax`` argument to ``Series.plot`` to plot on a particular axis: df['B'].plot(ax=axes[0,1]); axes[0,1].set_title('B') df['C'].plot(ax=axes[1,0]); axes[1,0].set_title('C') - @savefig series_plot_multi.png width=6in + @savefig series_plot_multi.png df['D'].plot(ax=axes[1,1]); axes[1,1].set_title('D') @@ -235,7 +235,7 @@ For labeled, non-time series data, you may wish to produce a bar plot: plt.figure(); - @savefig bar_plot_ex.png width=6in + @savefig bar_plot_ex.png df.ix[5].plot(kind='bar'); plt.axhline(0, color='k') Calling a DataFrame's ``plot`` method with ``kind='bar'`` produces a multiple @@ -250,7 +250,7 @@ bar plot: df2 = DataFrame(rand(10, 4), columns=['a', 'b', 'c', 'd']) - @savefig bar_plot_multi_ex.png width=5in + @savefig bar_plot_multi_ex.png df2.plot(kind='bar'); To produce a stacked bar plot, pass ``stacked=True``: @@ -262,7 +262,7 @@ To produce a stacked bar plot, pass ``stacked=True``: .. 
ipython:: python - @savefig bar_plot_stacked_ex.png width=5in + @savefig bar_plot_stacked_ex.png df2.plot(kind='bar', stacked=True); To get horizontal bar plots, pass ``kind='barh'``: @@ -274,7 +274,7 @@ To get horizontal bar plots, pass ``kind='barh'``: .. ipython:: python - @savefig barh_plot_stacked_ex.png width=5in + @savefig barh_plot_stacked_ex.png df2.plot(kind='barh', stacked=True); Histograms @@ -283,7 +283,7 @@ Histograms plt.figure(); - @savefig hist_plot_ex.png width=6in + @savefig hist_plot_ex.png df['A'].diff().hist() @@ -294,7 +294,7 @@ subplots: plt.figure() - @savefig frame_hist_ex.png width=6in + @savefig frame_hist_ex.png df.diff().hist(color='k', alpha=0.5, bins=50) @@ -309,8 +309,8 @@ New since 0.10.0, the ``by`` keyword can be specified to plot grouped histograms data = Series(randn(1000)) - @savefig grouped_hist.png width=6in - data.hist(by=randint(0, 4, 1000)) + @savefig grouped_hist.png + data.hist(by=randint(0, 4, 1000), figsize=(6, 4)) .. _visualization.box: @@ -329,7 +329,7 @@ a uniform random variable on [0,1). df = DataFrame(rand(10,5)) plt.figure(); - @savefig box_plot_ex.png width=6in + @savefig box_plot_ex.png bp = df.boxplot() You can create a stratified boxplot using the ``by`` keyword argument to create @@ -342,7 +342,7 @@ groupings. For instance, plt.figure(); - @savefig box_plot_ex2.png width=6in + @savefig box_plot_ex2.png bp = df.boxplot(by='X') You can also pass a subset of columns to plot, as well as group by multiple @@ -356,7 +356,7 @@ columns: plt.figure(); - @savefig box_plot_ex3.png width=6in + @savefig box_plot_ex3.png bp = df.boxplot(column=['Col1','Col2'], by=['X','Y']) .. _visualization.scatter_matrix: @@ -372,7 +372,7 @@ Scatter plot matrix from pandas.tools.plotting import scatter_matrix df = DataFrame(randn(1000, 4), columns=['a', 'b', 'c', 'd']) - @savefig scatter_matrix_kde.png width=6in + @savefig scatter_matrix_kde.png scatter_matrix(df, alpha=0.2, figsize=(6, 6), diagonal='kde') .. 
_visualization.kde: @@ -389,7 +389,7 @@ setting `kind='kde'`: ser = Series(randn(1000)) - @savefig kde_plot.png width=6in + @savefig kde_plot.png ser.plot(kind='kde') .. _visualization.andrews_curves: @@ -414,7 +414,7 @@ of the same class will usually be closer together and form larger structures. plt.figure() - @savefig andrews_curves.png width=6in + @savefig andrews_curves.png andrews_curves(data, 'Name') .. _visualization.parallel_coordinates: @@ -437,7 +437,7 @@ represents one data point. Points that tend to cluster will appear closer togeth plt.figure() - @savefig parallel_coordinates.png width=6in + @savefig parallel_coordinates.png parallel_coordinates(data, 'Name') Lag Plot @@ -456,7 +456,7 @@ implies that the underlying data are not random. data = Series(0.1 * rand(1000) + 0.9 * np.sin(np.linspace(-99 * np.pi, 99 * np.pi, num=1000))) - @savefig lag_plot.png width=6in + @savefig lag_plot.png lag_plot(data) Autocorrelation Plot @@ -479,7 +479,7 @@ confidence band. data = Series(0.7 * rand(1000) + 0.3 * np.sin(np.linspace(-9 * np.pi, 9 * np.pi, num=1000))) - @savefig autocorrelation_plot.png width=6in + @savefig autocorrelation_plot.png autocorrelation_plot(data) .. _visualization.bootstrap: @@ -499,7 +499,7 @@ are what constitutes the bootstrap plot. data = Series(rand(1000)) - @savefig bootstrap_plot.png width=6in + @savefig bootstrap_plot.png bootstrap_plot(data, size=50, samples=500, color='grey') .. _visualization.radviz: @@ -529,7 +529,7 @@ be colored differently. plt.figure() - @savefig radviz.png width=6in + @savefig radviz.png radviz(data, 'Name') .. 
_visualization.colormaps: @@ -550,7 +550,7 @@ To use the jet colormap, we can simply pass ``'jet'`` to ``colormap=`` plt.figure() - @savefig jet.png width=6in + @savefig jet.png df.plot(colormap='jet') or we can pass the colormap itself @@ -561,7 +561,7 @@ or we can pass the colormap itself plt.figure() - @savefig jet_cm.png width=6in + @savefig jet_cm.png df.plot(colormap=cm.jet) Colormaps can also be used other plot types, like bar charts: @@ -573,7 +573,7 @@ Colormaps can also be used other plot types, like bar charts: plt.figure() - @savefig greens.png width=6in + @savefig greens.png dd.plot(kind='bar', colormap='Greens') Parallel coordinates charts: @@ -582,7 +582,7 @@ Parallel coordinates charts: plt.figure() - @savefig parallel_gist_rainbow.png width=6in + @savefig parallel_gist_rainbow.png parallel_coordinates(data, 'Name', colormap='gist_rainbow') Andrews curves charts: @@ -591,5 +591,5 @@ Andrews curves charts: plt.figure() - @savefig andrews_curve_winter.png width=6in + @savefig andrews_curve_winter.png andrews_curves(data, 'Name', colormap='winter')
Closes #4203.
https://api.github.com/repos/pandas-dev/pandas/pulls/4217
2013-07-11T16:29:01Z
2013-07-14T14:26:48Z
2013-07-14T14:26:48Z
2014-06-27T11:45:17Z
BLD: pip 1.4 is coming, preempt issues with pytz install
diff --git a/setup.py b/setup.py index 939931f08d2bd..28d2627ef2940 100755 --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ setuptools_kwargs = {'use_2to3': True, 'zip_safe': False, 'install_requires': ['python-dateutil >= 2', - 'pytz', + 'pytz >= 2011k', 'numpy >= %s' % min_numpy_ver], 'setup_requires': ['numpy >= %s' % min_numpy_ver], 'use_2to3_exclude_fixers': ['lib2to3.fixes.fix_next', @@ -57,7 +57,7 @@ min_numpy_ver = '1.6.1' setuptools_kwargs = { 'install_requires': ['python-dateutil', - 'pytz', + 'pytz >= 2011k', 'numpy >= %s' % min_numpy_ver], 'setup_requires': ['numpy >= %s' % min_numpy_ver], 'zip_safe': False,
https://github.com/pypa/pip/issues/974 2011k is the version shipped with precise, I hope that's conservative enough to avoid disruption for existing users.
https://api.github.com/repos/pandas-dev/pandas/pulls/4210
2013-07-11T15:28:25Z
2013-07-11T15:33:03Z
2013-07-11T15:33:03Z
2014-06-27T14:38:03Z
PERF: add ix scalar get benchmark
diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py index 9f07cc6ed15c3..1264ae053ffca 100644 --- a/vb_suite/indexing.py +++ b/vb_suite/indexing.py @@ -61,23 +61,26 @@ bm_df_getitem2 = Benchmark(statement, setup, name='datamatrix_getitem_scalar') -setup = common_setup + """ -try: - klass = DataMatrix -except: - klass = DataFrame +#---------------------------------------------------------------------- +# ix get scalar + +setup = common_setup + """ index = [tm.rands(10) for _ in xrange(1000)] columns = [tm.rands(10) for _ in xrange(30)] -df = klass(np.random.rand(1000, 30), index=index, +df = DataFrame(np.random.randn(1000, 30), index=index, columns=columns) idx = index[100] col = columns[10] """ -statement = "df.get_value(idx, col)" -bm_df_getitem3 = Benchmark(statement, setup, - name='dataframe_get_value', - start_date=datetime(2011, 11, 12)) + +indexing_frame_get_value_ix = Benchmark("df.ix[idx,col]", setup, + name='indexing_frame_get_value_ix', + start_date=datetime(2011, 11, 12)) + +indexing_frame_get_value = Benchmark("df.get_value(idx,col)", setup, + name='indexing_frame_get_value', + start_date=datetime(2011, 11, 12)) #---------------------------------------------------------------------- # Boolean DataFrame row selection
https://api.github.com/repos/pandas-dev/pandas/pulls/4207
2013-07-11T13:33:27Z
2013-07-11T13:33:33Z
2013-07-11T13:33:33Z
2014-07-16T08:18:50Z
DOC: more prominent HDFStore store docs about storer/table formats
diff --git a/doc/source/io.rst b/doc/source/io.rst index 5e5943f066c3e..27488f3c5916d 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1651,11 +1651,6 @@ Closing a Store, Context Manager import os os.remove('store.h5') - -These stores are **not** appendable once written (though you can simply -remove them and rewrite). Nor are they **queryable**; they must be -retrieved in their entirety. - Read/Write API ~~~~~~~~~~~~~~ @@ -1674,10 +1669,33 @@ similar to how ``read_csv`` and ``to_csv`` work. (new in 0.11.0) os.remove('store_tl.h5') +.. _io.hdf5-storer: + +Storer Format +~~~~~~~~~~~~~ + +The examples above show storing using ``put``, which write the HDF5 to ``PyTables`` in a fixed array format, called +the ``storer`` format. These types of stores are are **not** appendable once written (though you can simply +remove them and rewrite). Nor are they **queryable**; they must be +retrieved in their entirety. These offer very fast writing and slightly faster reading than ``table`` stores. + +.. warning:: + + A ``storer`` format will raise a ``TypeError`` if you try to retrieve using a ``where`` . + + .. code-block:: python + + DataFrame(randn(10,2)).to_hdf('test_storer.h5','df') + + pd.read_hdf('test_storer.h5','df',where='index>5') + TypeError: cannot pass a where specification when reading a non-table + this store must be selected in its entirety + + .. _io.hdf5-table: -Storing in Table format -~~~~~~~~~~~~~~~~~~~~~~~ +Table Format +~~~~~~~~~~~~ ``HDFStore`` supports another ``PyTables`` format on disk, the ``table`` format. Conceptually a ``table`` is shaped very much like a DataFrame, @@ -1708,6 +1726,10 @@ supported. # the type of stored data store.root.df._v_attrs.pandas_type +.. note:: + + You can also create a ``table`` by passing ``table=True`` to a ``put`` operation. + .. 
_io.hdf5-keys: Hierarchical Keys @@ -2121,9 +2143,6 @@ Notes & Caveats in a string, or a ``NaT`` in a datetime-like column counts as having a value), then those rows **WILL BE DROPPED IMPLICITLY**. This limitation *may* be addressed in the future. - - You can not append/select/delete to a non-table (table creation is - determined on the first append, or by passing ``table=True`` in a - put operation) - ``HDFStore`` is **not-threadsafe for writing**. The underlying ``PyTables`` only supports concurrent reads (via threading or processes). If you need reading and writing *at the same time*, you diff --git a/doc/source/release.rst b/doc/source/release.rst index c40e1804fe3c0..2379dc29fbce7 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -113,7 +113,7 @@ pandas 0.12 - When removing an object, ``remove(key)`` raises ``KeyError`` if the key is not a valid store object. - raise a ``TypeError`` on passing ``where`` or ``columns`` - to select with a Storer; these are invalid parameters at this time + to select with a Storer; these are invalid parameters at this time (:issue:`4189`) - can now specify an ``encoding`` option to ``append/put`` to enable alternate encodings (:issue:`3750`) - enable support for ``iterator/chunksize`` with ``read_hdf`` diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 43b3197667d2b..d6ad6aa0c351a 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1746,9 +1746,11 @@ def f(values, freq=None, tz=None): def validate_read(self, kwargs): if kwargs.get('columns') is not None: - raise TypeError("cannot pass a column specification when reading a Storer") + raise TypeError("cannot pass a column specification when reading a non-table " + "this store must be selected in its entirety") if kwargs.get('where') is not None: - raise TypeError("cannot pass a where specification when reading a Storer") + raise TypeError("cannot pass a where specification when reading from a non-table " + "this store must be selected 
in its entirety") @property def is_exists(self):
closes #4189
https://api.github.com/repos/pandas-dev/pandas/pulls/4206
2013-07-11T13:01:45Z
2013-07-11T13:17:51Z
2013-07-11T13:17:51Z
2014-06-23T16:47:24Z
BUG: (GH4192) Fixed buglet in the broadcasting logic in Series.where
diff --git a/doc/source/release.rst b/doc/source/release.rst index a9e88f1341992..ada899e099aaa 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -324,6 +324,9 @@ pandas 0.12 (:issue:`4145`, :issue:`4146`) - Fixed bug in the parsing of microseconds when using the ``format`` argument in ``to_datetime`` (:issue:`4152`) + - Fixed bug in ``Series.where`` where broadcasting a single element input vector + to the length of the series resulted in multiplying the value + inside the input (:issue:`4192`) pandas 0.11.0 ============= diff --git a/pandas/core/series.py b/pandas/core/series.py index 06abd1d5b4127..81a68138ce202 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -751,11 +751,12 @@ def where(self, cond, other=nan, inplace=False): if len(other) != len(ser): icond = ~cond - + print "First", other # GH 2745 # treat like a scalar if len(other) == 1: - other = np.array(other[0]*len(ser)) + other = np.array(other[0]) + print "EQ 1", other # GH 3235 # match True cond to other @@ -767,7 +768,10 @@ def where(self, cond, other=nan, inplace=False): other = new_other else: + print "Raising", other raise ValueError('Length of replacements must equal series length') + + print "Finished", other change = ser if inplace else None com._maybe_upcast_putmask(ser,~cond,other,change=change) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index b4ad172ddf340..a1858a350447d 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -1090,11 +1090,6 @@ def test_where(self): expected = Series([0,2]) assert_series_equal(s,expected) - s = Series([1,2]) - s[[True, False]] = [0] - expected = Series([0,2]) - assert_series_equal(s,expected) - # failures self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]), [0,2,3]) self.assertRaises(ValueError, s.__setitem__, tuple([[[True, False]]]), []) @@ -1142,6 +1137,24 @@ def test_where(self): s = Series(np.arange(10)) mask = s > 5 self.assertRaises(ValueError, 
s.__setitem__, mask, ([0]*5,)) + + def test_where_broadcast(self): + # Test a variety of differently sized series + for size in range(2, 6): + # Test a variety of boolean indices + for selection in [np.resize([True, False, False, False, False], size), # First element should be set + np.resize([True, False], size), # Set alternating elements] + np.resize([False], size)]: # No element should be set + # Test a variety of different numbers as content + for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]: + # Test numpy arrays, lists and tuples as the input to be broadcast + for arr in [np.array([item]), [item], (item,)]: + data = np.arange(size, dtype=float) + s = Series(data) + s[selection] = arr + # Construct the expected series by taking the source data or item based on the selection + expected = Series([item if use_item else data[i] for i, use_item in enumerate(selection)]) + assert_series_equal(s,expected) def test_where_inplace(self): s = Series(np.random.randn(5))
Fixed #4192
https://api.github.com/repos/pandas-dev/pandas/pulls/4195
2013-07-10T17:08:06Z
2013-07-10T18:52:48Z
2013-07-10T18:52:48Z
2014-08-11T01:31:56Z
FIX disallow MultiIndex in Series constructor GH4187
diff --git a/doc/source/release.rst b/doc/source/release.rst index a965d92e5dbe3..d97a99426ff7d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -199,6 +199,7 @@ pandas 0.12 - Fixed an esoteric excel reading bug, xlrd>= 0.9.0 now required for excel support. Should provide python3 support (for reading) which has been lacking. (:issue:`3164`) + - Disallow Series constructor called with MultiIndex which caused segfault (:issue:`4187`) - Allow unioning of date ranges sharing a timezone (:issue:`3491`) - Fix to_csv issue when having a large number of rows and ``NaT`` in some columns (:issue:`3437`) diff --git a/pandas/core/series.py b/pandas/core/series.py index 06abd1d5b4127..cb81c15a24122 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -432,6 +432,9 @@ def __new__(cls, data=None, index=None, dtype=None, name=None, if data is None: data = {} + if isinstance(data, MultiIndex): + raise NotImplementedError + if index is not None: index = _ensure_index(index) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index b4ad172ddf340..e2afb2bf26a6c 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -311,6 +311,10 @@ def test_constructor(self): xp = 'Series' self.assertEqual(rs, xp) + # raise on MultiIndex GH4187 + m = MultiIndex.from_arrays([[1, 2], [3,4]]) + self.assertRaises(NotImplementedError, Series, m) + def test_constructor_empty(self): empty = Series() empty2 = Series([])
fixes #4187 (at least the segfault part). Constructing a Series with a MultiIndex now raises NotImplementedError; one option (to actually implement this) would be to return `.to_series()` instead?
https://api.github.com/repos/pandas-dev/pandas/pulls/4190
2013-07-10T13:11:52Z
2013-07-13T08:21:49Z
2013-07-13T08:21:49Z
2014-07-07T12:44:52Z
TST: fix ujson tests failures on 32-bit
diff --git a/pandas/io/tests/test_json/test_ujson.py b/pandas/io/tests/test_json/test_ujson.py index 23bd41d245f75..f838f8b7ea15c 100644 --- a/pandas/io/tests/test_json/test_ujson.py +++ b/pandas/io/tests/test_json/test_ujson.py @@ -96,7 +96,12 @@ def test_decimalDecodeTest(self): sut = {u'a': 4.56} encoded = ujson.encode(sut) decoded = ujson.decode(encoded) - self.assertNotEqual(sut, decoded) + + # Roundtrip works on 32-bit / fails on 64-bit + if sys.maxsize < 2**32: + self.assertEqual(sut, decoded) + else: + self.assertNotEqual(sut, decoded) def test_decimalDecodeTestPrecise(self): sut = {u'a': 4.56} @@ -1396,25 +1401,27 @@ def test_decodeArrayFaultyUnicode(self): def test_decodeFloatingPointAdditionalTests(self): - self.assertEquals(-1.1234567893, ujson.loads("-1.1234567893")) - self.assertEquals(-1.234567893, ujson.loads("-1.234567893")) - self.assertEquals(-1.34567893, ujson.loads("-1.34567893")) - self.assertEquals(-1.4567893, ujson.loads("-1.4567893")) - self.assertEquals(-1.567893, ujson.loads("-1.567893")) - self.assertEquals(-1.67893, ujson.loads("-1.67893")) - self.assertEquals(-1.7893, ujson.loads("-1.7893")) - self.assertEquals(-1.893, ujson.loads("-1.893")) - self.assertEquals(-1.3, ujson.loads("-1.3")) - - self.assertEquals(1.1234567893, ujson.loads("1.1234567893")) - self.assertEquals(1.234567893, ujson.loads("1.234567893")) - self.assertEquals(1.34567893, ujson.loads("1.34567893")) - self.assertEquals(1.4567893, ujson.loads("1.4567893")) - self.assertEquals(1.567893, ujson.loads("1.567893")) - self.assertEquals(1.67893, ujson.loads("1.67893")) - self.assertEquals(1.7893, ujson.loads("1.7893")) - self.assertEquals(1.893, ujson.loads("1.893")) - self.assertEquals(1.3, ujson.loads("1.3")) + places = 15 + + self.assertAlmostEquals(-1.1234567893, ujson.loads("-1.1234567893"), places=places) + self.assertAlmostEquals(-1.234567893, ujson.loads("-1.234567893"), places=places) + self.assertAlmostEquals(-1.34567893, ujson.loads("-1.34567893"), places=places) 
+ self.assertAlmostEquals(-1.4567893, ujson.loads("-1.4567893"), places=places) + self.assertAlmostEquals(-1.567893, ujson.loads("-1.567893"), places=places) + self.assertAlmostEquals(-1.67893, ujson.loads("-1.67893"), places=places) + self.assertAlmostEquals(-1.7893, ujson.loads("-1.7893"), places=places) + self.assertAlmostEquals(-1.893, ujson.loads("-1.893"), places=places) + self.assertAlmostEquals(-1.3, ujson.loads("-1.3"), places=places) + + self.assertAlmostEquals(1.1234567893, ujson.loads("1.1234567893"), places=places) + self.assertAlmostEquals(1.234567893, ujson.loads("1.234567893"), places=places) + self.assertAlmostEquals(1.34567893, ujson.loads("1.34567893"), places=places) + self.assertAlmostEquals(1.4567893, ujson.loads("1.4567893"), places=places) + self.assertAlmostEquals(1.567893, ujson.loads("1.567893"), places=places) + self.assertAlmostEquals(1.67893, ujson.loads("1.67893"), places=places) + self.assertAlmostEquals(1.7893, ujson.loads("1.7893"), places=places) + self.assertAlmostEquals(1.893, ujson.loads("1.893"), places=places) + self.assertAlmostEquals(1.3, ujson.loads("1.3"), places=places) def test_encodeBigSet(self): s = set()
As pointed out by @lodagro [here](https://github.com/pydata/pandas/commit/a8d15bd4c6dae2c65ed4aa4b2f76c029b6358113#commitcomment-3521549) and [here](https://github.com/pydata/pandas/commit/a8d15bd4c6dae2c65ed4aa4b2f76c029b6358113#commitcomment-3521581) some of the ujson tests fail on 32-bit due to precision errors. The changes below amend the tests so they pass.
https://api.github.com/repos/pandas-dev/pandas/pulls/4186
2013-07-10T01:36:44Z
2013-07-10T18:56:00Z
2013-07-10T18:56:00Z
2014-07-16T08:18:41Z
TST: raise when no data are found when trying to dld multiple symbols
diff --git a/pandas/io/data.py b/pandas/io/data.py index e5985c703f93f..e3b0af542bb41 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -312,7 +312,12 @@ def _dl_mult_symbols(symbols, start, end, chunksize, retry_count, pause, 'NaN.'.format(sym), SymbolWarning) stocks[sym] = np.nan - return Panel(stocks).swapaxes('items', 'minor') + try: + return Panel(stocks).swapaxes('items', 'minor') + except AttributeError: + # cannot construct a panel with just 1D nans indicating no data + raise RemoteDataError("No data fetched using " + "{0!r}".format(method.__name__)) _source_functions = {'google': _get_hist_google, 'yahoo': _get_hist_yahoo}
closes #4182.
https://api.github.com/repos/pandas-dev/pandas/pulls/4184
2013-07-09T23:31:56Z
2013-07-09T23:59:45Z
2013-07-09T23:59:45Z
2014-07-16T08:18:39Z
ENH: drop_level argument for xs
diff --git a/doc/source/release.rst b/doc/source/release.rst index b301dcb80445a..bddf720a6b72e 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -48,6 +48,7 @@ pandas 0.13 overlapping color and style arguments (:issue:`4402`) - Significant table writing performance improvements in ``HDFStore`` - JSON date serialisation now performed in low-level C code. + - Add ``drop_level`` argument to xs (:issue:`4180`) - ``Index.copy()`` and ``MultiIndex.copy()`` now accept keyword arguments to change attributes (i.e., ``names``, ``levels``, ``labels``) (:issue:`4039`) diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 31f7179f8e328..60492b13c30b8 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2063,7 +2063,7 @@ def _sanitize_column(self, key, value): def _series(self): return self._data.get_series_dict() - def xs(self, key, axis=0, level=None, copy=True): + def xs(self, key, axis=0, level=None, copy=True, drop_level=True): """ Returns a cross-section (row(s) or column(s)) from the DataFrame. Defaults to cross-section on the rows (axis=0). @@ -2079,6 +2079,8 @@ def xs(self, key, axis=0, level=None, copy=True): which levels are used. Levels can be referred by label or position. copy : boolean, default True Whether to make a copy of the data + drop_level, default True + If False, returns object with same levels as self. 
Examples -------- @@ -2130,11 +2132,13 @@ def xs(self, key, axis=0, level=None, copy=True): Returns ------- xs : Series or DataFrame + """ axis = self._get_axis_number(axis) labels = self._get_axis(axis) if level is not None: - loc, new_ax = labels.get_loc_level(key, level=level) + loc, new_ax = labels.get_loc_level(key, level=level, + drop_level=drop_level) if not copy and not isinstance(loc, slice): raise ValueError('Cannot retrieve view (copy=False)') @@ -2168,7 +2172,8 @@ def xs(self, key, axis=0, level=None, copy=True): index = self.index if isinstance(index, MultiIndex): - loc, new_index = self.index.get_loc_level(key) + loc, new_index = self.index.get_loc_level(key, + drop_level=drop_level) else: loc = self.index.get_loc(key) diff --git a/pandas/core/index.py b/pandas/core/index.py index 22bd7f318a237..05eb53a444294 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -2560,7 +2560,7 @@ def get_loc(self, key): else: return self._get_level_indexer(key, level=0) - def get_loc_level(self, key, level=0): + def get_loc_level(self, key, level=0, drop_level=True): """ Get integer location slice for requested label or tuple @@ -2572,7 +2572,9 @@ def get_loc_level(self, key, level=0): ------- loc : int or slice object """ - def _drop_levels(indexer, levels): + def _maybe_drop_levels(indexer, levels, drop_level): + if not drop_level: + return self[indexer] # kludgearound new_index = self[indexer] levels = [self._get_level_number(i) for i in levels] @@ -2593,7 +2595,8 @@ def _drop_levels(indexer, levels): loc = mask result = loc if result is None else result & loc - return result, _drop_levels(result, level) + + return result, _maybe_drop_levels(result, level, drop_level) level = self._get_level_number(level) @@ -2606,7 +2609,7 @@ def _drop_levels(indexer, levels): try: if key in self.levels[0]: indexer = self._get_level_indexer(key, level=level) - new_index = _drop_levels(indexer, [0]) + new_index = _maybe_drop_levels(indexer, [0], drop_level) return 
indexer, new_index except TypeError: pass @@ -2625,7 +2628,7 @@ def _drop_levels(indexer, levels): raise KeyError(key) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] - return indexer, _drop_levels(indexer, ilevels) + return indexer, _maybe_drop_levels(indexer, ilevels, drop_level) else: indexer = None for i, k in enumerate(key): @@ -2652,10 +2655,10 @@ def _drop_levels(indexer, levels): indexer = slice(None, None) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] - return indexer, _drop_levels(indexer, ilevels) + return indexer, _maybe_drop_levels(indexer, ilevels, drop_level) else: indexer = self._get_level_indexer(key, level=level) - new_index = _drop_levels(indexer, [level]) + new_index = _maybe_drop_levels(indexer, [level], drop_level) return indexer, new_index def _get_level_indexer(self, key, level=0): diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 517c984fa0e64..8e769ed91137f 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -7261,6 +7261,18 @@ def test_xs_duplicates(self): exp = df.irow(2) assert_series_equal(cross, exp) + def test_xs_keep_level(self): + df = DataFrame({'day': {0: 'sat', 1: 'sun'}, + 'flavour': {0: 'strawberry', 1: 'strawberry'}, + 'sales': {0: 10, 1: 12}, + 'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day']) + result = df.xs('sat', level='day', drop_level=False) + expected = df[:1] + assert_frame_equal(result, expected) + + result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False) + assert_frame_equal(result, expected) + def test_pivot(self): data = { 'index': ['A', 'B', 'C', 'C', 'B', 'A'],
As discussed [here](http://stackoverflow.com/questions/17552997/how-to-update-a-subset-of-a-multiindexed-pandas-dataframe), xs usually remove the level which you are accessing. This commit allows you to explicitly say you don't want that (and want to keep the same levels in the result). ``` In [4]: df = DataFrame({'day': {0: 'sat', 1: 'sun'}, 'flavour': {0: 'strawberry', 1: 'strawberry'}, 'sales': {0: 10, 1: 12}, 'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day']) In [5]: df Out[5]: sales year flavour day 2008 strawberry sat 10 sun 12 In [6]: df.xs('sat', level='day') Out[6]: sales year flavour 2008 strawberry 10 In [7]: df.xs('sat', level='day', drop_level=False) Out[7]: sales year flavour day 2008 strawberry sat 10 In [8]: df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False) Out[8]: sales year flavour day 2008 strawberry sat 10 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/4180
2013-07-09T17:24:54Z
2013-08-27T12:18:58Z
2013-08-27T12:18:58Z
2014-06-14T07:25:56Z
CLN: write the attributes in a HDFStore as strings
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index fdb86c43b7160..43b3197667d2b 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -1622,8 +1622,8 @@ def __unicode__(self): def set_object_info(self): """ set my pandas type & version """ - self.attrs.pandas_type = self.pandas_kind - self.attrs.pandas_version = _version + self.attrs.pandas_type = str(self.pandas_kind) + self.attrs.pandas_version = str(_version) self.set_version() def copy(self): @@ -2377,7 +2377,7 @@ def set_info(self): def set_attrs(self): """ set our table type & indexables """ - self.attrs.table_type = self.table_type + self.attrs.table_type = str(self.table_type) self.attrs.index_cols = self.index_cols() self.attrs.values_cols = self.values_cols() self.attrs.non_index_axes = self.non_index_axes
These strings are converted to bytes; previously they were written as unicode.
https://api.github.com/repos/pandas-dev/pandas/pulls/4178
2013-07-09T16:31:46Z
2013-07-09T17:07:40Z
2013-07-09T17:07:40Z
2014-07-16T08:18:36Z
TST: Do not skip MySQL tests on Travis.
diff --git a/.travis.yml b/.travis.yml index 3d3d90eb47129..96dba262ebfb6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -40,6 +40,9 @@ install: - echo "Waldo2" - ci/install.sh +before_script: + - mysql -e 'create database pandas_nosetest;' + script: - echo "Waldo3" - ci/script.sh diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt index 57acc459adc13..a65a2991c673c 100644 --- a/ci/requirements-2.7.txt +++ b/ci/requirements-2.7.txt @@ -13,3 +13,4 @@ patsy==0.1.0 html5lib==1.0b2 lxml==3.2.1 scikits.timeseries==0.91.3 +MySQL-python==1.2.4 diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py index 8bbd03268f2de..5b23bf173ec4e 100644 --- a/pandas/io/tests/test_sql.py +++ b/pandas/io/tests/test_sql.py @@ -1,8 +1,11 @@ +from __future__ import with_statement from pandas.util.py3compat import StringIO import unittest import sqlite3 import sys +import warnings + import nose import numpy as np @@ -160,7 +163,7 @@ def _check_roundtrip(self, frame): sql.write_frame(frame, name='test_table', con=self.db) result = sql.read_frame("select * from test_table", self.db) - # HACK! + # HACK! Change this once indexes are handled properly. result.index = frame.index expected = frame @@ -175,6 +178,8 @@ def _check_roundtrip(self, frame): expected = frame.copy() expected.index = Index(range(len(frame2))) + 10 expected.index.name = 'Idx' + print expected.index.names + print result.index.names tm.assert_frame_equal(expected, result) def test_tquery(self): @@ -239,20 +244,27 @@ def test_onecolumn_of_integer(self): class TestMySQL(unittest.TestCase): def setUp(self): + _skip_if_no_MySQLdb() + import MySQLdb try: - import MySQLdb - except ImportError: - raise nose.SkipTest + # Try Travis defaults. + # No real user should allow root access with a blank password. 
+ self.db = MySQLdb.connect(host='localhost', user='root', passwd='', + db='pandas_nosetest') + except: + pass + else: + return try: self.db = MySQLdb.connect(read_default_group='pandas') - except MySQLdb.Error, e: + except MySQLdb.ProgrammingError, e: raise nose.SkipTest( - "Cannot connect to database. " "Create a group of connection parameters under the heading " "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf. ") - except MySQLdb.ProgrammingError, e: + except MySQLdb.Error, e: raise nose.SkipTest( + "Cannot connect to database. " "Create a group of connection parameters under the heading " "[pandas] in your system's mysql default file, " "typically located at ~/.my.cnf or /etc/.my.cnf. ") @@ -288,7 +300,9 @@ def test_execute(self): drop_sql = "DROP TABLE IF EXISTS test" create_sql = sql.get_schema(frame, 'test', 'mysql') cur = self.db.cursor() - cur.execute(drop_sql) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Unknown table.*") + cur.execute(drop_sql) cur.execute(create_sql) ins = "INSERT INTO test VALUES (%s, %s, %s, %s)" @@ -379,27 +393,36 @@ def _check_roundtrip(self, frame): _skip_if_no_MySQLdb() drop_sql = "DROP TABLE IF EXISTS test_table" cur = self.db.cursor() - cur.execute(drop_sql) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Unknown table.*") + cur.execute(drop_sql) sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql') result = sql.read_frame("select * from test_table", self.db) - # HACK! + # HACK! Change this once indexes are handled properly. 
result.index = frame.index + result.index.name = frame.index.name expected = frame tm.assert_frame_equal(result, expected) frame['txt'] = ['a'] * len(frame) frame2 = frame.copy() - frame2['Idx'] = Index(range(len(frame2))) + 10 + index = Index(range(len(frame2))) + 10 + frame2['Idx'] = index drop_sql = "DROP TABLE IF EXISTS test_table2" cur = self.db.cursor() - cur.execute(drop_sql) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Unknown table.*") + cur.execute(drop_sql) sql.write_frame(frame2, name='test_table2', con=self.db, flavor='mysql') result = sql.read_frame("select * from test_table2", self.db, index_col='Idx') expected = frame.copy() - expected.index = Index(range(len(frame2))) + 10 + + # HACK! Change this once indexes are handled properly. + expected.index = index + expected.index.names = result.index.names tm.assert_frame_equal(expected, result) def test_tquery(self):
A first step for #4163
https://api.github.com/repos/pandas-dev/pandas/pulls/4177
2013-07-09T16:01:36Z
2013-07-10T18:26:39Z
2013-07-10T18:26:39Z
2014-07-16T08:18:34Z
TST: remove double call to yahoo finance
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index 2f4185154b8e6..df1b292d9ba5f 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -117,8 +117,6 @@ def test_get_quote_series(self): @network def test_get_quote_string(self): df = web.get_quote_yahoo('GOOG') - df2 = web.get_quote_yahoo('GOOG') - assert_frame_equal(df, df2) @network def test_get_quote_stringlist(self):
closes #4174.
https://api.github.com/repos/pandas-dev/pandas/pulls/4176
2013-07-09T15:25:25Z
2013-07-09T16:24:44Z
2013-07-09T16:24:44Z
2014-07-16T08:18:32Z
DOC to_datetime warning about dayfirst strictness
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index c39f65f95d99f..dc3eb1183dd2b 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -61,6 +61,8 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, Errors are ignored by default (values left untouched) dayfirst : boolean, default False If True parses dates with the day first, eg 20/01/2005 + Warning: dayfirst=True is not strict, but will prefer to parse + with day first (this is a known bug). utc : boolean, default None Return UTC DatetimeIndex if True (converting any tz-aware datetime.datetime objects as well)
cc #3341 Doesn't address the issue, but warns users about it in the docstring (it's already got a warning in the docs).
https://api.github.com/repos/pandas-dev/pandas/pulls/4175
2013-07-09T14:49:40Z
2013-07-09T15:41:56Z
2013-07-09T15:41:56Z
2014-07-16T08:18:31Z
BLD: use the wheel url for scikits timeseries
diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt index 797066a0f1699..57acc459adc13 100644 --- a/ci/requirements-2.7.txt +++ b/ci/requirements-2.7.txt @@ -12,4 +12,4 @@ xlrd==0.9.2 patsy==0.1.0 html5lib==1.0b2 lxml==3.2.1 -http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r= +scikits.timeseries==0.91.3
This was previously downloading from SourceForge; it now uses the wheel.
https://api.github.com/repos/pandas-dev/pandas/pulls/4168
2013-07-09T01:02:38Z
2013-07-09T14:29:05Z
2013-07-09T14:29:04Z
2014-07-16T08:18:29Z
TST: to_datetime format tests
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index f952483f54a9a..07780b575fa95 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -793,17 +793,40 @@ def test_to_datetime_default(self): xp = datetime(2001, 1, 1) self.assert_(rs, xp) + def test_dayfirst(self): + + # GH 3341 + result = to_datetime('13-01-2012', dayfirst=True) + expected = Timestamp('20120113') + self.assert_(result == expected) + + #### dayfirst is essentially broken + #### to_datetime('01-13-2012', dayfirst=True) + #### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True)) + def test_to_datetime_format(self): values = ['1/1/2000', '1/2/2000', '1/3/2000'] - def _parse_format(fmt, values): - return to_datetime([datetime.strptime(x, fmt) - for x in values]) - - for fmt in ['%d/%m/%Y', '%m/%d/%Y']: - result = to_datetime(values, format=fmt) - expected = _parse_format(fmt, values) - self.assert_(result.equals(expected)) + results1 = [ Timestamp('20000101'), Timestamp('20000201'), + Timestamp('20000301') ] + results2 = [ Timestamp('20000101'), Timestamp('20000102'), + Timestamp('20000103') ] + for vals, expecteds in [ (values, (Index(results1), Index(results2))), + (Series(values),(Series(results1), Series(results2))), + (values[0], (results1[0], results2[0])), + (values[1], (results1[1], results2[1])), + (values[2], (results1[2], results2[2])) ]: + + for i, fmt in enumerate(['%d/%m/%Y', '%m/%d/%Y']): + result = to_datetime(vals, format=fmt) + expected = expecteds[i] + + if isinstance(expected, Series): + assert_series_equal(result, Series(expected)) + elif isinstance(expected, Timestamp): + self.assert_(result == expected) + else: + self.assert_(result.equals(expected)) def test_to_datetime_format_microsecond(self): val = '01-Apr-2011 00:00:01.978' @@ -2812,10 +2835,10 @@ def check(val,unit=None,h=1,s=1,us=0): # nan result = Timestamp(np.nan) self.assert_(result is NaT) - + result = 
Timestamp(None) self.assert_(result is NaT) - + result = Timestamp(iNaT) self.assert_(result is NaT) diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py index c39f65f95d99f..6dc6b91073f19 100644 --- a/pandas/tseries/tools.py +++ b/pandas/tseries/tools.py @@ -76,12 +76,26 @@ def to_datetime(arg, errors='ignore', dayfirst=False, utc=None, box=True, ------- ret : datetime if parsing succeeded """ + from pandas import Timestamp from pandas.core.series import Series from pandas.tseries.index import DatetimeIndex - def _convert_f(arg): - arg = com._ensure_object(arg) + def _convert_listlike(arg, box): + + if isinstance(arg, (list,tuple)): + arg = np.array(arg, dtype='O') + if com.is_datetime64_dtype(arg): + if box and not isinstance(arg, DatetimeIndex): + try: + return DatetimeIndex(arg, tz='utc' if utc else None) + except ValueError, e: + values, tz = tslib.datetime_to_datetime64(arg) + return DatetimeIndex._simple_new(values, None, tz=tz) + + return arg + + arg = com._ensure_object(arg) try: if format is not None: result = tslib.array_strptime(arg, format) @@ -92,6 +106,7 @@ def _convert_f(arg): if com.is_datetime64_dtype(result) and box: result = DatetimeIndex(result, tz='utc' if utc else None) return result + except ValueError, e: try: values, tz = tslib.datetime_to_datetime64(arg) @@ -99,37 +114,17 @@ def _convert_f(arg): except (ValueError, TypeError): raise e - def _convert_listlike(arg): - if isinstance(arg, list): - arg = np.array(arg, dtype='O') - - if com.is_datetime64_dtype(arg): - if box and not isinstance(arg, DatetimeIndex): - try: - return DatetimeIndex(arg, tz='utc' if utc else None) - except ValueError, e: - try: - values, tz = tslib.datetime_to_datetime64(arg) - return DatetimeIndex._simple_new(values, None, tz=tz) - except (ValueError, TypeError): - raise e - return arg - - return _convert_f(arg) - if arg is None: return arg - elif isinstance(arg, datetime): + elif isinstance(arg, Timestamp): return arg elif isinstance(arg, Series): - 
values = arg.values - if not com.is_datetime64_dtype(values): - values = _convert_f(values) + values = _convert_listlike(arg.values, box=False) return Series(values, index=arg.index, name=arg.name) - elif isinstance(arg, (np.ndarray, list)): - return _convert_listlike(arg) + elif com.is_list_like(arg): + return _convert_listlike(arg, box=box) - return _convert_listlike(np.array([ arg ], dtype='O'))[0] + return _convert_listlike(np.array([ arg ]), box=box)[0] class DateParseError(ValueError): pass
related to #4166
https://api.github.com/repos/pandas-dev/pandas/pulls/4167
2013-07-09T00:34:35Z
2013-07-09T14:49:19Z
2013-07-09T14:49:19Z
2014-07-16T08:18:27Z
BUG: wrong parsing of microseconds with format arg (#4152)
diff --git a/doc/source/release.rst b/doc/source/release.rst index f4d61e70e94b3..a965d92e5dbe3 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -321,7 +321,8 @@ pandas 0.12 object Series/Frame was not converting properly (:issue:`4119`) - Fixed bugs in multi-index selection with column multi-index and duplicates (:issue:`4145`, :issue:`4146`) - + - Fixed bug in the parsing of microseconds when using the ``format`` + argument in ``to_datetime`` (:issue:`4152`) pandas 0.11.0 ============= diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index b44d54d0eb31e..ff27df1f68bd3 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -459,7 +459,9 @@ Bug Fixes (:issue:`4089`) - Fixed bug in ``DataFrame.replace`` where a nested dict wasn't being iterated over when regex=False (:issue:`4115`) - + - Fixed bug in the parsing of microseconds when using the ``format`` + argument in ``to_datetime`` (:issue:`4152`) + See the :ref:`full release notes <release>` or issue tracker on GitHub for a complete list. 
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py index d8c3caaabb36f..f952483f54a9a 100644 --- a/pandas/tseries/tests/test_timeseries.py +++ b/pandas/tseries/tests/test_timeseries.py @@ -805,6 +805,13 @@ def _parse_format(fmt, values): expected = _parse_format(fmt, values) self.assert_(result.equals(expected)) + def test_to_datetime_format_microsecond(self): + val = '01-Apr-2011 00:00:01.978' + format = '%d-%b-%Y %H:%M:%S.%f' + result = to_datetime(val, format=format) + exp = dt.datetime.strptime(val, format) + self.assert_(result == exp) + def test_to_datetime_on_datetime64_series(self): # #2699 s = Series(date_range('1/1/2000', periods=10)) diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx index 48def4a22a673..eb1b460df0bca 100644 --- a/pandas/tslib.pyx +++ b/pandas/tslib.pyx @@ -1049,8 +1049,7 @@ def array_strptime(ndarray[object] values, object fmt): Py_ssize_t i, n = len(values) pandas_datetimestruct dts ndarray[int64_t] iresult - int year, month, day, minute, hour, second, weekday, julian - float64_t fraction + int year, month, day, minute, hour, second, fraction, weekday, julian global _TimeRE_cache, _regex_cache with _cache_lock: @@ -1247,7 +1246,7 @@ def array_strptime(ndarray[object] values, object fmt): dts.hour = hour dts.min = minute dts.sec = second - dts.us = int(fraction * 1000000) + dts.us = fraction iresult[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts) _check_dts_bounds(iresult[i], &dts)
Closes #4152. `fraction` was already in microseconds, so no need to multiply it with 1000000, which caused faulty parsing of dates with microseconds. With this commit I get: ``` pandas version: 0.12.0.dev Value to parse: 01-Apr-2011 00:00:01.978 datetime.strptime : 2011-04-01 00:00:01.978000 to_datetime, w/out format: 2011-04-01 00:00:01.978000 to_datetime, w/ format : 2011-04-01 00:00:01.978000 ``` for ``` import datetime as dt import pandas as pd val = '01-Apr-2011 00:00:01.978' print 'pandas version:',pd.__version__ print 'Value to parse:',val format = '%d-%b-%Y %H:%M:%S.%f' print 'datetime.strptime :',dt.datetime.strptime(val, format) print 'to_datetime, w/out format:',pd.to_datetime(val) print 'to_datetime, w/ format :', pd.to_datetime(val, format=format) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/4166
2013-07-08T21:59:27Z
2013-07-09T13:10:36Z
2013-07-09T13:10:36Z
2014-07-16T08:18:25Z
DOC: increased width of text area (#4159)
diff --git a/doc/source/themes/nature_with_gtoc/static/nature.css_t b/doc/source/themes/nature_with_gtoc/static/nature.css_t index ebfeb6e5a8b08..2e0bed922c1e6 100644 --- a/doc/source/themes/nature_with_gtoc/static/nature.css_t +++ b/doc/source/themes/nature_with_gtoc/static/nature.css_t @@ -30,7 +30,8 @@ div.documentwrapper { div.bodywrapper { /* ugly hack, probably not attractive with other font size for re*/ margin: 0 0 0 {{ theme_sidebarwidth|toint}}px; - max-width: 600px; + min-width: 540px; + max-width: 720px; } diff --git a/doc/sphinxext/ipython_directive.py b/doc/sphinxext/ipython_directive.py index b237341e81125..0c28e397a0005 100644 --- a/doc/sphinxext/ipython_directive.py +++ b/doc/sphinxext/ipython_directive.py @@ -621,6 +621,8 @@ class IpythonDirective(Directive): shell = EmbeddedSphinxShell() + seen_docs = set() + def get_config_options(self): # contains sphinx configuration variables config = self.state.document.settings.env.config @@ -644,6 +646,12 @@ def get_config_options(self): return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout def setup(self): + + if not self.state.document.current_source in self.seen_docs: + self.shell.IP.history_manager.reset() + self.shell.IP.execution_count = 1 + self.seen_docs.add(self.state.document.current_source) + # get config values (savefig_dir, source_dir, rgxin, rgxout, promptin, promptout) = self.get_config_options()
See #4159 This adds a minimum and maximum width of the text area (instead of one fixed value of, at the moment 600px), and I also increased the maximum value (from 600 to 840). I took the values of the standard sphinx docs which are 780-1080 (minus 240 for the sidebar, in the standard sphinx docs the width is defined for the whole body, here only for the main text area)
https://api.github.com/repos/pandas-dev/pandas/pulls/4165
2013-07-08T20:51:57Z
2013-07-10T18:55:26Z
2013-07-10T18:55:26Z
2014-06-17T19:31:27Z
ENH: add expression evaluation functionality via eval
diff --git a/bench/bench_with_subset.R b/bench/bench_with_subset.R new file mode 100644 index 0000000000000..69d0f7a9eec63 --- /dev/null +++ b/bench/bench_with_subset.R @@ -0,0 +1,53 @@ +library(microbenchmark) +library(data.table) + + +data.frame.subset.bench <- function (n=1e7, times=30) { + df <- data.frame(a=rnorm(n), b=rnorm(n), c=rnorm(n)) + print(microbenchmark(subset(df, a <= b & b <= (c ^ 2 + b ^ 2 - a) & b > c), + times=times)) +} + + +# data.table allows something very similar to query with an expression +# but we have chained comparisons AND we're faster BOO YAH! +data.table.subset.expression.bench <- function (n=1e7, times=30) { + dt <- data.table(a=rnorm(n), b=rnorm(n), c=rnorm(n)) + print(microbenchmark(dt[, a <= b & b <= (c ^ 2 + b ^ 2 - a) & b > c], + times=times)) +} + + +# compare against subset with data.table for good measure +data.table.subset.bench <- function (n=1e7, times=30) { + dt <- data.table(a=rnorm(n), b=rnorm(n), c=rnorm(n)) + print(microbenchmark(subset(dt, a <= b & b <= (c ^ 2 + b ^ 2 - a) & b > c), + times=times)) +} + + +data.frame.with.bench <- function (n=1e7, times=30) { + df <- data.frame(a=rnorm(n), b=rnorm(n), c=rnorm(n)) + + print(microbenchmark(with(df, a + b * (c ^ 2 + b ^ 2 - a) / (a * c) ^ 3), + times=times)) +} + + +data.table.with.bench <- function (n=1e7, times=30) { + dt <- data.table(a=rnorm(n), b=rnorm(n), c=rnorm(n)) + print(microbenchmark(with(dt, a + b * (c ^ 2 + b ^ 2 - a) / (a * c) ^ 3), + times=times)) +} + + +bench <- function () { + data.frame.subset.bench() + data.table.subset.expression.bench() + data.table.subset.bench() + data.frame.with.bench() + data.table.with.bench() +} + + +bench() diff --git a/bench/bench_with_subset.py b/bench/bench_with_subset.py new file mode 100644 index 0000000000000..99b98c9838a90 --- /dev/null +++ b/bench/bench_with_subset.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python + +""" +Microbenchmarks for comparison with R's "with" and "subset" functions +""" + +from __future__ 
import print_function +import numpy as np +from numpy import array +from timeit import repeat as timeit +from pandas.compat import range, zip +from pandas import DataFrame + + +setup_common = """from pandas import DataFrame +from numpy.random import randn +df = DataFrame(randn(%d, 3), columns=list('abc')) +%s""" + + +setup_with = "s = 'a + b * (c ** 2 + b ** 2 - a) / (a * c) ** 3'" + + +def bench_with(n, times=10, repeat=3, engine='numexpr'): + return np.array(timeit('df.eval(s, engine=%r)' % engine, + setup=setup_common % (n, setup_with), + repeat=repeat, number=times)) / times + + +setup_subset = "s = 'a <= b <= c ** 2 + b ** 2 - a and b > c'" + + +def bench_subset(n, times=10, repeat=3, engine='numexpr'): + return np.array(timeit('df.query(s, engine=%r)' % engine, + setup=setup_common % (n, setup_subset), + repeat=repeat, number=times)) / times + + +def bench(mn=1, mx=7, num=100, engines=('python', 'numexpr'), verbose=False): + r = np.logspace(mn, mx, num=num).round().astype(int) + + ev = DataFrame(np.empty((num, len(engines))), columns=engines) + qu = ev.copy(deep=True) + + ev['size'] = qu['size'] = r + + for engine in engines: + for i, n in enumerate(r): + if verbose: + print('engine: %r, i == %d' % (engine, i)) + ev.loc[i, engine] = bench_with(n, times=1, repeat=1, engine=engine) + qu.loc[i, engine] = bench_subset(n, times=1, repeat=1, + engine=engine) + + return ev, qu + + +def plot_perf(df, engines, title, filename=None): + from matplotlib.pyplot import figure, rc + + try: + from mpltools import style + except ImportError: + pass + else: + style.use('ggplot') + + rc('text', usetex=True) + + fig = figure(figsize=(4, 3), dpi=100) + ax = fig.add_subplot(111) + + for engine in engines: + ax.plot(df.size, df[engine], label=engine, lw=2) + + ax.set_xlabel('Number of Rows') + ax.set_ylabel('Time (s)') + ax.set_title(title) + ax.legend(loc='best') + ax.tick_params(top=False, right=False) + + fig.tight_layout() + + if filename is not None: + fig.savefig(filename) + 
+ +if __name__ == '__main__': + import os + import pandas as pd + + pandas_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) + static_path = os.path.join(pandas_dir, 'doc', 'source', '_static') + + join = lambda p: os.path.join(static_path, p) + + fn = join('eval-query-perf-data.h5') + + engines = 'python', 'numexpr' + + if not os.path.exists(fn): + ev, qu = bench(verbose=True) + ev.to_hdf(fn, 'eval') + qu.to_hdf(fn, 'query') + else: + ev = pd.read_hdf(fn, 'eval') + qu = pd.read_hdf(fn, 'query') + + plot_perf(ev, engines, 'DataFrame.eval()', filename=join('eval-perf.png')) + plot_perf(qu, engines, 'DataFrame.query()', + filename=join('query-perf.png')) + + plot_perf(ev[ev.size <= 50000], engines, 'DataFrame.eval()', + filename=join('eval-perf-small.png')) + plot_perf(qu[qu.size <= 100000], engines, 'DataFrame.query()', + filename=join('query-perf-small.png')) diff --git a/doc/source/_static/eval-perf-small.png b/doc/source/_static/eval-perf-small.png new file mode 100644 index 0000000000000..d86018363ffdc Binary files /dev/null and b/doc/source/_static/eval-perf-small.png differ diff --git a/doc/source/_static/eval-perf.png b/doc/source/_static/eval-perf.png new file mode 100644 index 0000000000000..14c69c1b85d9e Binary files /dev/null and b/doc/source/_static/eval-perf.png differ diff --git a/doc/source/_static/query-perf-small.png b/doc/source/_static/query-perf-small.png new file mode 100644 index 0000000000000..56fcc787a66af Binary files /dev/null and b/doc/source/_static/query-perf-small.png differ diff --git a/doc/source/_static/query-perf.png b/doc/source/_static/query-perf.png new file mode 100644 index 0000000000000..d96318df94357 Binary files /dev/null and b/doc/source/_static/query-perf.png differ diff --git a/doc/source/api.rst b/doc/source/api.rst index 538965d0be7ad..28c1515e93bc5 100644 --- a/doc/source/api.rst +++ b/doc/source/api.rst @@ -155,6 +155,17 @@ Top-level dealing with datetimes to_datetime +Top-level evaluation 
+~~~~~~~~~~~~~~~~~~~~ + +.. currentmodule:: pandas + +.. autosummary:: + :toctree: generated/ + + eval + + Standard moving window functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -452,6 +463,7 @@ Indexing, iteration DataFrame.tail DataFrame.xs DataFrame.isin + DataFrame.query Binary operator functions ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -502,6 +514,7 @@ Computations / Descriptive Stats DataFrame.cumsum DataFrame.describe DataFrame.diff + DataFrame.eval DataFrame.kurt DataFrame.mad DataFrame.max diff --git a/doc/source/comparison_with_r.rst b/doc/source/comparison_with_r.rst index 5759768051c0e..ef609aaa7d70c 100644 --- a/doc/source/comparison_with_r.rst +++ b/doc/source/comparison_with_r.rst @@ -1,28 +1,87 @@ .. currentmodule:: pandas .. _compare_with_r: -******************************* Comparison with R / R libraries ******************************* -Since pandas aims to provide a lot of the data manipulation and analysis -functionality that people use R for, this page was started to provide a more -detailed look at the R language and it's many 3rd party libraries as they -relate to pandas. In offering comparisons with R and CRAN libraries, we care -about the following things: +Since ``pandas`` aims to provide a lot of the data manipulation and analysis +functionality that people use `R <http://www.r-project.org/>`__ for, this page +was started to provide a more detailed look at the `R language +<http://en.wikipedia.org/wiki/R_(programming_language)>`__ and its many third +party libraries as they relate to ``pandas``. In comparisons with R and CRAN +libraries, we care about the following things: - - **Functionality / flexibility**: what can / cannot be done with each tool - - **Performance**: how fast are operations. Hard numbers / benchmarks are + - **Functionality / flexibility**: what can/cannot be done with each tool + - **Performance**: how fast are operations. 
Hard numbers/benchmarks are preferable - - **Ease-of-use**: is one tool easier or harder to use (you may have to be - the judge of this given side-by-side code comparisons) + - **Ease-of-use**: Is one tool easier/harder to use (you may have to be + the judge of this, given side-by-side code comparisons) + +This page is also here to offer a bit of a translation guide for users of these +R packages. + +Base R +------ + +|subset|_ +~~~~~~~~~~ + +.. versionadded:: 0.13 + +The :meth:`~pandas.DataFrame.query` method is similar to the base R ``subset`` +function. In R you might want to get the rows of a ``data.frame`` where one +column's values are less than another column's values: + + .. code-block:: r + + df <- data.frame(a=rnorm(10), b=rnorm(10)) + subset(df, a <= b) + df[df$a <= df$b,] # note the comma + +In ``pandas``, there are a few ways to perform subsetting. You can use +:meth:`~pandas.DataFrame.query` or pass an expression as if it were an +index/slice as well as standard boolean indexing: + + .. ipython:: python + + from pandas import DataFrame + from numpy.random import randn + + df = DataFrame({'a': randn(10), 'b': randn(10)}) + df.query('a <= b') + df[df.a <= df.b] + df.loc[df.a <= df.b] -As I do not have an encyclopedic knowledge of R packages, feel free to suggest -additional CRAN packages to add to this list. This is also here to offer a big -of a translation guide for users of these R packages. +For more details and examples see :ref:`the query documentation +<indexing.query>`. -data.frame ----------- + +|with|_ +~~~~~~~~ + +.. versionadded:: 0.13 + +An expression using a data.frame called ``df`` in R with the columns ``a`` and +``b`` would be evaluated using ``with`` like so: + + .. code-block:: r + + df <- data.frame(a=rnorm(10), b=rnorm(10)) + with(df, a + b) + df$a + df$b # same as the previous expression + +In ``pandas`` the equivalent expression, using the +:meth:`~pandas.DataFrame.eval` method, would be: + + .. 
ipython:: python + + df = DataFrame({'a': randn(10), 'b': randn(10)}) + df.eval('a + b') + df.a + df.b # same as the previous expression + +In certain cases :meth:`~pandas.DataFrame.eval` will be much faster than +evaluation in pure Python. For more details and examples see :ref:`the eval +documentation <enhancingperf.eval>`. zoo --- @@ -36,3 +95,9 @@ plyr reshape / reshape2 ------------------ + +.. |with| replace:: ``with`` +.. _with: http://finzi.psych.upenn.edu/R/library/base/html/with.html + +.. |subset| replace:: ``subset`` +.. _subset: http://finzi.psych.upenn.edu/R/library/base/html/subset.html diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 95428bd27e2a2..87b68248c3e9e 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -225,8 +225,8 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra .. note:: - Loop like this would be *extremely* slow in python, but in cython looping over - numpy arrays is *fast*. + Loops like this would be *extremely* slow in python, but in Cython looping + over numpy arrays is *fast*. .. ipython:: python @@ -289,3 +289,262 @@ Further topics - Loading C modules into cython. Read more in the `cython docs <http://docs.cython.org/>`__. + +.. _enhancingperf.eval: + +Expression Evaluation via :func:`~pandas.eval` (Experimental) +------------------------------------------------------------- + +.. versionadded:: 0.13 + +The top-level function :func:`~pandas.eval` implements expression evaluation of +:class:`~pandas.Series` and :class:`~pandas.DataFrame` objects. + +.. note:: + + To benefit from using :func:`~pandas.eval` you need to + install ``numexpr``. See the :ref:`recommended dependencies section + <install.recommended_dependencies>` for more details. 
+ +The point of using :func:`~pandas.eval` for expression evaluation rather than +plain Python is two-fold: 1) large :class:`~pandas.DataFrame` objects are +evaluated more efficiently and 2) large arithmetic and boolean expressions are +evaluated all at once by the underlying engine (by default ``numexpr`` is used +for evaluation). + +.. note:: + + You should not use :func:`~pandas.eval` for simple + expressions or for expressions involving small DataFrames. In fact, + :func:`~pandas.eval` is many orders of magnitude slower for + smaller expressions/objects than plain ol' Python. A good rule of thumb is + to only use :func:`~pandas.eval` when you have a + :class:`~pandas.core.frame.DataFrame` with more than 10,000 rows. + + +:func:`~pandas.eval` supports all arithmetic expressions supported by the +engine in addition to some extensions available only in pandas. + +.. note:: + + The larger the frame and the larger the expression the more speedup you will + see from using :func:`~pandas.eval`. + + +:func:`~pandas.eval` Examples +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:func:`~pandas.eval` works wonders for expressions containing large arrays + +First let's create 4 decent-sized arrays to play with: + +.. ipython:: python + + import pandas as pd + from pandas import DataFrame, Series + from numpy.random import randn + import numpy as np + nrows, ncols = 20000, 100 + df1, df2, df3, df4 = [DataFrame(randn(nrows, ncols)) for _ in xrange(4)] + + +Now let's compare adding them together using plain ol' Python versus +:func:`~pandas.eval`: + + +.. ipython:: python + + %timeit df1 + df2 + df3 + df4 + +.. ipython:: python + + %timeit pd.eval('df1 + df2 + df3 + df4') + + +Now let's do the same thing but with comparisons: + +.. ipython:: python + + %timeit (df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0) + +.. ipython:: python + + %timeit pd.eval('(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)') + + +:func:`~pandas.eval` also works with unaligned pandas objects: + + +.. 
ipython:: python + + s = Series(randn(50)) + %timeit df1 + df2 + df3 + df4 + s + +.. ipython:: python + + %timeit pd.eval('df1 + df2 + df3 + df4 + s') + +.. note:: + + Operations such as ``1 and 2`` should be performed in Python. An exception + will be raised if you try to performed any boolean or bitwise operations + with scalar operands that are not of type ``bool`` or ``np.bool_``. *This + includes bitwise operations on scalars.* You should perform these kinds of + operations in Python. + +The ``DataFrame.eval`` method (Experimental) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In addition to the top level :func:`~pandas.eval` function you can also +evaluate an expression in the "context" of a ``DataFrame``. + + +.. ipython:: python + + df = DataFrame(randn(5, 2), columns=['a', 'b']) + df.eval('a + b') + + +Any expression that is a valid :func:`~pandas.eval` expression is also a valid +``DataFrame.eval`` expression, with the added benefit that *you don't have to +prefix the name of the* ``DataFrame`` *to the column you're interested in +evaluating*. + + +Local Variables +~~~~~~~~~~~~~~~ + +You can refer to local variables the same way you would in vanilla Python + +.. ipython:: python + + df = DataFrame(randn(5, 2), columns=['a', 'b']) + newcol = randn(len(df)) + df.eval('b + newcol') + +.. note:: + + The one exception is when you have a local (or global) with the same name as + a column in the ``DataFrame`` + + .. code-block:: python + + df = DataFrame(randn(5, 2), columns=['a', 'b']) + a = randn(len(df)) + df.eval('a + b') + NameResolutionError: resolvers and locals overlap on names ['a'] + + + To deal with these conflicts, a special syntax exists for referring + variables with the same name as a column + + .. ipython:: python + :suppress: + + a = randn(len(df)) + + .. ipython:: python + + df.eval('@a + b') + + The same is true for :meth:`~pandas.DataFrame.query` + + .. ipython:: python + + df.query('@a < b') + + .. 
ipython:: python + :suppress: + + del a + + +:func:`~pandas.eval` Parsers +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are two different parsers and and two different engines you can use as +the backend. + +The default ``'pandas'`` parser allows a more intuitive syntax for expressing +query-like operations (comparisons, conjunctions and disjunctions). In +particular, the precedence of the ``&`` and ``|`` operators is made equal to +the precedence of the corresponding boolean operations ``and`` and ``or``. + +For example, the above conjunction can be written without parentheses. +Alternatively, you can use the ``'python'`` parser to enforce strict Python +semantics. + +.. ipython:: python + + expr = '(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)' + x = pd.eval(expr, parser='python') + expr_no_parens = 'df1 > 0 & df2 > 0 & df3 > 0 & df4 > 0' + y = pd.eval(expr_no_parens, parser='pandas') + np.all(x == y) + + +The same expression can be "anded" together with the word :keyword:`and` as +well: + +.. ipython:: python + + expr = '(df1 > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)' + x = pd.eval(expr, parser='python') + expr_with_ands = 'df1 > 0 and df2 > 0 and df3 > 0 and df4 > 0' + y = pd.eval(expr_with_ands, parser='pandas') + np.all(x == y) + + +The ``and`` and ``or`` operators here have the same precedence that they would +in vanilla Python. + + +:func:`~pandas.eval` Backends +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There's also the option to make :func:`~pandas.eval` operate identical to plain +ol' Python. + +.. note:: + + Using the ``'python'`` engine is generally *not* useful, except for testing + other :func:`~pandas.eval` engines against it. You will acheive **no** + performance benefits using :func:`~pandas.eval` with ``engine='python'``. + +You can see this by using :func:`~pandas.eval` with the ``'python'`` engine is +actually a bit slower (not by much) than evaluating the same expression in +Python: + +.. ipython:: python + + %timeit df1 + df2 + df3 + df4 + +.. 
ipython:: python + + %timeit pd.eval('df1 + df2 + df3 + df4', engine='python') + + +:func:`~pandas.eval` Performance +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:func:`~pandas.eval` is intended to speed up certain kinds of operations. In +particular, those operations involving complex expressions with large +``DataFrame``/``Series`` objects should see a significant performance benefit. +Here is a plot showing the running time of :func:`~pandas.eval` as function of +the size of the frame involved in the computation. The two lines are two +different engines. + + +.. image:: _static/eval-perf.png + + +.. note:: + + Operations with smallish objects (around 15k-20k rows) are faster using + plain Python: + + .. image:: _static/eval-perf-small.png + + +This plot was created using a ``DataFrame`` with 3 columns each containing +floating point values generated using ``numpy.random.randn()``. diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index d2fd11ee43615..2f2a47d4b0bf2 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -26,48 +26,58 @@ The axis labeling information in pandas objects serves many purposes: - Enables automatic and explicit data alignment - Allows intuitive getting and setting of subsets of the data set -In this section / chapter, we will focus on the final point: namely, how to -slice, dice, and generally get and set subsets of pandas objects. The primary -focus will be on Series and DataFrame as they have received more development -attention in this area. Expect more work to be invested higher-dimensional data -structures (including Panel) in the future, especially in label-based advanced +In this section, we will focus on the final point: namely, how to slice, dice, +and generally get and set subsets of pandas objects. The primary focus will be +on Series and DataFrame as they have received more development attention in +this area. 
Expect more work to be invested higher-dimensional data structures +(including ``Panel``) in the future, especially in label-based advanced indexing. .. note:: - The Python and NumPy indexing operators ``[]`` and attribute operator ``.`` provide quick and easy access to pandas data structures - across a wide range of use cases. This makes interactive work intuitive, as - there's little new to learn if you already know how to deal with Python - dictionaries and NumPy arrays. However, since the type of the data to be accessed - isn't known in advance, directly using - standard operators has some optimization limits. For production code, we recommended - that you take advantage of the optimized pandas data access methods exposed in this chapter. + The Python and NumPy indexing operators ``[]`` and attribute operator ``.`` + provide quick and easy access to pandas data structures across a wide range + of use cases. This makes interactive work intuitive, as there's little new + to learn if you already know how to deal with Python dictionaries and NumPy + arrays. However, since the type of the data to be accessed isn't known in + advance, directly using standard operators has some optimization limits. For + production code, we recommended that you take advantage of the optimized + pandas data access methods exposed in this chapter. .. warning:: - Whether a copy or a reference is returned for a setting operation, may depend on the context. - This is sometimes called ``chained assignment`` and should be avoided. - See :ref:`Returning a View versus Copy <indexing.view_versus_copy>` + Whether a copy or a reference is returned for a setting operation, may + depend on the context. This is sometimes called ``chained assignment`` and + should be avoided. 
See :ref:`Returning a View versus Copy + <indexing.view_versus_copy>` See the :ref:`cookbook<cookbook.selection>` for some advanced strategies -Choice ------- +Different Choices for Indexing (``loc``, ``iloc``, and ``ix``) +-------------------------------------------------------------- + +.. versionadded:: 0.11.0 -Starting in 0.11.0, object selection has had a number of user-requested additions in -order to support more explicit location based indexing. Pandas now supports -three types of multi-axis indexing. +Object selection has had a number of user-requested additions in order to +support more explicit location based indexing. Pandas now supports three types +of multi-axis indexing. -- ``.loc`` is strictly label based, will raise ``KeyError`` when the items are not found, allowed inputs are: +- ``.loc`` is strictly label based, will raise ``KeyError`` when the items are + not found, allowed inputs are: - - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a *label* of the index. This use is **not** an integer position along the index) + - A single label, e.g. ``5`` or ``'a'``, (note that ``5`` is interpreted as a + *label* of the index. This use is **not** an integer position along the + index) - A list or array of labels ``['a', 'b', 'c']`` - - A slice object with labels ``'a':'f'``, (note that contrary to usual python slices, **both** the start and the stop are included!) + - A slice object with labels ``'a':'f'``, (note that contrary to usual python + slices, **both** the start and the stop are included!) - A boolean array See more at :ref:`Selection by Label <indexing.label>` -- ``.iloc`` is strictly integer position based (from ``0`` to ``length-1`` of the axis), will raise ``IndexError`` when the requested indicies are out of bounds. Allowed inputs are: +- ``.iloc`` is strictly integer position based (from ``0`` to ``length-1`` of + the axis), will raise ``IndexError`` when the requested indicies are out of + bounds. 
Allowed inputs are: - An integer e.g. ``5`` - A list or array of integers ``[4, 3, 0]`` @@ -75,20 +85,24 @@ three types of multi-axis indexing. See more at :ref:`Selection by Position <indexing.integer>` -- ``.ix`` supports mixed integer and label based access. It is primarily label based, but will fallback to integer positional access. ``.ix`` is the most general - and will support any of the inputs to ``.loc`` and ``.iloc``, as well as support for floating point label schemes. ``.ix`` is especially useful when dealing with mixed positional and label - based hierarchial indexes. - - As using integer slices with ``.ix`` have different behavior depending on whether the slice is interpreted as position based or label based, it's +- ``.ix`` supports mixed integer and label based access. It is primarily label + based, but will fallback to integer positional access. ``.ix`` is the most + general and will support any of the inputs to ``.loc`` and ``.iloc``, as well + as support for floating point label schemes. ``.ix`` is especially useful + when dealing with mixed positional and label based hierarchial indexes. + As using integer slices with ``.ix`` have different behavior depending on + whether the slice is interpreted as position based or label based, it's usually better to be explicit and use ``.iloc`` or ``.loc``. - See more at :ref:`Advanced Indexing <indexing.advanced>`, :ref:`Advanced Hierarchical <indexing.advanced_hierarchical>` and :ref:`Fallback Indexing <indexing.fallback>` + See more at :ref:`Advanced Indexing <indexing.advanced>`, :ref:`Advanced + Hierarchical <indexing.advanced_hierarchical>` and :ref:`Fallback Indexing + <indexing.fallback>` Getting values from an object with multi-axes selection uses the following notation (using ``.loc`` as an example, but applies to ``.iloc`` and ``.ix`` as well). Any of the axes accessors may be the null slice ``:``. Axes left out of the specification are assumed to be ``:``. (e.g. 
``p.loc['a']`` is equiv to -``p.loc['a',:,:]``) +``p.loc['a', :, :]``) .. csv-table:: :header: "Object Type", "Indexers" @@ -100,7 +114,7 @@ the specification are assumed to be ``:``. (e.g. ``p.loc['a']`` is equiv to Panel; ``p.loc[item_indexer,major_indexer,minor_indexer]`` Deprecations -~~~~~~~~~~~~ +------------ Beginning with version 0.11.0, it's recommended that you transition away from the following methods as they *may* be deprecated in future versions. @@ -168,7 +182,7 @@ You may find this useful for applying a transform (in-place) to a subset of the columns. Attribute Access -~~~~~~~~~~~~~~~~ +---------------- .. _indexing.columns.multiple: @@ -213,7 +227,7 @@ If you are using the IPython environment, you may also use tab-completion to see these accessable attributes. Slicing ranges -~~~~~~~~~~~~~~ +-------------- The most robust and consistent way of slicing ranges along arbitrary axes is described in the :ref:`Selection by Position <indexing.integer>` section @@ -247,7 +261,7 @@ largely as a convenience since it is such a common operation. .. _indexing.label: Selection By Label -~~~~~~~~~~~~~~~~~~ +------------------ .. warning:: @@ -318,7 +332,7 @@ For getting a value explicity (equiv to deprecated ``df.get_value('a','A')``) .. _indexing.integer: Selection By Position -~~~~~~~~~~~~~~~~~~~~~ +--------------------- .. warning:: @@ -415,7 +429,7 @@ Pandas will detect this and raise ``IndexError``, rather than return an empty st .. _indexing.basics.partial_setting: Setting With Enlargement -~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------ .. versionadded:: 0.13 @@ -450,7 +464,7 @@ This is like an ``append`` operation on the ``DataFrame``. .. 
_indexing.basics.get_value: Fast scalar value getting and setting -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------- Since indexing with ``[]`` must handle a lot of cases (single-label access, slicing, boolean indexing, etc.), it has a bit of overhead in order to figure @@ -481,7 +495,7 @@ You can also set using these same indexers. df Boolean indexing -~~~~~~~~~~~~~~~~ +---------------- .. _indexing.boolean: @@ -572,8 +586,8 @@ You can also describe columns using integer location: df.isin(values, iloc=True) -Where and Masking -~~~~~~~~~~~~~~~~~ +The :meth:`~pandas.DataFrame.where` Method and Masking +------------------------------------------------------ Selecting values from a Series with a boolean vector generally returns a subset of the data. To guarantee that selection output has the same shape as @@ -673,8 +687,304 @@ This is equivalent (but faster than) the following. s.mask(s >= 0) df.mask(df >= 0) +.. _indexing.query: + +The :meth:`~pandas.DataFrame.query` Method (Experimental) +--------------------------------------------------------- + +.. versionadded:: 0.13 + +:class:`~pandas.DataFrame` objects have a :meth:`~pandas.DataFrame.query` +method that allows selection using an expression. + +You can get the value of the frame where column ``b`` has values +between the values of columns ``a`` and ``c``. For example: + +.. ipython:: python + :suppress: + + from numpy.random import randint, rand + np.random.seed(1234) + +.. ipython:: python + + n = 10 + df = DataFrame(rand(n, 3), columns=list('abc')) + df + + # pure python + df[(df.a < df.b) & (df.b < df.c)] + + # query + df.query('(a < b) & (b < c)') + +Do the same thing but fallback on a named index if there is no column +with the name ``a``. + +.. 
ipython:: python + + df = DataFrame(randint(n / 2, size=(n, 2)), columns=list('bc')) + df.index.name = 'a' + df + df.query('a < b and b < c') + +If instead you don't want to or cannot name your index, you can use the name +``index`` in your query expression: + +.. ipython:: python + :suppress: + + old_index = index + del index + +.. ipython:: python + + df = DataFrame(randint(n, size=(n, 2)), columns=list('bc')) + df + df.query('index < b < c') + +.. ipython:: python + :suppress: + + index = old_index + del old_index + + +:class:`~pandas.MultiIndex` :meth:`~pandas.DataFrame.query` Syntax +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can also use the levels of a ``DataFrame`` with a +:class:`~pandas.MultiIndex` as if they were columns in the frame: + +.. ipython:: python + + import pandas.util.testing as tm + + n = 10 + colors = tm.choice(['red', 'green'], size=n) + foods = tm.choice(['eggs', 'ham'], size=n) + colors + foods + + index = MultiIndex.from_arrays([colors, foods], names=['color', 'food']) + df = DataFrame(randn(n, 2), index=index) + df + df.query('color == "red"') + +If the levels of the ``MultiIndex`` are unnamed, you can refer to them using +special names: + + +.. ipython:: python + + df.index.names = [None, None] + df + df.query('ilevel_0 == "red"') + + +The convention is ``ilevel_0``, which means "index level 0" for the 0th level +of the ``index``. + + +:meth:`~pandas.DataFrame.query` Use Cases +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A use case for :meth:`~pandas.DataFrame.query` is when you have a collection of +:class:`~pandas.DataFrame` objects that have a subset of column names (or index +levels/names) in common. You can pass the same query to both frames *without* +having to specify which frame you're interested in querying + +.. 
ipython:: python + + df = DataFrame(rand(n, 3), columns=list('abc')) + df + df2 = DataFrame(rand(n + 2, 3), columns=df.columns) + df2 + expr = '0.0 <= a <= c <= 0.5' + map(lambda frame: frame.query(expr), [df, df2]) + +:meth:`~pandas.DataFrame.query` Python versus pandas Syntax Comparison +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Full numpy-like syntax + +.. ipython:: python + + df = DataFrame(randint(n, size=(n, 3)), columns=list('abc')) + df + df.query('(a < b) & (b < c)') + df[(df.a < df.b) & (df.b < df.c)] + +Slightly nicer by removing the parentheses (by making comparison +operators bind tighter than ``&``/``|``) + +.. ipython:: python + + df.query('a < b & b < c') + +Use English instead of symbols + +.. ipython:: python + + df.query('a < b and b < c') + +Pretty close to how you might write it on paper + +.. ipython:: python + + df.query('a < b < c') + +The ``in`` and ``not in`` operators +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:meth:`~pandas.DataFrame.query` also supports special use of Python's ``in`` and +``not in`` comparison operators, providing a succinct syntax for calling the +``isin`` method of a ``Series`` or ``DataFrame``. + +.. ipython:: python + :suppress: + + try: + old_d = d + del d + except NameError: + pass + +.. ipython:: python + + # get all rows where columns "a" and "b" have overlapping values + df = DataFrame({'a': list('aabbccddeeff'), 'b': list('aaaabbbbcccc'), + 'c': randint(5, size=12), 'd': randint(9, size=12)}) + df + df.query('a in b') + + # How you'd do it in pure Python + df[df.a.isin(df.b)] + + df.query('a not in b') + + # pure Python + df[~df.a.isin(df.b)] + + +You can combine this with other expressions for very succinct queries: + + +.. ipython:: python + + # rows where cols a and b have overlapping values and col c's values are less than col d's + df.query('a in b and c < d') + + # pure Python + df[df.b.isin(df.a) & (df.c < df.d)] + + +.. 
note:: + + Note that ``in`` and ``not in`` are evaluated in Python, since ``numexpr`` + has no equivalent of this operation. However, **only the** ``in``/``not in`` + **expression itself** is evaluated in vanilla Python. For example, in the + expression + + .. code-block:: python + + df.query('a in b + c + d') + + ``(b + c + d)`` is evaluated by ``numexpr`` and *then* the ``in`` + operation is evaluated in plain Python. In general, any operations that can + be evaluated using ``numexpr`` will be. + +Special use of the ``==`` operator with ``list`` objects +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Comparing a ``list`` of values to a column using ``==``/``!=`` works similarly +to ``in``/``not in`` + +.. ipython:: python + + df.query('b == ["a", "b", "c"]') + + # pure Python + df[df.b.isin(["a", "b", "c"])] + + df.query('c == [1, 2]') + + df.query('c != [1, 2]') + + # using in/not in + df.query('[1, 2] in c') + + df.query('[1, 2] not in c') + + # pure Python + df[df.c.isin([1, 2])] + + +Boolean Operators +~~~~~~~~~~~~~~~~~ + +You can negate boolean expressions with the word ``not`` or the ``~`` operator. + +.. ipython:: python + + df = DataFrame(rand(n, 3), columns=list('abc')) + df['bools'] = rand(len(df)) > 0.5 + df.query('~bools') + df.query('not bools') + df.query('not bools') == df[~df.bools] + +Of course, expressions can be arbitrarily complex too + +.. ipython:: python + + # short query syntax + shorter = df.query('a < b < c and (not bools) or bools > 2') + + # equivalent in pure Python + longer = df[(df.a < df.b) & (df.b < df.c) & (~df.bools) | (df.bools > 2)] + + shorter + longer + + shorter == longer + +.. ipython:: python + :suppress: + + try: + d = old_d + del old_d + except NameError: + pass + + +Performance of :meth:`~pandas.DataFrame.query` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``DataFrame.query()`` using ``numexpr`` is slightly faster than Python for +large frames + +.. image:: _static/query-perf.png + +.. 
note:: + + You will only see the performance benefits of using the ``numexpr`` engine + with ``DataFrame.query()`` if your frame has more than approximately 50,000 + rows + + .. image:: _static/query-perf-small.png + +This plot was created using a ``DataFrame`` with 3 columns each containing +floating point values generated using ``numpy.random.randn()``. + +.. ipython:: python + :suppress: + + df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D']) + df2 = df.copy() + Take Methods -~~~~~~~~~~~~ +------------ .. _indexing.take: @@ -740,7 +1050,7 @@ faster than fancy indexing. timeit ser.take(indexer) Duplicate Data -~~~~~~~~~~~~~~ +-------------- .. _indexing.duplicate: @@ -766,8 +1076,8 @@ should be taken instead. .. _indexing.dictionarylike: -Dictionary-like ``get`` method -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Dictionary-like :meth:`~pandas.DataFrame.get` method +---------------------------------------------------- Each of Series, DataFrame, and Panel have a ``get`` method which can return a default value. @@ -865,8 +1175,8 @@ labels or even boolean vectors: Slicing with labels is closely related to the ``truncate`` method which does precisely ``.ix[start:stop]`` but returns a copy (for legacy reasons). -The ``select`` method -~~~~~~~~~~~~~~~~~~~~~ +The :meth:`~pandas.DataFrame.select` Method +------------------------------------------- Another way to extract slices from an object is with the ``select`` method of Series, DataFrame, and Panel. This method should be used only when there is no @@ -877,8 +1187,8 @@ more direct way. ``select`` takes a function which operates on labels along df.select(lambda x: x == 'A', axis=1) -The ``lookup`` method -~~~~~~~~~~~~~~~~~~~~~ +The :meth:`~pandas.DataFrame.lookup` Method +------------------------------------------- Sometimes you want to extract a set of values given a sequence of row labels and column labels, and the ``lookup`` method allows for this and returns a @@ -890,7 +1200,7 @@ numpy array. 
For instance, dflookup.lookup(list(range(0,10,2)), ['B','C','A','B','D']) Setting values in mixed-type DataFrame -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +-------------------------------------- .. _indexing.mixed_type_setting: @@ -909,7 +1219,7 @@ scalar values, though setting arbitrary vectors is not yet supported: .. _indexing.view_versus_copy: Returning a view versus a copy -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------ The rules about when a view on the data is returned are entirely dependent on NumPy. Whenever an array of labels or a boolean vector are involved in the @@ -970,7 +1280,7 @@ When assigning values to subsets of your data, thus, make sure to either use the pandas access methods or explicitly handle the assignment creating a copy. Fallback indexing -~~~~~~~~~~~~~~~~~~~~ +----------------- .. _indexing.fallback: @@ -1006,6 +1316,71 @@ convert to an integer index: df_new[(df_new['index'] >= 1.0) & (df_new['index'] < 2)] +.. _indexing.class: + +Index objects +------------- + +The pandas :class:`~pandas.Index` class and its subclasses can be viewed as +implementing an *ordered multiset*. Duplicates are allowed. However, if you try +to convert an :class:`~pandas.Index` object with duplicate entries into a +``set``, an exception will be raised. + +:class:`~pandas.Index` also provides the infrastructure necessary for +lookups, data alignment, and reindexing. The easiest way to create an +:class:`~pandas.Index` directly is to pass a ``list`` or other sequence to +:class:`~pandas.Index`: + +.. ipython:: python + + index = Index(['e', 'd', 'a', 'b']) + index + 'd' in index + +You can also pass a ``name`` to be stored in the index: + + +.. ipython:: python + + index = Index(['e', 'd', 'a', 'b'], name='something') + index.name + +Starting with pandas 0.5, the name, if set, will be shown in the console +display: + +.. 
ipython:: python + + index = Index(list(range(5)), name='rows') + columns = Index(['A', 'B', 'C'], name='cols') + df = DataFrame(np.random.randn(5, 3), index=index, columns=columns) + df + df['A'] + + +Set operations on Index objects +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. _indexing.set_ops: + +The three main operations are ``union (|)``, ``intersection (&)``, and ``diff +(-)``. These can be directly called as instance methods or used via overloaded +operators: + +.. ipython:: python + + a = Index(['c', 'b', 'a']) + b = Index(['c', 'e', 'd']) + a.union(b) + a | b + a & b + a - b + +The ``isin`` method of Index objects +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +One additional operation is the ``isin`` method that works analogously to the +``Series.isin`` method found :ref:`here <indexing.boolean>`. + .. _indexing.hierarchical: Hierarchical indexing (MultiIndex) @@ -1206,7 +1581,7 @@ mailing list. .. _indexing.xs: Cross-section with hierarchical index -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``xs`` method of ``DataFrame`` additionally takes a level argument to make selecting data at a particular level of a MultiIndex easier. @@ -1238,8 +1613,8 @@ instance: print df2_aligned -The need for sortedness -~~~~~~~~~~~~~~~~~~~~~~~ +The need for sortedness with :class:`~pandas.MultiIndex` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Caveat emptor**: the present implementation of ``MultiIndex`` requires that the labels be sorted for some of the slicing / indexing routines to work @@ -1311,8 +1686,8 @@ However: ... KeyError: Key length (3) was greater than MultiIndex lexsort depth (2) -Swapping levels with ``swaplevel`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Swapping levels with :meth:`~pandas.MultiIndex.swaplevel` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``swaplevel`` function can switch the order of two levels: @@ -1323,8 +1698,8 @@ The ``swaplevel`` function can switch the order of two levels: .. 
_indexing.reorderlevels: -Reordering levels with ``reorder_levels`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Reordering levels with :meth:`~pandas.MultiIndex.reorder_levels` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``reorder_levels`` function generalizes the ``swaplevel`` function, allowing you to permute the hierarchical index levels in one step: @@ -1354,68 +1729,9 @@ not check (or care) whether the levels themselves are sorted. Fortunately, the constructors ``from_tuples`` and ``from_arrays`` ensure that this is true, but if you compute the levels and labels yourself, please be careful. -.. _indexing.class: - -Index objects -------------- - -The pandas Index class and its subclasses can be viewed as implementing an -*ordered set* in addition to providing the support infrastructure necessary for -lookups, data alignment, and reindexing. The easiest way to create one directly -is to pass a list or other sequence to ``Index``: - -.. ipython:: python - - index = Index(['e', 'd', 'a', 'b']) - index - 'd' in index - -You can also pass a ``name`` to be stored in the index: - - -.. ipython:: python - - index = Index(['e', 'd', 'a', 'b'], name='something') - index.name - -Starting with pandas 0.5, the name, if set, will be shown in the console -display: - -.. ipython:: python - - index = Index(list(range(5)), name='rows') - columns = Index(['A', 'B', 'C'], name='cols') - df = DataFrame(np.random.randn(5, 3), index=index, columns=columns) - df - df['A'] - - -Set operations on Index objects -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. _indexing.set_ops: - -The three main operations are ``union (|)``, ``intersection (&)``, and ``diff -(-)``. These can be directly called as instance methods or used via overloaded -operators: - -.. 
ipython:: python - - a = Index(['c', 'b', 'a']) - b = Index(['c', 'e', 'd']) - a.union(b) - a | b - a & b - a - b - -``isin`` method of Index objects -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -One additional operation is the ``isin`` method that works analogously to the -``Series.isin`` method found :ref:`here <indexing.boolean>`. Setting index metadata (``name(s)``, ``levels``, ``labels``) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------------------------ .. _indexing.set_metadata: @@ -1444,7 +1760,7 @@ add an index after you've already done so. There are a couple of different ways. Add an index using DataFrame columns -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------ .. _indexing.set_index: @@ -1487,7 +1803,7 @@ the index in-place (without creating a new object): data Remove / reset the index, ``reset_index`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +------------------------------------------ As a convenience, there is a new function on DataFrame called ``reset_index`` which transfers the index values into the DataFrame's columns and sets a simple @@ -1518,7 +1834,7 @@ discards the index, instead of putting index values in the DataFrame's columns. deprecated. Adding an ad hoc index -~~~~~~~~~~~~~~~~~~~~~~ +---------------------- If you create an index yourself, you can just assign it to the ``index`` field: @@ -1531,9 +1847,9 @@ Indexing internal details .. note:: - The following is largely relevant for those actually working on the pandas - codebase. And the source code is still the best place to look at the - specifics of how things are implemented. + The following is largely relevant for those actually working on the pandas + codebase. The source code is still the best place to look at the specifics + of how things are implemented. 
In pandas there are a few objects implemented which can serve as valid containers for the axis labels: @@ -1545,6 +1861,8 @@ containers for the axis labels: - ``Int64Index``: a version of ``Index`` highly optimized for 64-bit integer data, such as time stamps - ``MultiIndex``: the standard hierarchical index object + - ``PeriodIndex``: An Index object with Period elements + - ``DatetimeIndex``: An Index object with Timestamp elements - ``date_range``: fixed frequency date range generated from a time rule or DateOffset. An ndarray of Python datetime objects diff --git a/doc/source/io.rst b/doc/source/io.rst index c29af29d2e63f..e30eb030afb88 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1962,7 +1962,7 @@ storing/selecting from homogeneous index DataFrames. store.select('df_mi') # the levels are automatically included as data columns - store.select('df_mi', Term('foo=bar')) + store.select('df_mi', 'foo=bar') .. _io.hdf5-query: @@ -1970,49 +1970,102 @@ storing/selecting from homogeneous index DataFrames. Querying a Table ~~~~~~~~~~~~~~~~ +.. warning:: + + These query capabilities have changed substantially starting in ``0.13.0``. + Queries from prior versions are accepted (with a ``DeprecationWarning`` printed) + if they are not string-like. + ``select`` and ``delete`` operations have an optional criterion that can be specified to select/delete only a subset of the data. This allows one to have a very large on-disk table and retrieve only a portion of the data. -A query is specified using the ``Term`` class under the hood. +A query is specified using the ``Term`` class under the hood, as a boolean expression.
- - 'index' and 'columns' are supported indexers of a DataFrame - - 'major_axis', 'minor_axis', and 'items' are supported indexers of + - ``index`` and ``columns`` are supported indexers of a DataFrame + - ``major_axis``, ``minor_axis``, and ``items`` are supported indexers of the Panel + - if ``data_columns`` are specified, these can be used as additional indexers + +Valid comparison operators are: + + - ``=, ==, !=, >, >=, <, <=`` + +Valid boolean expressions are combined with: + + - ``|`` : or + - ``&`` : and + - ``(`` and ``)`` : for grouping + +These rules are similar to how boolean expressions are used in pandas for indexing. + +.. note:: + + - ``=`` will be automatically expanded to the comparison operator ``==`` + - ``~`` is the not operator, but can only be used in very limited + circumstances + - If a list/tuple of expressions is passed they will be combined via ``&`` + +The following are valid expressions: + + - ``'index>=date'`` + - ``"columns=['A', 'D']"`` + - ``"columns in ['A', 'D']"`` + - ``'columns=A'`` + - ``'columns==A'`` + - ``"~(columns=['A','B'])"`` + - ``'index>df.index[3] & string="bar"'`` + - ``'(index>df.index[3] & index<=df.index[6]) | string="bar"'`` + - ``"ts>=Timestamp('2012-02-01')"`` + - ``"major_axis>=20130101"`` + +The ``indexers`` are on the left-hand side of the sub-expression: -Valid terms can be created from ``dict, list, tuple, or -string``. Objects can be embeded as values. Allowed operations are: ``<, -<=, >, >=, =, !=``. ``=`` will be inferred as an implicit set operation -(e.g. if 2 or more values are provided). The following are all valid -terms. 
+ - ``columns``, ``major_axis``, ``ts`` - - ``dict(field = 'index', op = '>', value = '20121114')`` - - ``('index', '>', '20121114')`` - - ``'index > 20121114'`` - - ``('index', '>', datetime(2012, 11, 14))`` - - ``('index', ['20121114', '20121115'])`` - - ``('major_axis', '=', Timestamp('2012/11/14'))`` - - ``('minor_axis', ['A', 'B'])`` +The right-hand side of the sub-expression (after a comparison operator) can be: -Queries are built up using a list of ``Terms`` (currently only -**anding** of terms is supported). An example query for a panel might be -specified as follows. ``['major_axis>20000102', ('minor_axis', '=', -['A', 'B']) ]``. This is roughly translated to: `major_axis must be -greater than the date 20000102 and the minor_axis must be A or B` + - functions that will be evaluated, e.g. ``Timestamp('2012-02-01')`` + - strings, e.g. ``"bar"`` + - date-like, e.g. ``20130101``, or ``"20130101"`` + - lists, e.g. ``"['A','B']"`` + - variables that are defined in the local namespace, e.g. ``date`` + +Here are some examples: + +.. ipython:: python + + dfq = DataFrame(randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10)) + store.append('dfq',dfq,format='table',data_columns=True) + +Use boolean expressions, with in-line function evaluation. + +.. ipython:: python + + store.select('dfq',"index>Timestamp('20130104') & columns=['A', 'B']") + +Use an inline column reference + +.. ipython:: python + + store.select('dfq',where="A>0 or C>0") + +Works with a Panel as well. .. 
ipython:: python store.append('wp',wp) store - store.select('wp', [ Term('major_axis>20000102'), Term('minor_axis', '=', ['A', 'B']) ]) + store.select('wp', "major_axis>Timestamp('20000102') & minor_axis=['A', 'B']") -The ``columns`` keyword can be supplied to select a list of columns to be returned, -this is equivalent to passing a ``Term('columns', list_of_columns_to_filter)``: +The ``columns`` keyword can be supplied to select a list of columns to be +returned, this is equivalent to passing a +``'columns=list_of_columns_to_filter'``: .. ipython:: python - store.select('df', columns=['A', 'B']) + store.select('df', "columns=['A', 'B']") ``start`` and ``stop`` parameters can be specified to limit the total search space. These are in terms of the total number of rows in a table. @@ -2023,10 +2076,18 @@ space. These are in terms of the total number of rows in a table. wp.to_frame() # limiting the search - store.select('wp',[ Term('major_axis>20000102'), - Term('minor_axis', '=', ['A','B']) ], + store.select('wp',"major_axis>20000102 & minor_axis=['A','B']", start=0, stop=10) +.. note:: + + ``select`` will raise a ``ValueError`` if the query expression has an unknown + variable reference. Usually this means that you are trying to select on a column + that is **not** a data_column. + + ``select`` will raise a ``SyntaxError`` if the query expression is not valid. + + .. _io.hdf5-timedelta: **Using timedelta64[ns]** @@ -2048,7 +2109,7 @@ specified in the format: ``<float>(<unit>)``, where float may be signed (and fra dftd['C'] = dftd['A']-dftd['B'] dftd store.append('dftd',dftd,data_columns=True) - store.select('dftd',Term("C","<","-3.5D")) + store.select('dftd',"C<'-3.5D'") Indexing ~~~~~~~~ @@ -2057,10 +2118,13 @@ You can create/modify an index for a table with ``create_table_index`` after data is already in the table (after and ``append/put`` operation). Creating a table index is **highly** encouraged. 
This will speed your queries a great deal when you use a ``select`` with the -indexed dimension as the ``where``. **Indexes are automagically created -(starting 0.10.1)** on the indexables and any data columns you -specify. This behavior can be turned off by passing ``index=False`` to -``append``. +indexed dimension as the ``where``. + +.. note:: + + Indexes are automagically created (starting ``0.10.1``) on the indexables + and any data columns you specify. This behavior can be turned off by passing + ``index=False`` to ``append``. .. ipython:: python @@ -2117,7 +2181,7 @@ create a new table!) Iterator ~~~~~~~~ -Starting in 0.11, you can pass, ``iterator=True`` or ``chunksize=number_in_a_chunk`` +Starting in ``0.11.0``, you can pass, ``iterator=True`` or ``chunksize=number_in_a_chunk`` to ``select`` and ``select_as_multiple`` to return an iterator on the results. The default is 50,000 rows returned in a chunk. @@ -2151,7 +2215,7 @@ Advanced Queries To retrieve a single indexable or data column, use the method ``select_column``. This will, for example, enable you to get the index very quickly. These return a ``Series`` of the result, indexed by the row number. -These do not currently accept the ``where`` selector (coming soon) +These do not currently accept the ``where`` selector. .. ipython:: python diff --git a/doc/source/release.rst b/doc/source/release.rst index 0ed1f39d72cb5..b8a817a00403c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -294,7 +294,15 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` Experimental Features ~~~~~~~~~~~~~~~~~~~~~ -.. _release:bug_fixes-0.13.0: +- The new :func:`~pandas.eval` function implements expression evaluation using + ``numexpr`` behind the scenes. This results in large speedups for complicated + expressions involving large DataFrames/Series. +- :class:`~pandas.DataFrame` has a new :meth:`~pandas.DataFrame.eval` that + evaluates an expression in the context of the ``DataFrame``. 
+- A :meth:`~pandas.DataFrame.query` method has been added that allows + you to select elements of a ``DataFrame`` using a natural query syntax nearly + identical to Python syntax. + Bug Fixes ~~~~~~~~~ diff --git a/doc/source/v0.10.0.txt b/doc/source/v0.10.0.txt index d0c0ecc148239..0c86add1225ad 100644 --- a/doc/source/v0.10.0.txt +++ b/doc/source/v0.10.0.txt @@ -262,7 +262,7 @@ Updated PyTables Support [ Term('major_axis>20000102'), Term('minor_axis', '=', ['A','B']) ]) # removing data from tables - store.remove('wp', [ 'major_axis', '>', wp.major_axis[3] ]) + store.remove('wp', Term('major_axis>20000103')) store.select('wp') # deleting a store diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index c56af23e85eae..694281b813c3b 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -187,6 +187,96 @@ Indexing API Changes p p.loc[:,:,'C'] +HDFStore API Changes +~~~~~~~~~~~~~~~~~~~~ + + - Query Format Changes. A much more string-like query format is now supported. + + .. ipython:: python + + path = 'test_query.h5' + dfq = DataFrame(randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10)) + dfq.to_hdf(path,'dfq',format='table',data_columns=True) + + Use boolean expressions, with in-line function evaluation. + + .. ipython:: python + + read_hdf(path,'dfq',where="index>Timestamp('20130104') & columns=['A', 'B']") + + Use an inline column reference + + .. ipython:: python + + read_hdf(path,'dfq',where="A>0 or C>0") + + See :ref:`the docs<io.hdf5-query>`. + + - Significant table writing performance improvements + - handle a passed ``Series`` in table format (:issue:`4330`) + - added an ``is_open`` property to indicate if the underlying file handle is_open; + a closed store will now report 'CLOSED' when viewing the store (rather than raising an error) + (:issue:`4409`) + - a close of a ``HDFStore`` now will close that instance of the ``HDFStore`` + but will only close the actual file if the ref count (by ``PyTables``) w.r.t. 
all of the open handles + are 0. Essentially you have a local instance of ``HDFStore`` referenced by a variable. Once you + close it, it will report closed. Other references (to the same file) will continue to operate + until they themselves are closed. Performing an action on a closed file will raise + ``ClosedFileError`` + + .. ipython:: python + + path = 'test.h5' + df = DataFrame(randn(10,2)) + store1 = HDFStore(path) + store2 = HDFStore(path) + store1.append('df',df) + store2.append('df2',df) + + store1 + store2 + store1.close() + store2 + store2.close() + store2 + + .. ipython:: python + :suppress: + + import os + os.remove(path) + + - removed the ``_quiet`` attribute, replaced by a ``DuplicateWarning`` if retrieving + duplicate rows from a table (:issue:`4367`) + - removed the ``warn`` argument from ``open``. Instead a ``PossibleDataLossError`` exception will + be raised if you try to use ``mode='w'`` with an OPEN file handle (:issue:`4367`) + - allow a passed locations array or mask as a ``where`` condition (:issue:`4467`). + See :ref:`here<io.hdf5-where_mask>` for an example. + + - the ``format`` keyword now replaces the ``table`` keyword; allowed values are ``fixed(f)`` or ``table(t)`` + the same defaults as prior < 0.13.0 remain, e.g. ``put`` implies 'fixed' or 'f' (Fixed) format + and ``append`` implies 'table' or 't' (Table) format + + .. ipython:: python + + path = 'test.h5' + df = DataFrame(randn(10,2)) + df.to_hdf(path,'df_table',format='table') + df.to_hdf(path,'df_table2',append=True) + df.to_hdf(path,'df_fixed') + with get_store(path) as store: + print store + + .. 
ipython:: python + :suppress: + + import os + os.remove('test.h5') + os.remove('test_query.h5') + - add the keyword ``dropna=True`` to ``append`` to change whether ALL nan rows are not written + to the store (default is ``True``, ALL nan rows are NOT written), also settable + via the option ``io.hdf.dropna_table`` (:issue:`4625`) + Enhancements ~~~~~~~~~~~~ @@ -271,6 +361,90 @@ Enhancements is evaluated, respecttively. See scipy docs. - DataFrame constructor now accepts a numpy masked record array (:issue:`3478`) + +.. _whatsnew_0130.experimental: + +Experimental +~~~~~~~~~~~~ + +- :func:`~pandas.eval`: + + - The new :func:`~pandas.eval` function implements expression evaluation using + ``numexpr`` behind the scenes. This results in large speedups for + complicated expressions involving large DataFrames/Series. For example, + + .. ipython:: python + + nrows, ncols = 20000, 100 + df1, df2, df3, df4 = [DataFrame(randn(nrows, ncols)) + for _ in xrange(4)] + + .. ipython:: python + + %timeit pd.eval('df1 + df2 + df3 + df4') + + For more details, see the :ref:`enhancing performance documentation on eval + <enhancingperf.eval>` + +- :meth:`~pandas.DataFrame.eval` + + - Similar to :func:`~pandas.eval`, :class:`~pandas.DataFrame` has a new + :meth:`~pandas.DataFrame.eval` that evaluates an expression in the context + of the ``DataFrame``. For example, + + .. ipython:: python + :suppress: + + try: + del a + except NameError: + pass + + try: + del b + except NameError: + pass + + .. ipython:: python + + df = DataFrame(randn(10, 2), columns=['a', 'b']) + df.eval('a + b') + + +- :meth:`~pandas.DataFrame.query` + + - In 0.13 a :meth:`~pandas.DataFrame.query` method has been added that allows + you to select elements of a ``DataFrame`` using a natural query syntax + nearly identical to Python syntax. For example, + + .. 
ipython:: python + :suppress: + + try: + del a + except NameError: + pass + + try: + del b + except NameError: + pass + + try: + del c + except NameError: + pass + + .. ipython:: python + + n = 20 + df = DataFrame(randint(n, size=(n, 3)), columns=['a', 'b', 'c']) + df.query('a < b < c') + + selects all the rows of ``df`` where ``a < b < c`` evaluates to ``True``. + For more details see the :ref:`indexing documentation on query + <indexing.query>`. + .. _whatsnew_0130.refactoring: Internal Refactoring diff --git a/pandas/__init__.py b/pandas/__init__.py index 03681d3fa5a3f..c4c012d6c5095 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -42,6 +42,7 @@ from pandas.stats.api import * from pandas.tseries.api import * from pandas.io.api import * +from pandas.computation.api import * from pandas.util.testing import debug diff --git a/pandas/compat/__init__.py b/pandas/compat/__init__.py index 12c929cd59820..10e1464739203 100644 --- a/pandas/compat/__init__.py +++ b/pandas/compat/__init__.py @@ -46,11 +46,13 @@ from StringIO import StringIO BytesIO = StringIO import cPickle + import httplib except ImportError: import builtins from io import StringIO, BytesIO cStringIO = StringIO import pickle as cPickle + import http.client as httplib if PY3: diff --git a/pandas/computation/__init__.py b/pandas/computation/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/computation/align.py b/pandas/computation/align.py new file mode 100644 index 0000000000000..60975bdc8a5b4 --- /dev/null +++ b/pandas/computation/align.py @@ -0,0 +1,247 @@ +"""Core eval alignment algorithms +""" + +import warnings +from functools import partial, wraps +from pandas.compat import zip, range + +import numpy as np + +import pandas as pd +from pandas import compat +import pandas.core.common as com + + +def _align_core_single_unary_op(term): + if isinstance(term.value, np.ndarray): + typ = partial(np.asanyarray, dtype=term.value.dtype) + else: + typ = 
type(term.value) + ret = typ, + + if not hasattr(term.value, 'axes'): + ret += None, + else: + ret += _zip_axes_from_type(typ, term.value.axes), + return ret + + +def _zip_axes_from_type(typ, new_axes): + axes = {} + for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES): + axes[ax_name] = new_axes[ax_ind] + return axes + + +def _maybe_promote_shape(values, naxes): + # test to see if we have an array else leave since must be a number + if not isinstance(values, np.ndarray): + return values + + ndims = values.ndim + if ndims > naxes: + raise AssertionError('cannot have more dims than axes, ' + '{0} > {1}'.format(ndims, naxes)) + if ndims == naxes: + return values + + ndim, nax = range(ndims), range(naxes) + + axes_slice = [slice(None)] * naxes + + # set difference of numaxes and ndims + slices = list(set(nax) - set(ndim)) + + if ndims == naxes: + if slices: + raise AssertionError('slices should be empty if ndims == naxes ' + '{0}'.format(slices)) + else: + if not slices: + raise AssertionError('slices should NOT be empty if ndim != naxes ' + '{0}'.format(slices)) + + for sl in slices: + axes_slice[sl] = np.newaxis + + return values[tuple(axes_slice)] + + +def _any_pandas_objects(terms): + """Check a sequence of terms for instances of PandasObject.""" + return any(isinstance(term.value, pd.core.generic.PandasObject) + for term in terms) + + +def _filter_special_cases(f): + @wraps(f) + def wrapper(terms): + # single unary operand + if len(terms) == 1: + return _align_core_single_unary_op(terms[0]) + + term_values = (term.value for term in terms) + # only scalars or indexes + if all(isinstance(term.value, pd.Index) or term.isscalar for term in + terms): + return np.result_type(*term_values), None + + # single element ndarrays + all_has_size = all(hasattr(term.value, 'size') for term in terms) + if all_has_size and all(term.value.size == 1 for term in terms): + return np.result_type(*term_values), None + + # no pandas objects + if not _any_pandas_objects(terms): + 
return np.result_type(*term_values), None + + return f(terms) + return wrapper + + +@_filter_special_cases +def _align_core(terms): + term_index = [i for i, term in enumerate(terms) if hasattr(term.value, + 'axes')] + term_dims = [terms[i].value.ndim for i in term_index] + ndims = pd.Series(dict(zip(term_index, term_dims))) + + # initial axes are the axes of the largest-axis'd term + biggest = terms[ndims.idxmax()].value + typ = biggest._constructor + axes = biggest.axes + naxes = len(axes) + + for term in (terms[i] for i in term_index): + for axis, items in enumerate(term.value.axes): + if isinstance(term.value, pd.Series) and naxes > 1: + ax, itm = naxes - 1, term.value.index + else: + ax, itm = axis, items + axes[ax] = axes[ax].join(itm, how='outer') + + for i, ndim in compat.iteritems(ndims): + for axis, items in zip(range(ndim), axes): + ti = terms[i].value + + if hasattr(ti, 'reindex_axis'): + transpose = isinstance(ti, pd.Series) and naxes > 1 + reindexer = axes[naxes - 1] if transpose else items + + term_axis_size = len(ti.axes[axis]) + reindexer_size = len(reindexer) + + ordm = np.log10(abs(reindexer_size - term_axis_size)) + if ordm >= 1 and reindexer_size >= 10000: + warnings.warn("Alignment difference on axis {0} is larger" + " than an order of magnitude on term {1!r}, " + "by more than {2:.4g}; performance may suffer" + "".format(axis, term.name, ordm), + category=pd.io.common.PerformanceWarning) + + if transpose: + f = partial(ti.reindex, index=reindexer, copy=False) + else: + f = partial(ti.reindex_axis, reindexer, axis=axis, + copy=False) + + if pd.lib.is_bool_array(ti.values): + r = f(fill_value=True) + else: + r = f() + + terms[i].update(r) + + res = _maybe_promote_shape(terms[i].value.T if transpose else + terms[i].value, naxes) + res = res.T if transpose else res + + try: + v = res.values + except AttributeError: + v = res + terms[i].update(v) + + return typ, _zip_axes_from_type(typ, axes) + + +def _filter_terms(flat): + # numeric literals + 
literals = frozenset(filter(lambda x: isinstance(x, Constant), flat)) + + # these are strings which are variable names + names = frozenset(flat) - literals + + # literals are not names and names are not literals, so intersection should + # be empty + if literals & names: + raise ValueError('literals cannot be names and names cannot be ' + 'literals') + return names, literals + + +def _align(terms): + """Align a set of terms""" + try: + # flatten the parse tree (a nested list, really) + terms = list(com.flatten(terms)) + except TypeError: + # can't iterate so it must just be a constant or single variable + if isinstance(terms.value, pd.core.generic.NDFrame): + typ = type(terms.value) + return typ, _zip_axes_from_type(typ, terms.value.axes) + return np.result_type(terms.type), None + + # if all resolved variables are numeric scalars + if all(term.isscalar for term in terms): + return np.result_type(*(term.value for term in terms)).type, None + + # perform the main alignment + typ, axes = _align_core(terms) + return typ, axes + + +def _reconstruct_object(typ, obj, axes, dtype): + """Reconstruct an object given its type, raw value, and possibly empty + (None) axes. + + Parameters + ---------- + typ : object + A type + obj : object + The value to use in the type constructor + axes : dict + The axes to use to construct the resulting pandas object + + Returns + ------- + ret : typ + An object of type ``typ`` with the value `obj` and possible axes + `axes`. 
+ """ + try: + typ = typ.type + except AttributeError: + pass + + try: + res_t = np.result_type(obj.dtype, dtype) + except AttributeError: + res_t = dtype + + if (not isinstance(typ, partial) and + issubclass(typ, pd.core.generic.PandasObject)): + return typ(obj, dtype=res_t, **axes) + + # special case for pathological things like ~True/~False + if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_: + ret_value = res_t.type(obj) + else: + ret_value = typ(obj).astype(res_t) + + try: + ret = ret_value.item() + except ValueError: + ret = ret_value + return ret diff --git a/pandas/computation/api.py b/pandas/computation/api.py new file mode 100644 index 0000000000000..db8269a497768 --- /dev/null +++ b/pandas/computation/api.py @@ -0,0 +1,2 @@ +from pandas.computation.eval import eval +from pandas.computation.expr import Expr diff --git a/pandas/computation/common.py b/pandas/computation/common.py new file mode 100644 index 0000000000000..9af2197a4fd69 --- /dev/null +++ b/pandas/computation/common.py @@ -0,0 +1,13 @@ +import numpy as np +import pandas as pd + + +def _ensure_decoded(s): + """ if we have bytes, decode them to unicode """ + if isinstance(s, (np.bytes_, bytes)): + s = s.decode(pd.get_option('display.encoding')) + return s + + +class NameResolutionError(NameError): + pass diff --git a/pandas/computation/engines.py b/pandas/computation/engines.py new file mode 100644 index 0000000000000..88efc9eeab5d5 --- /dev/null +++ b/pandas/computation/engines.py @@ -0,0 +1,125 @@ +"""Engine classes for :func:`~pandas.eval` +""" + +import abc + +from pandas import compat +from pandas.core import common as com +from pandas.computation.align import _align, _reconstruct_object +from pandas.computation.ops import UndefinedVariableError + + +class AbstractEngine(object): + """Object serving as a base class for all engines.""" + + __metaclass__ = abc.ABCMeta + + has_neg_frac = False + + def __init__(self, expr): + self.expr = expr + self.aligned_axes = None + 
self.result_type = None + + def convert(self): + """Convert an expression for evaluation. + + Defaults to return the expression as a string. + """ + return com.pprint_thing(self.expr) + + def pre_evaluate(self): + self.expr.check_name_clashes() + + def evaluate(self): + """Run the engine on the expression + + This method performs alignment which is necessary no matter what engine + is being used, thus its implementation is in the base class. + + Returns + ------- + obj : object + The result of the passed expression. + """ + if not self._is_aligned: + self.result_type, self.aligned_axes = _align(self.expr.terms) + + # make sure no names in resolvers and locals/globals clash + self.pre_evaluate() + res = self._evaluate() + return _reconstruct_object(self.result_type, res, self.aligned_axes, + self.expr.terms.return_type) + + @property + def _is_aligned(self): + return self.aligned_axes is not None and self.result_type is not None + + @abc.abstractmethod + def _evaluate(self): + """Return an evaluated expression. + + Parameters + ---------- + env : Scope + The local and global environment in which to evaluate an + expression. + + Notes + ----- + Must be implemented by subclasses. 
+ """ + pass + + +class NumExprEngine(AbstractEngine): + """NumExpr engine class""" + has_neg_frac = True + + def __init__(self, expr): + super(NumExprEngine, self).__init__(expr) + + def convert(self): + return str(super(NumExprEngine, self).convert()) + + def _evaluate(self): + import numexpr as ne + + # add the resolvers to locals + self.expr.add_resolvers_to_locals() + + # convert the expression to a valid numexpr expression + s = self.convert() + + try: + return ne.evaluate(s, local_dict=self.expr.env.locals, + global_dict=self.expr.env.globals, + truediv=self.expr.truediv) + except KeyError as e: + # python 3 compat kludge + try: + msg = e.message + except AttributeError: + msg = compat.text_type(e) + raise UndefinedVariableError(msg) + + +class PythonEngine(AbstractEngine): + """Evaluate an expression in Python space. + + Mostly for testing purposes. + """ + has_neg_frac = False + + def __init__(self, expr): + super(PythonEngine, self).__init__(expr) + + def evaluate(self): + self.pre_evaluate() + return self.expr() + + def _evaluate(self): + pass + + +_engines = {'numexpr': NumExprEngine, 'python': PythonEngine} diff --git a/pandas/computation/eval.py b/pandas/computation/eval.py new file mode 100644 index 0000000000000..36b1e2bc96090 --- /dev/null +++ b/pandas/computation/eval.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python + +"""Top level ``eval`` module. +""" + +import numbers +import numpy as np + +from pandas.core import common as com +from pandas.compat import string_types +from pandas.computation.expr import Expr, _parsers, _ensure_scope +from pandas.computation.engines import _engines + + +def _check_engine(engine): + """Make sure a valid engine is passed. 
+ + Parameters + ---------- + engine : str + + Raises + ------ + KeyError + * If an invalid engine is passed + ImportError + * If numexpr was requested but doesn't exist + """ + if engine not in _engines: + raise KeyError('Invalid engine {0!r} passed, valid engines are' + ' {1}'.format(engine, list(_engines.keys()))) + + # TODO: validate this in a more general way (thinking of future engines + # that won't necessarily be import-able) + # Could potentially be done on engine instantiation + if engine == 'numexpr': + try: + import numexpr + except ImportError: + raise ImportError("'numexpr' not found. Cannot use " + "engine='numexpr' if 'numexpr' is not installed") + + +def _check_parser(parser): + """Make sure a valid parser is passed. + + Parameters + ---------- + parser : str + + Raises + ------ + KeyError + * If an invalid parser is passed + """ + if parser not in _parsers: + raise KeyError('Invalid parser {0!r} passed, valid parsers are' + ' {1}'.format(parser, _parsers.keys())) + + +def _check_resolvers(resolvers): + if resolvers is not None: + for resolver in resolvers: + if not hasattr(resolver, '__getitem__'): + name = type(resolver).__name__ + raise AttributeError('Resolver of type {0!r} must implement ' + 'the __getitem__ method'.format(name)) + + +def _check_expression(expr): + """Make sure an expression is not an empty string + + Parameters + ---------- + expr : object + An object that can be converted to a string + + Raises + ------ + ValueError + * If expr is an empty string + """ + if not expr: + raise ValueError("expr cannot be an empty string") + + +def _convert_expression(expr): + """Convert an object to an expression. + + Thus function converts an object to an expression (a unicode string) and + checks to make sure it isn't empty after conversion. This is used to + convert operators to their string representation for recursive calls to + :func:`~pandas.eval`. + + Parameters + ---------- + expr : object + The object to be converted to a string. 
+ + Returns + ------- + s : unicode + The string representation of an object. + + Raises + ------ + ValueError + * If the expression is empty. + """ + s = com.pprint_thing(expr) + _check_expression(s) + return s + + +def eval(expr, parser='pandas', engine='numexpr', truediv=True, + local_dict=None, global_dict=None, resolvers=None, level=2): + """Evaluate a Python expression as a string using various backends. + + The following arithmetic operations are supported: ``+``, ``-``, ``*``, + ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following + boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not). + Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`, + :keyword:`or`, and :keyword:`not` with the same semantics as the + corresponding bitwise operators. :class:`~pandas.Series` and + :class:`~pandas.DataFrame` objects are supported and behave as they would + with plain ol' Python evaluation. + + Parameters + ---------- + expr : str or unicode + The expression to evaluate. This string cannot contain any Python + `statements + <http://docs.python.org/2/reference/simple_stmts.html#simple-statements>`__, + only Python `expressions + <http://docs.python.org/2/reference/simple_stmts.html#expression-statements>`__. + parser : string, default 'pandas', {'pandas', 'python'} + The parser to use to construct the syntax tree from the expression. The + default of ``'pandas'`` parses code slightly different than standard + Python. Alternatively, you can parse an expression using the + ``'python'`` parser to retain strict Python semantics. See the + :ref:`enhancing performance <enhancingperf.eval>` documentation for + more details. + engine : string, default 'numexpr', {'python', 'numexpr'} + + The engine used to evaluate the expression. Supported engines are + + - ``'numexpr'``: This default engine evaluates pandas objects using + numexpr for large speed ups in complex expressions + with large frames. 
+ - ``'python'``: Performs operations as if you had ``eval``'d in top + level python. This engine is generally not that useful. + + More backends may be available in the future. + + truediv : bool, optional + Whether to use true division, like in Python >= 3 + local_dict : dict or None, optional + A dictionary of local variables, taken from locals() by default. + global_dict : dict or None, optional + A dictionary of global variables, taken from globals() by default. + resolvers : list of dict-like or None, optional + A list of objects implementing the ``__getitem__`` special method that + you can use to inject an additional collection of namespaces to use for + variable lookup. For example, this is used in the + :meth:`~pandas.DataFrame.query` method to inject the + :attr:`~pandas.DataFrame.index` and :attr:`~pandas.DataFrame.columns` + variables that refer to their respective :class:`~pandas.DataFrame` + instance attributes. + level : int, optional + The number of prior stack frames to traverse and add to the current + scope. Most users will **not** need to change this parameter. + + Returns + ------- + ndarray, numeric scalar, DataFrame, Series + + Notes + ----- + The ``dtype`` of any objects involved in an arithmetic ``%`` operation are + recursively cast to ``float64``. + + See the :ref:`enhancing performance <enhancingperf.eval>` documentation for + more details. 
+ + See Also + -------- + pandas.DataFrame.query + pandas.DataFrame.eval + """ + expr = _convert_expression(expr) + _check_engine(engine) + _check_parser(parser) + _check_resolvers(resolvers) + + # get our (possibly passed-in) scope + env = _ensure_scope(global_dict=global_dict, local_dict=local_dict, + resolvers=resolvers, level=level) + + parsed_expr = Expr(expr, engine=engine, parser=parser, env=env, + truediv=truediv) + + # construct the engine and evaluate the parsed expression + eng = _engines[engine] + eng_inst = eng(parsed_expr) + ret = eng_inst.evaluate() + return ret diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py new file mode 100644 index 0000000000000..ff9adc26b8201 --- /dev/null +++ b/pandas/computation/expr.py @@ -0,0 +1,763 @@ +""":func:`~pandas.eval` parsers +""" + +import ast +import operator +import sys +import inspect +import tokenize +import datetime +import struct + +from functools import partial + +import pandas as pd +from pandas import compat +from pandas.compat import StringIO, zip, reduce, string_types +from pandas.core.base import StringMixin +from pandas.core import common as com +from pandas.computation.common import NameResolutionError +from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms, + _arith_ops_syms, _unary_ops_syms, is_term) +from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG +from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div + + +def _ensure_scope(level=2, global_dict=None, local_dict=None, resolvers=None, + **kwargs): + """Ensure that we are grabbing the correct scope.""" + return Scope(gbls=global_dict, lcls=local_dict, level=level, + resolvers=resolvers) + + +def _check_disjoint_resolver_names(resolver_keys, local_keys, global_keys): + """Make sure that variables in resolvers don't overlap with locals or + globals. 
+ """ + res_locals = list(com.intersection(resolver_keys, local_keys)) + if res_locals: + msg = "resolvers and locals overlap on names {0}".format(res_locals) + raise NameResolutionError(msg) + + res_globals = list(com.intersection(resolver_keys, global_keys)) + if res_globals: + msg = "resolvers and globals overlap on names {0}".format(res_globals) + raise NameResolutionError(msg) + + +def _replacer(x, pad_size): + """Replace a number with its padded hexadecimal representation. Used to tag + temporary variables with their calling scope's id. + """ + # get the hex repr of the binary char and remove 0x and pad by pad_size + # zeros + try: + hexin = ord(x) + except TypeError: + # bytes literals masquerade as ints when iterating in py3 + hexin = x + + return hex(hexin).replace('0x', '').rjust(pad_size, '0') + + +def _raw_hex_id(obj, pad_size=2): + """Return the padded hexadecimal id of ``obj``.""" + # interpret as a pointer since that's what really what id returns + packed = struct.pack('@P', id(obj)) + + return ''.join(_replacer(x, pad_size) for x in packed) + + +class Scope(StringMixin): + """Object to hold scope, with a few bells to deal with some custom syntax + added by pandas. 
+ + Parameters + ---------- + gbls : dict or None, optional, default None + lcls : dict or Scope or None, optional, default None + level : int, optional, default 1 + resolvers : list-like or None, optional, default None + + Attributes + ---------- + globals : dict + locals : dict + level : int + resolvers : tuple + resolver_keys : frozenset + """ + __slots__ = ('globals', 'locals', 'resolvers', '_global_resolvers', + 'resolver_keys', '_resolver', 'level', 'ntemps') + + def __init__(self, gbls=None, lcls=None, level=1, resolvers=None): + self.level = level + self.resolvers = tuple(resolvers or []) + self.globals = dict() + self.locals = dict() + self.ntemps = 1 # number of temporary variables in this scope + + if isinstance(lcls, Scope): + ld, lcls = lcls, dict() + self.locals.update(ld.locals.copy()) + self.globals.update(ld.globals.copy()) + self.resolvers += ld.resolvers + self.update(ld.level) + + frame = sys._getframe(level) + try: + self.globals.update(gbls or frame.f_globals) + self.locals.update(lcls or frame.f_locals) + finally: + del frame + + # add some useful defaults + self.globals['Timestamp'] = pd.lib.Timestamp + self.globals['datetime'] = datetime + + # SUCH a hack + self.globals['True'] = True + self.globals['False'] = False + + res_keys = (list(o.keys()) for o in self.resolvers) + self.resolver_keys = frozenset(reduce(operator.add, res_keys, [])) + self._global_resolvers = self.resolvers + (self.locals, self.globals) + self._resolver = None + + self.resolver_dict = {} + for o in self.resolvers: + self.resolver_dict.update(dict(o)) + + def __unicode__(self): + return com.pprint_thing("locals: {0}\nglobals: {0}\nresolvers: " + "{0}".format(list(self.locals.keys()), + list(self.globals.keys()), + list(self.resolver_keys))) + + def __getitem__(self, key): + return self.resolve(key, globally=False) + + def resolve(self, key, globally=False): + resolvers = self.locals, self.globals + if globally: + resolvers = self._global_resolvers + + for resolver in 
resolvers: + try: + return resolver[key] + except KeyError: + pass + + def update(self, level=None): + """Update the current scope by going back `level` levels. + + Parameters + ---------- + level : int or None, optional, default None + """ + # we are always 2 levels below the caller + # plus the caller may be below the env level + # in which case we need addtl levels + sl = 2 + if level is not None: + sl += level + + # add sl frames to the scope starting with the + # most distant and overwritting with more current + # makes sure that we can capture variable scope + frame = inspect.currentframe() + try: + frames = [] + while sl >= 0: + frame = frame.f_back + sl -= 1 + frames.append(frame) + for f in frames[::-1]: + self.locals.update(f.f_locals) + self.globals.update(f.f_globals) + finally: + del frame, frames + + def add_tmp(self, value, where='locals'): + """Add a temporary variable to the scope. + + Parameters + ---------- + value : object + An arbitrary object to be assigned to a temporary variable. + where : basestring, optional, default 'locals', {'locals', 'globals'} + What scope to add the value to. + + Returns + ------- + name : basestring + The name of the temporary variable created. 
+ """ + d = getattr(self, where, None) + + if d is None: + raise AttributeError("Cannot add value to non-existent scope " + "{0!r}".format(where)) + if not isinstance(d, dict): + raise TypeError("Cannot add value to object of type {0!r}, " + "scope must be a dictionary" + "".format(type(d).__name__)) + name = 'tmp_var_{0}_{1}_{2}'.format(type(value).__name__, self.ntemps, + _raw_hex_id(self)) + d[name] = value + + # only increment if the variable gets put in the scope + self.ntemps += 1 + return name + + def remove_tmp(self, name, where='locals'): + d = getattr(self, where, None) + if d is None: + raise AttributeError("Cannot remove value from non-existent scope " + "{0!r}".format(where)) + if not isinstance(d, dict): + raise TypeError("Cannot remove value from object of type {0!r}, " + "scope must be a dictionary" + "".format(type(d).__name__)) + del d[name] + self.ntemps -= 1 + + +def _rewrite_assign(source): + """Rewrite the assignment operator for PyTables expression that want to use + ``=`` as a substitute for ``==``. + """ + res = [] + g = tokenize.generate_tokens(StringIO(source).readline) + for toknum, tokval, _, _, _ in g: + res.append((toknum, '==' if tokval == '=' else tokval)) + return tokenize.untokenize(res) + + +def _replace_booleans(source): + """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise + precedence is changed to boolean precedence. 
+ """ + return source.replace('|', ' or ').replace('&', ' and ') + + +def _replace_locals(source, local_symbol='@'): + """Replace local variables with a syntacticall valid name.""" + return source.replace(local_symbol, _LOCAL_TAG) + + +def _preparse(source): + """Compose assignment and boolean replacement.""" + return _replace_booleans(_rewrite_assign(source)) + + +def _is_type(t): + """Factory for a type checking function of type ``t`` or tuple of types.""" + return lambda x: isinstance(x.value, t) + + +_is_list = _is_type(list) +_is_str = _is_type(string_types) + + +# partition all AST nodes +_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and + issubclass(x, ast.AST), + (getattr(ast, node) for node in dir(ast)))) + + +def _filter_nodes(superclass, all_nodes=_all_nodes): + """Filter out AST nodes that are subclasses of ``superclass``.""" + node_names = (node.__name__ for node in all_nodes + if issubclass(node, superclass)) + return frozenset(node_names) + + +_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes)) +_mod_nodes = _filter_nodes(ast.mod) +_stmt_nodes = _filter_nodes(ast.stmt) +_expr_nodes = _filter_nodes(ast.expr) +_expr_context_nodes = _filter_nodes(ast.expr_context) +_slice_nodes = _filter_nodes(ast.slice) +_boolop_nodes = _filter_nodes(ast.boolop) +_operator_nodes = _filter_nodes(ast.operator) +_unary_op_nodes = _filter_nodes(ast.unaryop) +_cmp_op_nodes = _filter_nodes(ast.cmpop) +_comprehension_nodes = _filter_nodes(ast.comprehension) +_handler_nodes = _filter_nodes(ast.excepthandler) +_arguments_nodes = _filter_nodes(ast.arguments) +_keyword_nodes = _filter_nodes(ast.keyword) +_alias_nodes = _filter_nodes(ast.alias) + + +# nodes that we don't support directly but are needed for parsing +_hacked_nodes = frozenset(['Assign', 'Module', 'Expr']) + + +_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp', + 'DictComp', 'SetComp', 'Repr', 'Lambda', + 'Set', 'AST', 'Is', 'IsNot']) + +# these nodes are low priority 
or won't ever be supported (e.g., AST) +_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes | + _arguments_nodes | _keyword_nodes | _alias_nodes | + _expr_context_nodes | _unsupported_expr_nodes) - + _hacked_nodes) + +# we're adding a different assignment in some cases to be equality comparison +# and we don't want `stmt` and friends in their so get only the class whose +# names are capitalized +_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes +_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes & + _base_supported_nodes) +assert not _unsupported_nodes & _base_supported_nodes, _msg + + +def _node_not_implemented(node_name, cls): + """Return a function that raises a NotImplementedError with a passed node + name. + """ + def f(self, *args, **kwargs): + raise NotImplementedError("{0!r} nodes are not " + "implemented".format(node_name)) + return f + + +def disallow(nodes): + """Decorator to disallow certain nodes from parsing. Raises a + NotImplementedError instead. + + Returns + ------- + disallowed : callable + """ + def disallowed(cls): + cls.unsupported_nodes = () + for node in nodes: + new_method = _node_not_implemented(node, cls) + name = 'visit_{0}'.format(node) + cls.unsupported_nodes += (name,) + setattr(cls, name, new_method) + return cls + return disallowed + + +def _op_maker(op_class, op_symbol): + """Return a function to create an op class with its symbol already passed. + + Returns + ------- + f : callable + """ + def f(self, node, *args, **kwargs): + """Return a partial function with an Op subclass with an operator + already passed. 
+ + Returns + ------- + f : callable + """ + return partial(op_class, op_symbol, *args, **kwargs) + return f + + +_op_classes = {'binary': BinOp, 'unary': UnaryOp} + + +def add_ops(op_classes): + """Decorator to add default implementation of ops.""" + def f(cls): + for op_attr_name, op_class in compat.iteritems(op_classes): + ops = getattr(cls, '{0}_ops'.format(op_attr_name)) + ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name)) + for op in ops: + op_node = ops_map[op] + if op_node is not None: + made_op = _op_maker(op_class, op) + setattr(cls, 'visit_{0}'.format(op_node), made_op) + return cls + return f + + +@disallow(_unsupported_nodes) +@add_ops(_op_classes) +class BaseExprVisitor(ast.NodeVisitor): + """Custom ast walker. Parsers of other engines should subclass this class + if necessary. + + Parameters + ---------- + env : Scope + engine : str + parser : str + preparser : callable + """ + const_type = Constant + term_type = Term + + binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms + binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn', + 'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult', + None, 'Pow', 'FloorDiv', 'Mod') + binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes)) + + unary_ops = _unary_ops_syms + unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not' + unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes)) + + rewrite_map = { + ast.Eq: ast.In, + ast.NotEq: ast.NotIn, + ast.In: ast.In, + ast.NotIn: ast.NotIn + } + + def __init__(self, env, engine, parser, preparser=_preparse): + self.env = env + self.engine = engine + self.parser = parser + self.preparser = preparser + + def visit(self, node, **kwargs): + if isinstance(node, string_types): + clean = self.preparser(node) + node = ast.fix_missing_locations(ast.parse(clean)) + elif not isinstance(node, ast.AST): + raise TypeError("Cannot visit objects of type {0!r}" + "".format(node.__class__.__name__)) + + method = 'visit_' + 
node.__class__.__name__ + visitor = getattr(self, method) + return visitor(node, **kwargs) + + def visit_Module(self, node, **kwargs): + if len(node.body) != 1: + raise SyntaxError('only a single expression is allowed') + expr = node.body[0] + return self.visit(expr, **kwargs) + + def visit_Expr(self, node, **kwargs): + return self.visit(node.value, **kwargs) + + def _rewrite_membership_op(self, node, left, right): + # the kind of the operator (is actually an instance) + op_instance = node.op + op_type = type(op_instance) + + # must be two terms and the comparison operator must be ==/!=/in/not in + if is_term(left) and is_term(right) and op_type in self.rewrite_map: + + left_list, right_list = map(_is_list, (left, right)) + left_str, right_str = map(_is_str, (left, right)) + + # if there are any strings or lists in the expression + if left_list or right_list or left_str or right_str: + op_instance = self.rewrite_map[op_type]() + + # pop the string variable out of locals and replace it with a list + # of one string, kind of a hack + if right_str: + self.env.remove_tmp(right.name) + name = self.env.add_tmp([right.value]) + right = self.term_type(name, self.env) + + if left_str: + self.env.remove_tmp(left.name) + name = self.env.add_tmp([left.value]) + left = self.term_type(name, self.env) + + op = self.visit(op_instance) + return op, op_instance, left, right + + def _possibly_transform_eq_ne(self, node, left=None, right=None): + if left is None: + left = self.visit(node.left, side='left') + if right is None: + right = self.visit(node.right, side='right') + op, op_class, left, right = self._rewrite_membership_op(node, left, + right) + return op, op_class, left, right + + def _possibly_eval(self, binop, eval_in_python): + # eval `in` and `not in` (for now) in "partial" python space + # things that can be evaluated in "eval" space will be turned into + # temporary variables. 
for example, + # [1,2] in a + 2 * b + # in that case a + 2 * b will be evaluated using numexpr, and the "in" + # call will be evaluated using isin (in python space) + return binop.evaluate(self.env, self.engine, self.parser, + self.term_type, eval_in_python) + + def _possibly_evaluate_binop(self, op, op_class, lhs, rhs, + eval_in_python=('in', 'not in'), + maybe_eval_in_python=('==', '!=')): + res = op(lhs, rhs) + + # "in"/"not in" ops are always evaluated in python + if res.op in eval_in_python: + return self._possibly_eval(res, eval_in_python) + elif (lhs.return_type == object or rhs.return_type == object and + self.engine != 'pytables'): + # evaluate "==" and "!=" in python if either of our operands has an + # object return type + return self._possibly_eval(res, eval_in_python + + maybe_eval_in_python) + return res + + def visit_BinOp(self, node, **kwargs): + op, op_class, left, right = self._possibly_transform_eq_ne(node) + return self._possibly_evaluate_binop(op, op_class, left, right) + + def visit_Div(self, node, **kwargs): + return lambda lhs, rhs: Div(lhs, rhs, + truediv=self.env.locals['truediv']) + + def visit_UnaryOp(self, node, **kwargs): + op = self.visit(node.op) + operand = self.visit(node.operand) + return op(operand) + + def visit_Name(self, node, **kwargs): + return self.term_type(node.id, self.env, **kwargs) + + def visit_Num(self, node, **kwargs): + return self.const_type(node.n, self.env) + + def visit_Str(self, node, **kwargs): + name = self.env.add_tmp(node.s) + return self.term_type(name, self.env) + + def visit_List(self, node, **kwargs): + name = self.env.add_tmp([self.visit(e).value for e in node.elts]) + return self.term_type(name, self.env) + + visit_Tuple = visit_List + + def visit_Index(self, node, **kwargs): + """ df.index[4] """ + return self.visit(node.value) + + def visit_Subscript(self, node, **kwargs): + value = self.visit(node.value) + slobj = self.visit(node.slice) + result = pd.eval(slobj, local_dict=self.env, 
engine=self.engine, + parser=self.parser) + try: + # a Term instance + v = value.value[result] + except AttributeError: + # an Op instance + lhs = pd.eval(value, local_dict=self.env, engine=self.engine, + parser=self.parser) + v = lhs[result] + name = self.env.add_tmp(v) + return self.term_type(name, env=self.env) + + def visit_Slice(self, node, **kwargs): + """ df.index[slice(4,6)] """ + lower = node.lower + if lower is not None: + lower = self.visit(lower).value + upper = node.upper + if upper is not None: + upper = self.visit(upper).value + step = node.step + if step is not None: + step = self.visit(step).value + + return slice(lower, upper, step) + + def visit_Assign(self, node, **kwargs): + cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0], + comparators=[node.value]) + return self.visit(cmpr) + + def visit_Attribute(self, node, **kwargs): + attr = node.attr + value = node.value + + ctx = node.ctx + if isinstance(ctx, ast.Load): + # resolve the value + resolved = self.visit(value).value + try: + v = getattr(resolved, attr) + name = self.env.add_tmp(v) + return self.term_type(name, self.env) + except AttributeError: + # something like datetime.datetime where scope is overriden + if isinstance(value, ast.Name) and value.id == attr: + return resolved + + raise ValueError("Invalid Attribute context {0}".format(ctx.__name__)) + + def visit_Call(self, node, **kwargs): + + # this can happen with: datetime.datetime + if isinstance(node.func, ast.Attribute): + res = self.visit_Attribute(node.func) + elif not isinstance(node.func, ast.Name): + raise TypeError("Only named functions are supported") + else: + res = self.visit(node.func) + + if res is None: + raise ValueError("Invalid function call {0}".format(node.func.id)) + if hasattr(res, 'value'): + res = res.value + + args = [self.visit(targ).value for targ in node.args] + if node.starargs is not None: + args = args + self.visit(node.starargs).value + + keywords = {} + for key in node.keywords: + if not 
isinstance(key, ast.keyword): + raise ValueError("keyword error in function call " + "'{0}'".format(node.func.id)) + keywords[key.arg] = self.visit(key.value).value + if node.kwargs is not None: + keywords.update(self.visit(node.kwargs).value) + + return self.const_type(res(*args, **keywords), self.env) + + def translate_In(self, op): + return op + + def visit_Compare(self, node, **kwargs): + ops = node.ops + comps = node.comparators + + # base case: we have something like a CMP b + if len(comps) == 1: + op = self.translate_In(ops[0]) + binop = ast.BinOp(op=op, left=node.left, right=comps[0]) + return self.visit(binop) + + # recursive case: we have a chained comparison, a CMP b CMP c, etc. + left = node.left + values = [] + for op, comp in zip(ops, comps): + new_node = self.visit(ast.Compare(comparators=[comp], left=left, + ops=[self.translate_In(op)])) + left = comp + values.append(new_node) + return self.visit(ast.BoolOp(op=ast.And(), values=values)) + + def _try_visit_binop(self, bop): + if isinstance(bop, (Op, Term)): + return bop + return self.visit(bop) + + def visit_BoolOp(self, node, **kwargs): + def visitor(x, y): + lhs = self._try_visit_binop(x) + rhs = self._try_visit_binop(y) + + op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs, + rhs) + return self._possibly_evaluate_binop(op, node.op, lhs, rhs) + + operands = node.values + return reduce(visitor, operands) + + +_python_not_supported = frozenset(['Assign', 'Dict', 'Call', 'BoolOp', + 'In', 'NotIn']) +_numexpr_supported_calls = frozenset(_reductions + _mathops) + + +@disallow((_unsupported_nodes | _python_not_supported) - + (_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn', + 'Tuple']))) +class PandasExprVisitor(BaseExprVisitor): + def __init__(self, env, engine, parser, + preparser=lambda x: _replace_locals(_replace_booleans(x))): + super(PandasExprVisitor, self).__init__(env, engine, parser, preparser) + + +@disallow(_unsupported_nodes | _python_not_supported | 
frozenset(['Not'])) +class PythonExprVisitor(BaseExprVisitor): + def __init__(self, env, engine, parser, preparser=lambda x: x): + super(PythonExprVisitor, self).__init__(env, engine, parser, + preparser=preparser) + + +class Expr(StringMixin): + """Object encapsulating an expression. + + Parameters + ---------- + expr : str + engine : str, optional, default 'numexpr' + parser : str, optional, default 'pandas' + env : Scope, optional, default None + truediv : bool, optional, default True + level : int, optional, default 2 + """ + def __init__(self, expr, engine='numexpr', parser='pandas', env=None, + truediv=True, level=2): + self.expr = expr + self.env = _ensure_scope(level=level, local_dict=env) + self.engine = engine + self.parser = parser + self._visitor = _parsers[parser](self.env, self.engine, self.parser) + self.terms = self.parse() + self.truediv = truediv + + def __call__(self): + self.env.locals['truediv'] = self.truediv + return self.terms(self.env) + + def __unicode__(self): + return com.pprint_thing(self.terms) + + def __len__(self): + return len(self.expr) + + def parse(self): + """Parse an expression""" + return self._visitor.visit(self.expr) + + def align(self): + """align a set of Terms""" + return self.terms.align(self.env) + + @property + def names(self): + """Get the names in an expression""" + if is_term(self.terms): + return frozenset([self.terms.name]) + return frozenset(term.name for term in com.flatten(self.terms)) + + def check_name_clashes(self): + env = self.env + names = self.names + res_keys = frozenset(env.resolver_dict.keys()) & names + lcl_keys = frozenset(env.locals.keys()) & names + gbl_keys = frozenset(env.globals.keys()) & names + _check_disjoint_resolver_names(res_keys, lcl_keys, gbl_keys) + + def add_resolvers_to_locals(self): + """Add the extra scope (resolvers) to local scope + + Notes + ----- + This should be done after parsing and pre-evaluation, otherwise + unnecessary name clashes will occur. 
+ """ + self.env.locals.update(self.env.resolver_dict) + + +def isexpr(s, check_names=True): + """Strict checking for a valid expression.""" + try: + Expr(s, env=_ensure_scope() if check_names else None) + except SyntaxError: + return False + except NameError: + return not check_names + return True + + +_parsers = {'python': PythonExprVisitor, 'pandas': PandasExprVisitor} diff --git a/pandas/core/expressions.py b/pandas/computation/expressions.py similarity index 67% rename from pandas/core/expressions.py rename to pandas/computation/expressions.py index b1bd104ce48a5..45c9a2d5259cb 100644 --- a/pandas/core/expressions.py +++ b/pandas/computation/expressions.py @@ -5,6 +5,7 @@ Offer fast expression evaluation thru numexpr """ + import numpy as np from pandas.core.common import _values_from_object @@ -15,17 +16,19 @@ _NUMEXPR_INSTALLED = False _USE_NUMEXPR = _NUMEXPR_INSTALLED -_evaluate = None -_where = None +_evaluate = None +_where = None # the set of dtypes that we will allow pass to numexpr -_ALLOWED_DTYPES = dict(evaluate = set(['int64','int32','float64','float32','bool']), - where = set(['int64','float64','bool'])) +_ALLOWED_DTYPES = dict( + evaluate=set(['int64', 'int32', 'float64', 'float32', 'bool']), + where=set(['int64', 'float64', 'bool'])) # the minimum prod shape that we will use numexpr -_MIN_ELEMENTS = 10000 +_MIN_ELEMENTS = 10000 + -def set_use_numexpr(v = True): +def set_use_numexpr(v=True): # set/unset to use numexpr global _USE_NUMEXPR if _NUMEXPR_INSTALLED: @@ -35,26 +38,25 @@ def set_use_numexpr(v = True): global _evaluate, _where if not _USE_NUMEXPR: _evaluate = _evaluate_standard - _where = _where_standard + _where = _where_standard else: _evaluate = _evaluate_numexpr - _where = _where_numexpr + _where = _where_numexpr -def set_numexpr_threads(n = None): + +def set_numexpr_threads(n=None): # if we are using numexpr, set the threads to n # otherwise reset - try: - if _NUMEXPR_INSTALLED and _USE_NUMEXPR: - if n is None: - n = 
ne.detect_number_of_cores() - ne.set_num_threads(n) - except: - pass + if _NUMEXPR_INSTALLED and _USE_NUMEXPR: + if n is None: + n = ne.detect_number_of_cores() + ne.set_num_threads(n) def _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs): """ standard evaluation """ - return op(a,b) + return op(a, b) + def _can_use_numexpr(op, op_str, a, b, dtype_check): """ return a boolean if we WILL be using numexpr """ @@ -65,13 +67,13 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): # check for dtype compatiblity dtypes = set() - for o in [ a, b ]: - if hasattr(o,'get_dtype_counts'): + for o in [a, b]: + if hasattr(o, 'get_dtype_counts'): s = o.get_dtype_counts() if len(s) > 1: return False dtypes |= set(s.index) - elif isinstance(o,np.ndarray): + elif isinstance(o, np.ndarray): dtypes |= set([o.dtype.name]) # allowed are a superset @@ -80,52 +82,54 @@ def _can_use_numexpr(op, op_str, a, b, dtype_check): return False -def _evaluate_numexpr(op, op_str, a, b, raise_on_error = False, **eval_kwargs): + +def _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, **eval_kwargs): result = None if _can_use_numexpr(op, op_str, a, b, 'evaluate'): try: a_value, b_value = a, b - if hasattr(a_value,'values'): + if hasattr(a_value, 'values'): a_value = a_value.values - if hasattr(b_value,'values'): + if hasattr(b_value, 'values'): b_value = b_value.values result = ne.evaluate('a_value %s b_value' % op_str, - local_dict={ 'a_value' : a_value, - 'b_value' : b_value }, + local_dict={'a_value': a_value, + 'b_value': b_value}, casting='safe', **eval_kwargs) except (ValueError) as detail: if 'unknown type object' in str(detail): pass except (Exception) as detail: if raise_on_error: - raise TypeError(str(detail)) + raise if result is None: - result = _evaluate_standard(op,op_str,a,b,raise_on_error) + result = _evaluate_standard(op, op_str, a, b, raise_on_error) return result def _where_standard(cond, a, b, raise_on_error=True): - return 
np.where(_values_from_object(cond), _values_from_object(a), _values_from_object(b)) + return np.where(_values_from_object(cond), _values_from_object(a), + _values_from_object(b)) -def _where_numexpr(cond, a, b, raise_on_error = False): +def _where_numexpr(cond, a, b, raise_on_error=False): result = None if _can_use_numexpr(None, 'where', a, b, 'where'): try: cond_value, a_value, b_value = cond, a, b - if hasattr(cond_value,'values'): + if hasattr(cond_value, 'values'): cond_value = cond_value.values - if hasattr(a_value,'values'): + if hasattr(a_value, 'values'): a_value = a_value.values - if hasattr(b_value,'values'): + if hasattr(b_value, 'values'): b_value = b_value.values - result = ne.evaluate('where(cond_value,a_value,b_value)', - local_dict={ 'cond_value' : cond_value, - 'a_value' : a_value, - 'b_value' : b_value }, + result = ne.evaluate('where(cond_value, a_value, b_value)', + local_dict={'cond_value': cond_value, + 'a_value': a_value, + 'b_value': b_value}, casting='safe') except (ValueError) as detail: if 'unknown type object' in str(detail): @@ -135,7 +139,7 @@ def _where_numexpr(cond, a, b, raise_on_error = False): raise TypeError(str(detail)) if result is None: - result = _where_standard(cond,a,b,raise_on_error) + result = _where_standard(cond, a, b, raise_on_error) return result @@ -143,7 +147,9 @@ def _where_numexpr(cond, a, b, raise_on_error = False): # turn myself on set_use_numexpr(True) -def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, **eval_kwargs): + +def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, + **eval_kwargs): """ evaluate and return the expression of the op on a and b Parameters @@ -153,15 +159,18 @@ def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True, **eval_kw op_str: the string version of the op a : left operand b : right operand - raise_on_error : pass the error to the higher level if indicated (default is False), - otherwise evaluate the op with and return the results 
+ raise_on_error : pass the error to the higher level if indicated + (default is False), otherwise evaluate the op with and + return the results use_numexpr : whether to try to use numexpr (default True) """ if use_numexpr: - return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error, **eval_kwargs) + return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error, + **eval_kwargs) return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error) + def where(cond, a, b, raise_on_error=False, use_numexpr=True): """ evaluate the where condition cond on a and b @@ -171,8 +180,9 @@ def where(cond, a, b, raise_on_error=False, use_numexpr=True): cond : a boolean array a : return if cond is True b : return if cond is False - raise_on_error : pass the error to the higher level if indicated (default is False), - otherwise evaluate the op with and return the results + raise_on_error : pass the error to the higher level if indicated + (default is False), otherwise evaluate the op with and + return the results use_numexpr : whether to try to use numexpr (default True) """ diff --git a/pandas/computation/ops.py b/pandas/computation/ops.py new file mode 100644 index 0000000000000..debc79e33968c --- /dev/null +++ b/pandas/computation/ops.py @@ -0,0 +1,510 @@ +"""Operator classes for eval. 
+""" + +import re +import operator as op +from functools import partial +from itertools import product, islice, chain + +import numpy as np + +import pandas as pd +from pandas.compat import PY3, string_types, text_type +import pandas.core.common as com +from pandas.core.base import StringMixin +from pandas.computation.common import _ensure_decoded + + +_reductions = 'sum', 'prod' +_mathops = ('sin', 'cos', 'exp', 'log', 'expm1', 'log1p', 'pow', 'div', 'sqrt', + 'inv', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos', 'arctan', + 'arccosh', 'arcsinh', 'arctanh', 'arctan2', 'abs') + + +_LOCAL_TAG = '__pd_eval_local_' +_TAG_RE = re.compile('^{0}'.format(_LOCAL_TAG)) + + +class UndefinedVariableError(NameError): + """NameError subclass for local variables.""" + def __init__(self, *args): + msg = 'name {0!r} is not defined' + subbed = _TAG_RE.sub('', args[0]) + if subbed != args[0]: + subbed = '@' + subbed + msg = 'local variable {0!r} is not defined' + super(UndefinedVariableError, self).__init__(msg.format(subbed)) + + +def _possibly_update_key(d, value, old_key, new_key=None): + if new_key is None: + new_key = old_key + + try: + del d[old_key] + except KeyError: + return False + else: + d[new_key] = value + return True + + +class Term(StringMixin): + def __new__(cls, name, env, side=None, encoding=None): + klass = Constant if not isinstance(name, string_types) else cls + supr_new = super(Term, klass).__new__ + if PY3: + return supr_new(klass) + return supr_new(klass, name, env, side=side, encoding=encoding) + + def __init__(self, name, env, side=None, encoding=None): + self._name = name + self.env = env + self.side = side + self.local = _TAG_RE.search(text_type(name)) is not None + self._value = self._resolve_name() + self.encoding = encoding + + @property + def local_name(self): + return _TAG_RE.sub('', self.name) + + def __unicode__(self): + return com.pprint_thing(self.name) + + def __call__(self, *args, **kwargs): + return self.value + + def evaluate(self, *args, 
**kwargs): + return self + + def _resolve_name(self): + env = self.env + key = self.name + res = env.resolve(self.local_name, globally=not self.local) + self.update(res) + + if res is None: + if not isinstance(key, string_types): + return key + raise UndefinedVariableError(key) + + if hasattr(res, 'ndim') and res.ndim > 2: + raise NotImplementedError("N-dimensional objects, where N > 2, are" + " not supported with eval") + return res + + def update(self, value): + """ + search order for local (i.e., @variable) variables: + + scope, key_variable + [('locals', 'local_name'), + ('globals', 'local_name'), + ('locals', 'key'), + ('globals', 'key')] + """ + env = self.env + key = self.name + + # if it's a variable name (otherwise a constant) + if isinstance(key, string_types): + if self.local: + # get it's name WITHOUT the local tag (defined above) + local_name = self.local_name + + # search for the local in the above specified order + scope_pairs = product([env.locals, env.globals], + [local_name, key]) + + # a[::2] + a[1::2] but iterators + scope_iter = chain(islice(scope_pairs, None, None, 2), + islice(scope_pairs, 1, None, 2)) + for d, k in scope_iter: + if _possibly_update_key(d, value, k, key): + break + else: + raise UndefinedVariableError(key) + else: + # otherwise we look in resolvers -> locals -> globals + for r in (env.resolver_dict, env.locals, env.globals): + if _possibly_update_key(r, value, key): + break + else: + raise UndefinedVariableError(key) + + self.value = value + + @property + def isscalar(self): + return np.isscalar(self._value) + + @property + def type(self): + try: + # potentially very slow for large, mixed dtype frames + return self._value.values.dtype + except AttributeError: + try: + # ndarray + return self._value.dtype + except AttributeError: + # scalar + return type(self._value) + + return_type = type + + @property + def raw(self): + return com.pprint_thing('{0}(name={1!r}, type={2})' + ''.format(self.__class__.__name__, self.name, + 
self.type)) + + @property + def kind(self): + try: + return self.type.__name__ + except AttributeError: + return self.type.type.__name__ + + @property + def value(self): + kind = self.kind.lower() + if kind == 'datetime64': + try: + return self._value.asi8 + except AttributeError: + return self._value.view('i8') + elif kind == 'datetime': + return pd.Timestamp(self._value) + elif kind == 'timestamp': + return self._value.asm8.view('i8') + return self._value + + @value.setter + def value(self, new_value): + self._value = new_value + + @property + def name(self): + return self._name + + @name.setter + def name(self, new_name): + self._name = new_name + + @property + def ndim(self): + try: + return self._value.ndim + except AttributeError: + return 0 + + +class Constant(Term): + def __init__(self, value, env, side=None, encoding=None): + super(Constant, self).__init__(value, env, side=side, + encoding=encoding) + + def _resolve_name(self): + return self._name + + @property + def name(self): + return self.value + + + +_bool_op_map = {'not': '~', 'and': '&', 'or': '|'} + + +class Op(StringMixin): + """Hold an operator of unknown arity + """ + def __init__(self, op, operands, *args, **kwargs): + self.op = _bool_op_map.get(op, op) + self.operands = operands + self.encoding = kwargs.get('encoding', None) + + def __iter__(self): + return iter(self.operands) + + def __unicode__(self): + """Print a generic n-ary operator and its operands using infix + notation""" + # recurse over the operands + parened = ('({0})'.format(com.pprint_thing(opr)) + for opr in self.operands) + return com.pprint_thing(' {0} '.format(self.op).join(parened)) + + @property + def return_type(self): + # clobber types to bool if the op is a boolean operator + if self.op in (_cmp_ops_syms + _bool_ops_syms): + return np.bool_ + return np.result_type(*(term.type for term in com.flatten(self))) + + @property + def isscalar(self): + return all(operand.isscalar for operand in self.operands) + + +def _in(x, y): 
+ """Compute the vectorized membership of ``x in y`` if possible, otherwise + use Python. + """ + try: + return x.isin(y) + except AttributeError: + if com.is_list_like(x): + try: + return y.isin(x) + except AttributeError: + pass + return x in y + + +def _not_in(x, y): + """Compute the vectorized membership of ``x not in y`` if possible, + otherwise use Python. + """ + try: + return ~x.isin(y) + except AttributeError: + if com.is_list_like(x): + try: + return ~y.isin(x) + except AttributeError: + pass + return x not in y + + +_cmp_ops_syms = '>', '<', '>=', '<=', '==', '!=', 'in', 'not in' +_cmp_ops_funcs = op.gt, op.lt, op.ge, op.le, op.eq, op.ne, _in, _not_in +_cmp_ops_dict = dict(zip(_cmp_ops_syms, _cmp_ops_funcs)) + +_bool_ops_syms = '&', '|', 'and', 'or' +_bool_ops_funcs = op.and_, op.or_, op.and_, op.or_ +_bool_ops_dict = dict(zip(_bool_ops_syms, _bool_ops_funcs)) + +_arith_ops_syms = '+', '-', '*', '/', '**', '//', '%' +_arith_ops_funcs = (op.add, op.sub, op.mul, op.truediv if PY3 else op.div, + op.pow, op.floordiv, op.mod) +_arith_ops_dict = dict(zip(_arith_ops_syms, _arith_ops_funcs)) + +_special_case_arith_ops_syms = '**', '//', '%' +_special_case_arith_ops_funcs = op.pow, op.floordiv, op.mod +_special_case_arith_ops_dict = dict(zip(_special_case_arith_ops_syms, + _special_case_arith_ops_funcs)) + +_binary_ops_dict = {} + +for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict): + _binary_ops_dict.update(d) + + +def _cast_inplace(terms, dtype): + """Cast an expression inplace. + + Parameters + ---------- + terms : Op + The expression that should cast. + dtype : str or numpy.dtype + The dtype to cast to. 
+ """ + dt = np.dtype(dtype) + for term in terms: + try: + new_value = term.value.astype(dt) + except AttributeError: + new_value = dt.type(term.value) + term.update(new_value) + + +def is_term(obj): + return isinstance(obj, Term) + + +class BinOp(Op): + """Hold a binary operator and its operands + + Parameters + ---------- + op : str + left : Term or Op + right : Term or Op + """ + def __init__(self, op, lhs, rhs, **kwargs): + super(BinOp, self).__init__(op, (lhs, rhs)) + self.lhs = lhs + self.rhs = rhs + + self._disallow_scalar_only_bool_ops() + + self.convert_values() + + try: + self.func = _binary_ops_dict[op] + except KeyError: + # has to be made a list for python3 + keys = list(_binary_ops_dict.keys()) + raise ValueError('Invalid binary operator {0!r}, valid' + ' operators are {1}'.format(op, keys)) + + def __call__(self, env): + """Recursively evaluate an expression in Python space. + + Parameters + ---------- + env : Scope + + Returns + ------- + object + The result of an evaluated expression. + """ + # handle truediv + if self.op == '/' and env.locals['truediv']: + self.func = op.truediv + + # recurse over the left/right nodes + left = self.lhs(env) + right = self.rhs(env) + + return self.func(left, right) + + def evaluate(self, env, engine, parser, term_type, eval_in_python): + """Evaluate a binary operation *before* being passed to the engine. 
+ + Parameters + ---------- + env : Scope + engine : str + parser : str + term_type : type + eval_in_python : list + + Returns + ------- + term_type + The "pre-evaluated" expression as an instance of ``term_type`` + """ + if engine == 'python': + res = self(env) + else: + # recurse over the left/right nodes + left = self.lhs.evaluate(env, engine=engine, parser=parser, + term_type=term_type, + eval_in_python=eval_in_python) + right = self.rhs.evaluate(env, engine=engine, parser=parser, + term_type=term_type, + eval_in_python=eval_in_python) + + # base cases + if self.op in eval_in_python: + res = self.func(left.value, right.value) + else: + res = pd.eval(self, local_dict=env, engine=engine, + parser=parser) + + name = env.add_tmp(res) + return term_type(name, env=env) + + def convert_values(self): + """Convert datetimes to a comparable value in an expression. + """ + def stringify(value): + if self.encoding is not None: + encoder = partial(com.pprint_thing_encoded, + encoding=self.encoding) + else: + encoder = com.pprint_thing + return encoder(value) + + lhs, rhs = self.lhs, self.rhs + + if (is_term(lhs) and lhs.kind.startswith('datetime') and is_term(rhs) + and rhs.isscalar): + v = rhs.value + if isinstance(v, (int, float)): + v = stringify(v) + v = _ensure_decoded(v) + v = pd.Timestamp(v) + if v.tz is not None: + v = v.tz_convert('UTC') + self.rhs.update(v) + + if (is_term(rhs) and rhs.kind.startswith('datetime') and + is_term(lhs) and lhs.isscalar): + v = lhs.value + if isinstance(v, (int, float)): + v = stringify(v) + v = _ensure_decoded(v) + v = pd.Timestamp(v) + if v.tz is not None: + v = v.tz_convert('UTC') + self.lhs.update(v) + + def _disallow_scalar_only_bool_ops(self): + if ((self.lhs.isscalar or self.rhs.isscalar) and + self.op in _bool_ops_dict and + (not (issubclass(self.rhs.return_type, (bool, np.bool_)) and + issubclass(self.lhs.return_type, (bool, np.bool_))))): + raise NotImplementedError("cannot evaluate scalar only bool ops") + + +class 
Div(BinOp): + """Div operator to special case casting. + + Parameters + ---------- + lhs, rhs : Term or Op + The Terms or Ops in the ``/`` expression. + truediv : bool + Whether or not to use true division. With Python 3 this happens + regardless of the value of ``truediv``. + """ + def __init__(self, lhs, rhs, truediv=True, *args, **kwargs): + super(Div, self).__init__('/', lhs, rhs, *args, **kwargs) + + if truediv or PY3: + _cast_inplace(com.flatten(self), np.float_) + + +_unary_ops_syms = '+', '-', '~', 'not' +_unary_ops_funcs = op.pos, op.neg, op.invert, op.invert +_unary_ops_dict = dict(zip(_unary_ops_syms, _unary_ops_funcs)) + + +class UnaryOp(Op): + """Hold a unary operator and its operands + + Parameters + ---------- + op : str + The token used to represent the operator. + operand : Term or Op + The Term or Op operand to the operator. + + Raises + ------ + ValueError + * If no function associated with the passed operator token is found. + """ + def __init__(self, op, operand): + super(UnaryOp, self).__init__(op, (operand,)) + self.operand = operand + + try: + self.func = _unary_ops_dict[op] + except KeyError: + raise ValueError('Invalid unary operator {0!r}, valid operators ' + 'are {1}'.format(op, _unary_ops_syms)) + + def __call__(self, env): + operand = self.operand(env) + return self.func(operand) + + def __unicode__(self): + return com.pprint_thing('{0}({1})'.format(self.op, self.operand)) diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py new file mode 100644 index 0000000000000..9ffae5edd93bc --- /dev/null +++ b/pandas/computation/pytables.py @@ -0,0 +1,573 @@ +""" manage PyTables query interface via Expressions """ + +import ast +import time +import warnings +from functools import partial +from datetime import datetime + +import pandas as pd +from pandas.compat import u, string_types, PY3 +from pandas.core.base import StringMixin +import pandas.core.common as com +from pandas.computation import expr, ops +from 
pandas.computation.ops import is_term +from pandas.computation.expr import BaseExprVisitor +from pandas.computation.common import _ensure_decoded +from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type + +class Scope(expr.Scope): + __slots__ = 'globals', 'locals', 'queryables' + + def __init__(self, gbls=None, lcls=None, queryables=None, level=1): + super( + Scope, + self).__init__(gbls=gbls, + lcls=lcls, + level=level) + self.queryables = queryables or dict() + + +class Term(ops.Term): + def __new__(cls, name, env, side=None, encoding=None): + klass = Constant if not isinstance(name, string_types) else cls + supr_new = StringMixin.__new__ + if PY3: + return supr_new(klass) + return supr_new(klass, name, env, side=side, encoding=encoding) + + def __init__(self, name, env, side=None, encoding=None): + super(Term, self).__init__(name, env, side=side, encoding=encoding) + + def _resolve_name(self): + # must be a queryables + if self.side == 'left': + if self.name not in self.env.queryables: + raise NameError('name {0!r} is not defined'.format(self.name)) + return self.name + + # resolve the rhs (and allow to be None) + return self.env.locals.get(self.name, + self.env.globals.get(self.name, self.name)) + + @property + def value(self): + return self._value + + +class Constant(Term): + def __init__(self, value, env, side=None, encoding=None): + super(Constant, self).__init__(value, env, side=side, + encoding=encoding) + + def _resolve_name(self): + return self._name + + @property + def name(self): + return self._value + + +class BinOp(ops.BinOp): + + _max_selectors = 31 + + def __init__(self, op, lhs, rhs, queryables, encoding): + super(BinOp, self).__init__(op, lhs, rhs) + self.queryables = queryables + self.encoding = encoding + self.filter = None + self.condition = None + + def _disallow_scalar_only_bool_ops(self): + pass + + def prune(self, klass): + + def pr(left, right): + """ create and return a new specilized BinOp from myself """ + + if left is 
None: + return right + elif right is None: + return left + + k = klass + if isinstance(left, ConditionBinOp): + if (isinstance(left, ConditionBinOp) and + isinstance(right, ConditionBinOp)): + k = JointConditionBinOp + elif isinstance(left, k): + return left + elif isinstance(right, k): + return right + + elif isinstance(left, FilterBinOp): + if (isinstance(left, FilterBinOp) and + isinstance(right, FilterBinOp)): + k = JointFilterBinOp + elif isinstance(left, k): + return left + elif isinstance(right, k): + return right + + return k(self.op, left, right, queryables=self.queryables, + encoding=self.encoding).evaluate() + + left, right = self.lhs, self.rhs + + if is_term(left) and is_term(right): + res = pr(left.value, right.value) + elif not is_term(left) and is_term(right): + res = pr(left.prune(klass), right.value) + elif is_term(left) and not is_term(right): + res = pr(left.value, right.prune(klass)) + elif not (is_term(left) or is_term(right)): + res = pr(left.prune(klass), right.prune(klass)) + + return res + + def conform(self, rhs): + """ inplace conform rhs """ + if not com.is_list_like(rhs): + rhs = [rhs] + if hasattr(self.rhs, 'ravel'): + rhs = rhs.ravel() + return rhs + + @property + def is_valid(self): + """ return True if this is a valid field """ + return self.lhs in self.queryables + + @property + def is_in_table(self): + """ return True if this is a valid column name for generation (e.g. 
an + actual column in the table) """ + return self.queryables.get(self.lhs) is not None + + @property + def kind(self): + """ the kind of my field """ + return self.queryables.get(self.lhs) + + def generate(self, v): + """ create and return the op string for this TermValue """ + val = v.tostring(self.encoding) + return "(%s %s %s)" % (self.lhs, self.op, val) + + def convert_value(self, v): + """ convert the expression that is in the term to something that is + accepted by pytables """ + + def stringify(value): + if self.encoding is not None: + encoder = partial(com.pprint_thing_encoded, + encoding=self.encoding) + else: + encoder = com.pprint_thing + return encoder(value) + + kind = _ensure_decoded(self.kind) + if kind == u('datetime64') or kind == u('datetime'): + if isinstance(v, (int, float)): + v = stringify(v) + v = _ensure_decoded(v) + v = pd.Timestamp(v) + if v.tz is not None: + v = v.tz_convert('UTC') + return TermValue(v, v.value, kind) + elif isinstance(v, datetime) or hasattr(v, 'timetuple') or kind == u('date'): + v = time.mktime(v.timetuple()) + return TermValue(v, pd.Timestamp(v), kind) + elif kind == u('timedelta64') or kind == u('timedelta'): + v = _coerce_scalar_to_timedelta_type(v,unit='s').item() + return TermValue(int(v), v, kind) + elif kind == u('integer'): + v = int(float(v)) + return TermValue(v, v, kind) + elif kind == u('float'): + v = float(v) + return TermValue(v, v, kind) + elif kind == u('bool'): + if isinstance(v, string_types): + v = not v.strip().lower() in [u('false'), u('f'), u('no'), + u('n'), u('none'), u('0'), + u('[]'), u('{}'), u('')] + else: + v = bool(v) + return TermValue(v, v, kind) + elif not isinstance(v, string_types): + v = stringify(v) + return TermValue(v, stringify(v), u('string')) + + # string quoting + return TermValue(v, stringify(v), u('string')) + + def convert_values(self): + pass + + +class FilterBinOp(BinOp): + + def __unicode__(self): + return com.pprint_thing("[Filter : [{0}] -> " + 
"[{1}]".format(self.filter[0], self.filter[1])) + + def invert(self): + """ invert the filter """ + if self.filter is not None: + f = list(self.filter) + f[1] = self.generate_filter_op(invert=True) + self.filter = tuple(f) + return self + + def format(self): + """ return the actual filter format """ + return [self.filter] + + def evaluate(self): + + if not isinstance(self.lhs, string_types): + return self + + if not self.is_valid: + raise ValueError("query term is not valid [%s]" % self) + + rhs = self.conform(self.rhs) + values = [TermValue(v, v, self.kind) for v in rhs] + + if self.is_in_table: + + # if too many values to create the expression, use a filter instead + if self.op in ['==', '!='] and len(values) > self._max_selectors: + + filter_op = self.generate_filter_op() + self.filter = ( + self.lhs, + filter_op, + pd.Index([v.value for v in values])) + + return self + return None + + # equality conditions + if self.op in ['==', '!=']: + + filter_op = self.generate_filter_op() + self.filter = ( + self.lhs, + filter_op, + pd.Index([v.value for v in values])) + + else: + raise TypeError( + "passing a filterable condition to a non-table indexer [%s]" % + self) + + return self + + def generate_filter_op(self, invert=False): + if (self.op == '!=' and not invert) or (self.op == '==' and invert): + return lambda axis, vals: ~axis.isin(vals) + else: + return lambda axis, vals: axis.isin(vals) + + +class JointFilterBinOp(FilterBinOp): + + def format(self): + raise NotImplementedError("unable to collapse Joint Filters") + + def evaluate(self): + return self + + +class ConditionBinOp(BinOp): + + def __unicode__(self): + return com.pprint_thing("[Condition : [{0}]]".format(self.condition)) + + def invert(self): + """ invert the condition """ + #if self.condition is not None: + # self.condition = "~(%s)" % self.condition + #return self + raise NotImplementedError("cannot use an invert condition when passing to numexpr") + + def format(self): + """ return the actual ne 
format """ + return self.condition + + def evaluate(self): + + if not isinstance(self.lhs, string_types): + return self + + if not self.is_valid: + raise ValueError("query term is not valid [%s]" % self) + + # convert values if we are in the table + if not self.is_in_table: + return None + + rhs = self.conform(self.rhs) + values = [self.convert_value(v) for v in rhs] + + # equality conditions + if self.op in ['==', '!=']: + + # too many values to create the expression? + if len(values) <= self._max_selectors: + vs = [self.generate(v) for v in values] + self.condition = "(%s)" % ' | '.join(vs) + + # use a filter after reading + else: + return None + else: + self.condition = self.generate(values[0]) + + return self + + +class JointConditionBinOp(ConditionBinOp): + + def evaluate(self): + self.condition = "(%s %s %s)" % ( + self.lhs.condition, + self.op, + self.rhs.condition) + return self + + +class UnaryOp(ops.UnaryOp): + + def prune(self, klass): + + if self.op != '~': + raise NotImplementedError("UnaryOp only support invert type ops") + + operand = self.operand + operand = operand.prune(klass) + + if operand is not None: + if issubclass(klass,ConditionBinOp): + if operand.condition is not None: + return operand.invert() + elif issubclass(klass,FilterBinOp): + if operand.filter is not None: + return operand.invert() + + return None + + +_op_classes = {'unary': UnaryOp} + +class ExprVisitor(BaseExprVisitor): + const_type = Constant + term_type = Term + + def __init__(self, env, engine, parser, **kwargs): + super(ExprVisitor, self).__init__(env, engine, parser) + for bin_op in self.binary_ops: + setattr(self, 'visit_{0}'.format(self.binary_op_nodes_map[bin_op]), + lambda node, bin_op=bin_op: partial(BinOp, bin_op, + **kwargs)) + + def visit_UnaryOp(self, node, **kwargs): + if isinstance(node.op, (ast.Not, ast.Invert)): + return UnaryOp('~', self.visit(node.operand)) + elif isinstance(node.op, ast.USub): + return self.const_type(-self.visit(node.operand).value, 
self.env) + elif isinstance(node.op, ast.UAdd): + raise NotImplementedError('Unary addition not supported') + + def visit_USub(self, node, **kwargs): + return self.const_type(-self.visit(node.operand).value, self.env) + + def visit_Index(self, node, **kwargs): + return self.visit(node.value).value + + def visit_Subscript(self, node, **kwargs): + value = self.visit(node.value) + slobj = self.visit(node.slice) + try: + return self.const_type(value[slobj], self.env) + except TypeError: + raise ValueError("cannot subscript {0!r} with " + "{1!r}".format(value, slobj)) + + def visit_Attribute(self, node, **kwargs): + attr = node.attr + value = node.value + + ctx = node.ctx.__class__ + if ctx == ast.Load: + # resolve the value + resolved = self.visit(value).value + try: + return getattr(resolved, attr) + except AttributeError: + + # something like datetime.datetime where scope is overriden + if isinstance(value, ast.Name) and value.id == attr: + return resolved + + raise ValueError("Invalid Attribute context {0}".format(ctx.__name__)) + + def translate_In(self, op): + return ast.Eq() if isinstance(op, ast.In) else op + + def _rewrite_membership_op(self, node, left, right): + return self.visit(node.op), node.op, left, right + + +class Expr(expr.Expr): + + """ hold a pytables like expression, comprised of possibly multiple 'terms' + + Parameters + ---------- + where : string term expression, Expr, or list-like of Exprs + queryables : a "kinds" map (dict of column name -> kind), or None if column is non-indexable + encoding : an encoding that will encode the query terms + + Returns + ------- + an Expr object + + Examples + -------- + + 'index>=date' + "columns=['A', 'D']" + 'columns=A' + 'columns==A' + "~(columns=['A','B'])" + 'index>df.index[3] & string="bar"' + '(index>df.index[3] & index<=df.index[6]) | string="bar"' + "ts>=Timestamp('2012-02-01')" + "major_axis>=20130101" + """ + + def __init__(self, where, op=None, value=None, queryables=None, + encoding=None, 
scope_level=None): + + # try to be back compat + where = self.parse_back_compat(where, op, value) + + self.encoding = encoding + self.condition = None + self.filter = None + self.terms = None + self._visitor = None + + # capture the environement if needed + lcls = dict() + if isinstance(where, Expr): + + lcls.update(where.env.locals) + where = where.expr + + elif isinstance(where, (list, tuple)): + + for w in where: + if isinstance(w, Expr): + lcls.update(w.env.locals) + else: + w = self.parse_back_compat(w) + + where = ' & ' .join(["(%s)" % w for w in where]) + + self.expr = where + self.env = Scope(lcls=lcls) + self.env.update(scope_level) + + if queryables is not None and isinstance(self.expr, string_types): + self.env.queryables.update(queryables) + self._visitor = ExprVisitor(self.env, queryables=queryables, + parser='pytables', engine='pytables', + encoding=encoding) + self.terms = self.parse() + + def parse_back_compat(self, w, op=None, value=None): + """ allow backward compatibility for passed arguments """ + + if isinstance(w, dict): + w, op, value = w.get('field'), w.get('op'), w.get('value') + if not isinstance(w, string_types): + raise TypeError( + "where must be passed as a string if op/value are passed") + warnings.warn("passing a dict to Expr is deprecated, " + "pass the where as a single string", + DeprecationWarning) + + if op is not None: + if not isinstance(w, string_types): + raise TypeError( + "where must be passed as a string if op/value are passed") + + if isinstance(op, Expr): + raise TypeError("invalid op passed, must be a string") + w = "{0}{1}".format(w, op) + if value is not None: + if isinstance(value, Expr): + raise TypeError("invalid value passed, must be a string") + w = "{0}{1}".format(w, value) + + warnings.warn("passing multiple values to Expr is deprecated, " + "pass the where as a single string", + DeprecationWarning) + + return w + + def __unicode__(self): + if self.terms is not None: + return com.pprint_thing(self.terms) + 
return com.pprint_thing(self.expr) + + def evaluate(self): + """ create and return the numexpr condition and filter """ + + try: + self.condition = self.terms.prune(ConditionBinOp) + except AttributeError: + raise ValueError( + "cannot process expression [{0}], [{1}] is not a valid condition".format(self.expr,self)) + try: + self.filter = self.terms.prune(FilterBinOp) + except AttributeError: + raise ValueError( + "cannot process expression [{0}], [{1}] is not a valid filter".format(self.expr,self)) + + return self.condition, self.filter + + +class TermValue(object): + + """ hold a term value the we use to construct a condition/filter """ + + def __init__(self, value, converted, kind): + self.value = value + self.converted = converted + self.kind = kind + + def tostring(self, encoding): + """ quote the string if not encoded + else encode and return """ + if self.kind == u('string'): + if encoding is not None: + return self.converted + return '"%s"' % self.converted + return self.converted + + +def maybe_expression(s): + """ loose checking if s is a pytables-acceptable expression """ + if not isinstance(s, string_types): + return False + ops = ExprVisitor.binary_ops + ExprVisitor.unary_ops + ('=',) + + # make sure we have an op at least + return any(op in s for op in ops) diff --git a/pandas/computation/tests/__init__.py b/pandas/computation/tests/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py new file mode 100755 index 0000000000000..d5bcf85d4de03 --- /dev/null +++ b/pandas/computation/tests/test_eval.py @@ -0,0 +1,1473 @@ +#!/usr/bin/env python + +import unittest +import functools +import numbers +from itertools import product +import ast + +import nose +from nose.tools import assert_raises, assert_true, assert_false, assert_equal + +from numpy.random import randn, rand, randint +import numpy as np +from numpy.testing import assert_array_equal, 
assert_allclose +from numpy.testing.decorators import slow + +import pandas as pd +from pandas.core import common as com +from pandas import DataFrame, Series, Panel, date_range +from pandas.util.testing import makeCustomDataframe as mkdf + +from pandas.computation import pytables +from pandas.computation.expressions import _USE_NUMEXPR +from pandas.computation.engines import _engines +from pandas.computation.expr import PythonExprVisitor, PandasExprVisitor +from pandas.computation.ops import (_binary_ops_dict, _unary_ops_dict, + _special_case_arith_ops_syms, + _arith_ops_syms, _bool_ops_syms) +import pandas.computation.expr as expr +import pandas.util.testing as tm +from pandas.util.testing import (assert_frame_equal, randbool, + assertRaisesRegexp, + assert_produces_warning, assert_series_equal) +from pandas.compat import PY3, u + +_series_frame_incompatible = _bool_ops_syms +_scalar_skip = 'in', 'not in' + +def skip_if_no_ne(engine='numexpr'): + if not _USE_NUMEXPR and engine == 'numexpr': + raise nose.SkipTest("numexpr engine not installed or disabled") + + +def engine_has_neg_frac(engine): + return _engines[engine].has_neg_frac + + +def _eval_single_bin(lhs, cmp1, rhs, engine): + c = _binary_ops_dict[cmp1] + if engine_has_neg_frac(engine): + try: + return c(lhs, rhs) + except ValueError as e: + try: + msg = e.message + except AttributeError: + msg = e + msg = u(msg) + if msg == u('negative number cannot be raised to a fractional' + ' power'): + return np.nan + raise + return c(lhs, rhs) + + +def _series_and_2d_ndarray(lhs, rhs): + return ((isinstance(lhs, Series) and + isinstance(rhs, np.ndarray) and rhs.ndim > 1) + or (isinstance(rhs, Series) and + isinstance(lhs, np.ndarray) and lhs.ndim > 1)) + + +def _series_and_frame(lhs, rhs): + return ((isinstance(lhs, Series) and isinstance(rhs, DataFrame)) + or (isinstance(rhs, Series) and isinstance(lhs, DataFrame))) + + +def _bool_and_frame(lhs, rhs): + return isinstance(lhs, bool) and isinstance(rhs, 
pd.core.generic.NDFrame) + + +def skip_incompatible_operand(f): + @functools.wraps(f) + def wrapper(self, lhs, arith1, rhs, *args, **kwargs): + if _series_and_2d_ndarray(lhs, rhs): + self.assertRaises(Exception, pd.eval, 'lhs {0} rhs'.format(arith1), + local_dict={'lhs': lhs, 'rhs': rhs}, + engine=self.engine, parser=self.parser) + elif (np.isscalar(lhs) and np.isscalar(rhs) and arith1 in + _bool_ops_syms): + with tm.assertRaises(NotImplementedError): + pd.eval('lhs {0} rhs'.format(arith1), engine=self.engine, + parser=self.parser) + else: + f(self, lhs, arith1, rhs, *args, **kwargs) + return wrapper + + +def _is_py3_complex_incompat(result, expected): + return (PY3 and isinstance(expected, (complex, np.complexfloating)) and + np.isnan(result)) + + +_good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms) + + +class TestEvalNumexprPandas(unittest.TestCase): + @classmethod + def setUpClass(cls): + skip_if_no_ne() + import numexpr as ne + cls.ne = ne + cls.engine = 'numexpr' + cls.parser = 'pandas' + + @classmethod + def tearDownClass(cls): + del cls.engine, cls.parser + if hasattr(cls, 'ne'): + del cls.ne + + def setup_data(self): + nan_df1 = DataFrame(rand(10, 5)) + nan_df1[nan_df1 > 0.5] = np.nan + nan_df2 = DataFrame(rand(10, 5)) + nan_df2[nan_df2 > 0.5] = np.nan + + self.pandas_lhses = (DataFrame(randn(10, 5)), Series(randn(5)), + Series([1, 2, np.nan, np.nan, 5]), nan_df1) + self.pandas_rhses = (DataFrame(randn(10, 5)), Series(randn(5)), + Series([1, 2, np.nan, np.nan, 5]), nan_df2) + self.scalar_lhses = randn(), np.float64(randn()), np.nan + self.scalar_rhses = randn(), np.float64(randn()), np.nan + + self.lhses = self.pandas_lhses + self.scalar_lhses + self.rhses = self.pandas_rhses + self.scalar_rhses + + def setup_ops(self): + self.cmp_ops = expr._cmp_ops_syms + self.cmp2_ops = self.cmp_ops[::-1] + self.bin_ops = expr._bool_ops_syms + self.special_case_ops = _special_case_arith_ops_syms + self.arith_ops = _good_arith_ops + 
self.unary_ops = '-', '~', 'not ' + + def setUp(self): + self.setup_ops() + self.setup_data() + self.current_engines = filter(lambda x: x != self.engine, _engines) + + def tearDown(self): + del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses + del self.pandas_rhses, self.pandas_lhses, self.current_engines + + @slow + def test_complex_cmp_ops(self): + for lhs, cmp1, rhs, binop, cmp2 in product(self.lhses, self.cmp_ops, + self.rhses, self.bin_ops, + self.cmp2_ops): + self.check_complex_cmp_op(lhs, cmp1, rhs, binop, cmp2) + + def test_simple_cmp_ops(self): + bool_lhses = (DataFrame(randbool(size=(10, 5))), + Series(randbool((5,))), randbool()) + bool_rhses = (DataFrame(randbool(size=(10, 5))), + Series(randbool((5,))), randbool()) + for lhs, rhs, cmp_op in product(bool_lhses, bool_rhses, self.cmp_ops): + self.check_simple_cmp_op(lhs, cmp_op, rhs) + + @slow + def test_binary_arith_ops(self): + for lhs, op, rhs in product(self.lhses, self.arith_ops, self.rhses): + self.check_binary_arith_op(lhs, op, rhs) + + def test_modulus(self): + for lhs, rhs in product(self.lhses, self.rhses): + self.check_modulus(lhs, '%', rhs) + + def test_floor_division(self): + for lhs, rhs in product(self.lhses, self.rhses): + self.check_floor_division(lhs, '//', rhs) + + @slow + def test_pow(self): + for lhs, rhs in product(self.lhses, self.rhses): + self.check_pow(lhs, '**', rhs) + + @slow + def test_single_invert_op(self): + for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses): + self.check_single_invert_op(lhs, op, rhs) + + @slow + def test_compound_invert_op(self): + for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses): + self.check_compound_invert_op(lhs, op, rhs) + + @slow + def test_chained_cmp_op(self): + mids = self.lhses + cmp_ops = tuple(set(self.cmp_ops) - set(['==', '!=', '<=', '>='])) + for lhs, cmp1, mid, cmp2, rhs in product(self.lhses, cmp_ops, + mids, cmp_ops, self.rhses): + self.check_chained_cmp_op(lhs, cmp1, mid, cmp2, rhs) + + def 
check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2): + skip_these = 'in', 'not in' + ex = '(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)'.format(cmp1=cmp1, + binop=binop, + cmp2=cmp2) + scalar_with_in_notin = (np.isscalar(rhs) and (cmp1 in skip_these or + cmp2 in skip_these)) + if scalar_with_in_notin: + self.assertRaises(TypeError, pd.eval, ex, engine=self.engine, + parser=self.parser, local_dict={'lhs': lhs, + 'rhs': rhs}) + elif (_series_and_frame(lhs, rhs) and (cmp1 in + _series_frame_incompatible or + cmp2 in _series_frame_incompatible)): + self.assertRaises(TypeError, pd.eval, ex, + local_dict={'lhs': lhs, 'rhs': rhs}, + engine=self.engine, parser=self.parser) + elif _bool_and_frame(lhs, rhs): + self.assertRaises(TypeError, _eval_single_bin, lhs_new, '&', + rhs_new, self.engine) + self.assertRaises(TypeError, pd.eval, ex, + local_dict={'lhs': lhs, 'rhs': rhs}, + engine=self.engine, parser=self.parser) + elif (np.isscalar(lhs) and np.isnan(lhs) and + not np.isscalar(rhs) and (cmp1 in skip_these or cmp2 in + skip_these)): + with tm.assertRaises(TypeError): + _eval_single_bin(lhs, binop, rhs, self.engine) + else: + lhs_new = _eval_single_bin(lhs, cmp1, rhs, self.engine) + rhs_new = _eval_single_bin(lhs, cmp2, rhs, self.engine) + if (isinstance(lhs_new, Series) and isinstance(rhs_new, DataFrame) + and binop in _series_frame_incompatible): + pass + # TODO: the code below should be added back when left and right + # hand side bool ops are fixed. 
+ + #try: + #self.assertRaises(Exception, pd.eval, ex, + #local_dict={'lhs': lhs, 'rhs': rhs}, + #engine=self.engine, parser=self.parser) + #except AssertionError: + #import ipdb; ipdb.set_trace() + #raise + elif (np.isscalar(lhs_new) and np.isnan(lhs_new) and + not np.isscalar(rhs_new) and binop in skip_these): + with tm.assertRaises(TypeError): + _eval_single_bin(lhs_new, binop, rhs_new, self.engine) + elif _bool_and_frame(lhs_new, rhs_new): + with tm.assertRaises(TypeError): + _eval_single_bin(lhs_new, binop, rhs_new, self.engine) + with tm.assertRaises(TypeError): + pd.eval('lhs_new & rhs_new'.format(binop), + engine=self.engine, parser=self.parser) + else: + expected = _eval_single_bin(lhs_new, binop, rhs_new, self.engine) + result = pd.eval(ex, engine=self.engine, parser=self.parser) + assert_array_equal(result, expected) + + @skip_incompatible_operand + def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs): + skip_these = 'in', 'not in' + + def check_operands(left, right, cmp_op): + if (np.isscalar(left) and np.isnan(left) and not np.isscalar(right) + and cmp_op in skip_these): + ex = 'left {0} right'.format(cmp_op) + with tm.assertRaises(ValueError): + pd.eval(ex, engine=self.engine, parser=self.parser) + return + if (np.isscalar(left) and np.isscalar(right) and + cmp_op in _bool_ops_syms): + ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2) + ex2 = 'lhs {0} mid and mid {1} rhs'.format(cmp1, cmp2) + ex3 = '(lhs {0} mid) & (mid {1} rhs)'.format(cmp1, cmp2) + for ex in (ex1, ex2, ex3): + with assertRaises(NotImplementedError): + pd.eval(ex, engine=self.engine, parser=self.parser) + return + if (np.isscalar(right) and not np.isscalar(left) and cmp_op in + skip_these): + self.assertRaises(Exception, _eval_single_bin, left, cmp_op, + right, self.engine) + elif _series_and_2d_ndarray(right, left): + self.assertRaises(Exception, _eval_single_bin, right, cmp_op, + left, self.engine) + elif (np.isscalar(right) and np.isscalar(left) and cmp_op in + skip_these): + 
self.assertRaises(Exception, _eval_single_bin, right, cmp_op, + left, self.engine) + else: + new = _eval_single_bin(left, cmp_op, right, self.engine) + return new + return + + lhs_new = check_operands(lhs, mid, cmp1) + rhs_new = check_operands(mid, rhs, cmp2) + + if lhs_new is not None and rhs_new is not None: + # these are not compatible operands + if isinstance(lhs_new, Series) and isinstance(rhs_new, DataFrame): + self.assertRaises(TypeError, _eval_single_bin, lhs_new, '&', + rhs_new, self.engine) + elif (_bool_and_frame(lhs_new, rhs_new)): + self.assertRaises(TypeError, _eval_single_bin, lhs_new, '&', + rhs_new, self.engine) + elif _series_and_2d_ndarray(lhs_new, rhs_new): + # TODO: once #4319 is fixed add this test back in + #self.assertRaises(Exception, _eval_single_bin, lhs_new, '&', + #rhs_new, self.engine) + pass + else: + ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2) + ex2 = 'lhs {0} mid and mid {1} rhs'.format(cmp1, cmp2) + ex3 = '(lhs {0} mid) & (mid {1} rhs)'.format(cmp1, cmp2) + try: + expected = _eval_single_bin(lhs_new, '&', rhs_new, self.engine) + except TypeError: + import ipdb; ipdb.set_trace() + raise + + for ex in (ex1, ex2, ex3): + result = pd.eval(ex, engine=self.engine, + parser=self.parser) + assert_array_equal(result, expected) + + @skip_incompatible_operand + def check_simple_cmp_op(self, lhs, cmp1, rhs): + ex = 'lhs {0} rhs'.format(cmp1) + if cmp1 in ('in', 'not in') and not com.is_list_like(rhs): + self.assertRaises(TypeError, pd.eval, ex, engine=self.engine, + parser=self.parser, local_dict={'lhs': lhs, + 'rhs': rhs}) + else: + expected = _eval_single_bin(lhs, cmp1, rhs, self.engine) + result = pd.eval(ex, engine=self.engine, parser=self.parser) + assert_array_equal(result, expected) + + @skip_incompatible_operand + def check_binary_arith_op(self, lhs, arith1, rhs): + ex = 'lhs {0} rhs'.format(arith1) + result = pd.eval(ex, engine=self.engine, parser=self.parser) + expected = _eval_single_bin(lhs, arith1, rhs, self.engine) + 
assert_array_equal(result, expected) + ex = 'lhs {0} rhs {0} rhs'.format(arith1) + result = pd.eval(ex, engine=self.engine, parser=self.parser) + nlhs = _eval_single_bin(lhs, arith1, rhs, + self.engine) + self.check_alignment(result, nlhs, rhs, arith1) + + def check_alignment(self, result, nlhs, ghs, op): + try: + nlhs, ghs = nlhs.align(ghs) + except (ValueError, TypeError, AttributeError): + # ValueError: series frame or frame series align + # TypeError, AttributeError: series or frame with scalar align + pass + else: + expected = self.ne.evaluate('nlhs {0} ghs'.format(op)) + assert_array_equal(result, expected) + + # the following 3 tests require special casing + + @skip_incompatible_operand + def check_modulus(self, lhs, arith1, rhs): + ex = 'lhs {0} rhs'.format(arith1) + result = pd.eval(ex, engine=self.engine, parser=self.parser) + expected = lhs % rhs + assert_allclose(result, expected) + expected = self.ne.evaluate('expected {0} rhs'.format(arith1)) + assert_allclose(result, expected) + + @skip_incompatible_operand + def check_floor_division(self, lhs, arith1, rhs): + ex = 'lhs {0} rhs'.format(arith1) + + if self.engine == 'python': + res = pd.eval(ex, engine=self.engine, parser=self.parser) + expected = lhs // rhs + assert_array_equal(res, expected) + else: + self.assertRaises(TypeError, pd.eval, ex, local_dict={'lhs': lhs, + 'rhs': rhs}, + engine=self.engine, parser=self.parser) + + def get_expected_pow_result(self, lhs, rhs): + try: + expected = _eval_single_bin(lhs, '**', rhs, self.engine) + except ValueError as e: + msg = 'negative number cannot be raised to a fractional power' + try: + emsg = e.message + except AttributeError: + emsg = e + + emsg = u(emsg) + + if emsg == msg: + if self.engine == 'python': + raise nose.SkipTest(emsg) + else: + expected = np.nan + else: + raise + return expected + + @skip_incompatible_operand + def check_pow(self, lhs, arith1, rhs): + ex = 'lhs {0} rhs'.format(arith1) + expected = self.get_expected_pow_result(lhs, rhs) + 
result = pd.eval(ex, engine=self.engine, parser=self.parser) + + if (np.isscalar(lhs) and np.isscalar(rhs) and + _is_py3_complex_incompat(result, expected)): + self.assertRaises(AssertionError, assert_array_equal, result, + expected) + else: + assert_array_equal(result, expected) + + ex = '(lhs {0} rhs) {0} rhs'.format(arith1) + result = pd.eval(ex, engine=self.engine, parser=self.parser) + expected = self.get_expected_pow_result( + self.get_expected_pow_result(lhs, rhs), rhs) + assert_array_equal(result, expected) + + @skip_incompatible_operand + def check_single_invert_op(self, lhs, cmp1, rhs): + # simple + for el in (lhs, rhs): + try: + elb = el.astype(bool) + except AttributeError: + elb = np.array([bool(el)]) + expected = ~elb + result = pd.eval('~elb', engine=self.engine, parser=self.parser) + assert_array_equal(expected, result) + + for engine in self.current_engines: + skip_if_no_ne(engine) + assert_array_equal(result, pd.eval('~elb', engine=engine, + parser=self.parser)) + + @skip_incompatible_operand + def check_compound_invert_op(self, lhs, cmp1, rhs): + skip_these = 'in', 'not in' + ex = '~(lhs {0} rhs)'.format(cmp1) + + if np.isscalar(rhs) and cmp1 in skip_these: + self.assertRaises(TypeError, pd.eval, ex, engine=self.engine, + parser=self.parser, local_dict={'lhs': lhs, + 'rhs': rhs}) + elif (np.isscalar(lhs) and np.isnan(lhs) and not np.isscalar(rhs) + and cmp1 in skip_these): + with tm.assertRaises(ValueError): + pd.eval(ex, engine=self.engine, parser=self.parser) + else: + # compound + if np.isscalar(lhs) and np.isscalar(rhs): + lhs, rhs = map(lambda x: np.array([x]), (lhs, rhs)) + expected = _eval_single_bin(lhs, cmp1, rhs, self.engine) + if np.isscalar(expected): + expected = not expected + else: + expected = ~expected + result = pd.eval(ex, engine=self.engine, parser=self.parser) + assert_array_equal(expected, result) + + # make sure the other engines work the same as this one + for engine in self.current_engines: + skip_if_no_ne(engine) + ev = 
pd.eval(ex, engine=self.engine, parser=self.parser) + assert_array_equal(ev, result) + + def ex(self, op, var_name='lhs'): + return '{0}{1}'.format(op, var_name) + + def test_frame_invert(self): + expr = self.ex('~') + + ## ~ ## + # frame + ## float always raises + lhs = DataFrame(randn(5, 2)) + if self.engine == 'numexpr': + with tm.assertRaises(NotImplementedError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + + ## int raises on numexpr + lhs = DataFrame(randint(5, size=(5, 2))) + if self.engine == 'numexpr': + with tm.assertRaises(NotImplementedError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = ~lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) + + ## bool always works + lhs = DataFrame(rand(5, 2) > 0.5) + expect = ~lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) + + ## object raises + lhs = DataFrame({'b': ['a', 1, 2.0], 'c': rand(3) > 0.5}) + if self.engine == 'numexpr': + with tm.assertRaises(ValueError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + + def test_series_invert(self): + #### ~ #### + expr = self.ex('~') + + # series + ## float raises + lhs = Series(randn(5)) + if self.engine == 'numexpr': + with tm.assertRaises(NotImplementedError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + + ## int raises on numexpr + lhs = Series(randint(5, size=5)) + if self.engine == 'numexpr': + with tm.assertRaises(NotImplementedError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = ~lhs + result = 
pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) + + ## bool + lhs = Series(rand(5) > 0.5) + expect = ~lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) + + # float + # int + # bool + + # object + lhs = Series(['a', 1, 2.0]) + if self.engine == 'numexpr': + with tm.assertRaises(ValueError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + + def test_frame_negate(self): + expr = self.ex('-') + + # float + lhs = DataFrame(randn(5, 2)) + expect = -lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) + + # int + lhs = DataFrame(randint(5, size=(5, 2))) + expect = -lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = DataFrame(rand(5, 2) > 0.5) + if self.engine == 'numexpr': + with tm.assertRaises(NotImplementedError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = -lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) + + def test_series_negate(self): + expr = self.ex('-') + + # float + lhs = Series(randn(5)) + expect = -lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) + + # int + lhs = Series(randint(5, size=5)) + expect = -lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = Series(rand(5) > 0.5) + if self.engine == 'numexpr': + with tm.assertRaises(NotImplementedError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = -lhs + result = pd.eval(expr, engine=self.engine, 
parser=self.parser) + assert_series_equal(expect, result) + + def test_frame_pos(self): + expr = self.ex('+') + + # float + lhs = DataFrame(randn(5, 2)) + if self.engine == 'python': + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) + + # int + lhs = DataFrame(randint(5, size=(5, 2))) + if self.engine == 'python': + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = DataFrame(rand(5, 2) > 0.5) + if self.engine == 'python': + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_frame_equal(expect, result) + + def test_series_pos(self): + expr = self.ex('+') + + # float + lhs = Series(randn(5)) + if self.engine == 'python': + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) + + # int + lhs = Series(randint(5, size=5)) + if self.engine == 'python': + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = lhs + result = pd.eval(expr, engine=self.engine, parser=self.parser) + assert_series_equal(expect, result) + + # bool doesn't work with numexpr but works elsewhere + lhs = Series(rand(5) > 0.5) + if self.engine == 'python': + with tm.assertRaises(TypeError): + result = pd.eval(expr, engine=self.engine, parser=self.parser) + else: + expect = lhs + result = pd.eval(expr, engine=self.engine, 
parser=self.parser) + assert_series_equal(expect, result) + + def test_scalar_unary(self): + with tm.assertRaises(TypeError): + pd.eval('~1.0', engine=self.engine, parser=self.parser) + + self.assertEqual(pd.eval('-1.0', parser=self.parser, engine=self.engine), -1.0) + self.assertEqual(pd.eval('+1.0', parser=self.parser, engine=self.engine), +1.0) + + self.assertEqual(pd.eval('~1', parser=self.parser, engine=self.engine), ~1) + self.assertEqual(pd.eval('-1', parser=self.parser, engine=self.engine), -1) + self.assertEqual(pd.eval('+1', parser=self.parser, engine=self.engine), +1) + + self.assertEqual(pd.eval('~True', parser=self.parser, engine=self.engine), ~True) + self.assertEqual(pd.eval('~False', parser=self.parser, engine=self.engine), ~False) + self.assertEqual(pd.eval('-True', parser=self.parser, engine=self.engine), -True) + self.assertEqual(pd.eval('-False', parser=self.parser, engine=self.engine), -False) + self.assertEqual(pd.eval('+True', parser=self.parser, engine=self.engine), +True) + self.assertEqual(pd.eval('+False', parser=self.parser, engine=self.engine), +False) + + def test_disallow_scalar_bool_ops(self): + exprs = '1 or 2', '1 and 2' + exprs += 'a and b', 'a or b' + exprs += '1 or 2 and (3 + 2) > 3', + exprs += '2 * x > 2 or 1 and 2', + exprs += '2 * df > 3 and 1 or a', + + x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2)) + for ex in exprs: + with tm.assertRaises(NotImplementedError): + pd.eval(ex, engine=self.engine, parser=self.parser) + + +class TestEvalNumexprPython(TestEvalNumexprPandas): + @classmethod + def setUpClass(cls): + skip_if_no_ne() + import numexpr as ne + cls.ne = ne + cls.engine = 'numexpr' + cls.parser = 'python' + + def setup_ops(self): + self.cmp_ops = list(filter(lambda x: x not in ('in', 'not in'), + expr._cmp_ops_syms)) + self.cmp2_ops = self.cmp_ops[::-1] + self.bin_ops = [s for s in expr._bool_ops_syms + if s not in ('and', 'or')] + self.special_case_ops = _special_case_arith_ops_syms + self.arith_ops = 
_good_arith_ops + self.unary_ops = '+', '-', '~' + + def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs): + ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2) + self.assertRaises(NotImplementedError, pd.eval, ex1, + local_dict={'lhs': lhs, 'mid': mid, 'rhs': rhs}, + engine=self.engine, parser=self.parser) + + +class TestEvalPythonPython(TestEvalNumexprPython): + @classmethod + def setUpClass(cls): + cls.engine = 'python' + cls.parser = 'python' + + @skip_incompatible_operand + def check_modulus(self, lhs, arith1, rhs): + ex = 'lhs {0} rhs'.format(arith1) + result = pd.eval(ex, engine=self.engine) + expected = lhs % rhs + assert_allclose(result, expected) + expected = eval('expected {0} rhs'.format(arith1)) + assert_allclose(result, expected) + + def check_alignment(self, result, nlhs, ghs, op): + try: + nlhs, ghs = nlhs.align(ghs) + except (ValueError, TypeError, AttributeError): + # ValueError: series frame or frame series align + # TypeError, AttributeError: series or frame with scalar align + pass + else: + expected = eval('nlhs {0} ghs'.format(op)) + assert_array_equal(result, expected) + + +class TestEvalPythonPandas(TestEvalPythonPython): + @classmethod + def setUpClass(cls): + cls.engine = 'python' + cls.parser = 'pandas' + + def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs): + TestEvalNumexprPandas.check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, + rhs) + + +f = lambda *args, **kwargs: np.random.randn() + + +ENGINES_PARSERS = list(product(_engines, expr._parsers)) + + +#------------------------------------- +# basic and complex alignment + +class TestAlignment(object): + + index_types = 'i', 'f', 's', 'u', 'dt', # 'p' + + def check_align_nested_unary_op(self, engine, parser): + skip_if_no_ne(engine) + s = 'df * ~2' + df = mkdf(5, 3, data_gen_f=f) + res = pd.eval(s, engine=engine, parser=parser) + assert_frame_equal(res, df * ~2) + + def test_align_nested_unary_op(self): + for engine, parser in ENGINES_PARSERS: + yield 
self.check_align_nested_unary_op, engine, parser + + def check_basic_frame_alignment(self, engine, parser): + skip_if_no_ne(engine) + args = product(self.index_types, repeat=2) + for r_idx_type, c_idx_type in args: + df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, + c_idx_type=c_idx_type) + df2 = mkdf(20, 10, data_gen_f=f, r_idx_type=r_idx_type, + c_idx_type=c_idx_type) + res = pd.eval('df + df2', engine=engine, parser=parser) + assert_frame_equal(res, df + df2) + + @slow + def test_basic_frame_alignment(self): + for engine, parser in ENGINES_PARSERS: + yield self.check_basic_frame_alignment, engine, parser + + def check_frame_comparison(self, engine, parser): + skip_if_no_ne(engine) + args = product(self.index_types, repeat=2) + for r_idx_type, c_idx_type in args: + df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, + c_idx_type=c_idx_type) + res = pd.eval('df < 2', engine=engine, parser=parser) + assert_frame_equal(res, df < 2) + + df3 = DataFrame(randn(*df.shape), index=df.index, + columns=df.columns) + res = pd.eval('df < df3', engine=engine, parser=parser) + assert_frame_equal(res, df < df3) + + @slow + def test_frame_comparison(self): + for engine, parser in ENGINES_PARSERS: + yield self.check_frame_comparison, engine, parser + + def check_medium_complex_frame_alignment(self, engine, parser): + skip_if_no_ne(engine) + args = product(self.index_types, repeat=4) + for r1, c1, r2, c2 in args: + df = mkdf(5, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1) + df2 = mkdf(10, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + df3 = mkdf(15, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + res = pd.eval('df + df2 + df3', engine=engine, parser=parser) + assert_frame_equal(res, df + df2 + df3) + + @slow + def test_medium_complex_frame_alignment(self): + for engine, parser in ENGINES_PARSERS: + yield self.check_medium_complex_frame_alignment, engine, parser + + def check_basic_frame_series_alignment(self, engine, parser): + skip_if_no_ne(engine) + def 
testit(r_idx_type, c_idx_type, index_name): + df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, + c_idx_type=c_idx_type) + index = getattr(df, index_name) + s = Series(np.random.randn(5), index[:5]) + + res = pd.eval('df + s', engine=engine, parser=parser) + if r_idx_type == 'dt' or c_idx_type == 'dt': + if engine == 'numexpr': + expected = df.add(s) + else: + expected = df + s + else: + expected = df + s + assert_frame_equal(res, expected) + + args = product(self.index_types, self.index_types, ('index', + 'columns')) + for r_idx_type, c_idx_type, index_name in args: + testit(r_idx_type, c_idx_type, index_name) + + @slow + def test_basic_frame_series_alignment(self): + for engine, parser in ENGINES_PARSERS: + yield self.check_basic_frame_series_alignment, engine, parser + + def check_basic_series_frame_alignment(self, engine, parser): + skip_if_no_ne(engine) + def testit(r_idx_type, c_idx_type, index_name): + df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, + c_idx_type=c_idx_type) + index = getattr(df, index_name) + s = Series(np.random.randn(5), index[:5]) + + res = pd.eval('s + df', engine=engine, parser=parser) + if r_idx_type == 'dt' or c_idx_type == 'dt': + if engine == 'numexpr': + expected = df.add(s) + else: + expected = s + df + else: + expected = s + df + assert_frame_equal(res, expected) + + args = product(self.index_types, self.index_types, ('index', + 'columns')) + for r_idx_type, c_idx_type, index_name in args: + testit(r_idx_type, c_idx_type, index_name) + + @slow + def test_basic_series_frame_alignment(self): + for engine, parser in ENGINES_PARSERS: + yield self.check_basic_series_frame_alignment, engine, parser + + def check_series_frame_commutativity(self, engine, parser): + skip_if_no_ne(engine) + args = product(self.index_types, self.index_types, ('+', '*'), + ('index', 'columns')) + for r_idx_type, c_idx_type, op, index_name in args: + df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type, + c_idx_type=c_idx_type) + index = 
getattr(df, index_name) + s = Series(np.random.randn(5), index[:5]) + + lhs = 's {0} df'.format(op) + rhs = 'df {0} s'.format(op) + a = pd.eval(lhs, engine=engine, parser=parser) + b = pd.eval(rhs, engine=engine, parser=parser) + + if r_idx_type != 'dt' and c_idx_type != 'dt': + if engine == 'numexpr': + assert_frame_equal(a, b) + + @slow + def test_series_frame_commutativity(self): + for engine, parser in ENGINES_PARSERS: + yield self.check_series_frame_commutativity, engine, parser + + def check_complex_series_frame_alignment(self, engine, parser): + skip_if_no_ne(engine) + index_types = [self.index_types] * 4 + args = product(('index', 'columns'), ('df', 'df2'), *index_types) + for index_name, obj, r1, r2, c1, c2 in args: + df = mkdf(10, 5, data_gen_f=f, r_idx_type=r1, c_idx_type=c1) + df2 = mkdf(20, 5, data_gen_f=f, r_idx_type=r2, c_idx_type=c2) + index = getattr(locals()[obj], index_name) + s = Series(np.random.randn(5), index[:5]) + + if r2 == 'dt' or c2 == 'dt': + if engine == 'numexpr': + expected2 = df2.add(s) + else: + expected2 = df2 + s + else: + expected2 = df2 + s + + if r1 == 'dt' or c1 == 'dt': + if engine == 'numexpr': + expected = expected2.add(df) + else: + expected = expected2 + df + else: + expected = expected2 + df + + res = pd.eval('df2 + s + df', engine=engine, parser=parser) + assert_equal(res.shape, expected.shape) + assert_frame_equal(res, expected) + + @slow + def test_complex_series_frame_alignment(self): + for engine, parser in ENGINES_PARSERS: + yield self.check_complex_series_frame_alignment, engine, parser + + def check_performance_warning_for_poor_alignment(self, engine, parser): + skip_if_no_ne(engine) + df = DataFrame(randn(1000, 10)) + s = Series(randn(10000)) + if engine == 'numexpr': + seen = pd.io.common.PerformanceWarning + else: + seen = False + + with assert_produces_warning(seen): + pd.eval('df + s', engine=engine, parser=parser) + + s = Series(randn(1000)) + with assert_produces_warning(False): + pd.eval('df + s', 
engine=engine, parser=parser) + + df = DataFrame(randn(10, 10000)) + s = Series(randn(10000)) + with assert_produces_warning(False): + pd.eval('df + s', engine=engine, parser=parser) + + df = DataFrame(randn(10, 10)) + s = Series(randn(10000)) + + is_python_engine = engine == 'python' + + if not is_python_engine: + wrn = pd.io.common.PerformanceWarning + else: + wrn = False + + with assert_produces_warning(wrn) as w: + pd.eval('df + s', engine=engine, parser=parser) + + if not is_python_engine: + assert_equal(len(w), 1) + msg = str(w[0].message) + expected = ("Alignment difference on axis {0} is larger" + " than an order of magnitude on term {1!r}, " + "by more than {2:.4g}; performance may suffer" + "".format(1, 's', np.log10(s.size - df.shape[1]))) + assert_equal(msg, expected) + + + def test_performance_warning_for_poor_alignment(self): + for engine, parser in ENGINES_PARSERS: + yield self.check_performance_warning_for_poor_alignment, engine, parser + + +#------------------------------------ +# slightly more complex ops + +class TestOperationsNumExprPandas(unittest.TestCase): + @classmethod + def setUpClass(cls): + skip_if_no_ne() + cls.engine = 'numexpr' + cls.parser = 'pandas' + cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms + + @classmethod + def tearDownClass(cls): + del cls.engine, cls.parser + + def eval(self, *args, **kwargs): + kwargs['engine'] = self.engine + kwargs['parser'] = self.parser + return pd.eval(*args, **kwargs) + + def test_simple_arith_ops(self): + ops = self.arith_ops + + for op in filter(lambda x: x != '//', ops): + ex = '1 {0} 1'.format(op) + ex2 = 'x {0} 1'.format(op) + ex3 = '1 {0} (x + 1)'.format(op) + + if op in ('in', 'not in'): + self.assertRaises(TypeError, pd.eval, ex, + engine=self.engine, parser=self.parser) + else: + expec = _eval_single_bin(1, op, 1, self.engine) + x = self.eval(ex, engine=self.engine, parser=self.parser) + assert_equal(x, expec) + + expec = _eval_single_bin(x, op, 1, self.engine) + y = 
self.eval(ex2, local_dict={'x': x}, engine=self.engine, + parser=self.parser) + assert_equal(y, expec) + + expec = _eval_single_bin(1, op, x + 1, self.engine) + y = self.eval(ex3, local_dict={'x': x}, + engine=self.engine, parser=self.parser) + assert_equal(y, expec) + + def test_simple_bool_ops(self): + for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), + (True, False)): + ex = '{0} {1} {2}'.format(lhs, op, rhs) + res = self.eval(ex) + exp = eval(ex) + self.assertEqual(res, exp) + + def test_bool_ops_with_constants(self): + for op, lhs, rhs in product(expr._bool_ops_syms, ('True', 'False'), + ('True', 'False')): + ex = '{0} {1} {2}'.format(lhs, op, rhs) + res = self.eval(ex) + exp = eval(ex) + self.assertEqual(res, exp) + + def test_panel_fails(self): + x = Panel(randn(3, 4, 5)) + y = Series(randn(10)) + assert_raises(NotImplementedError, self.eval, 'x + y', + local_dict={'x': x, 'y': y}) + + def test_4d_ndarray_fails(self): + x = randn(3, 4, 5, 6) + y = Series(randn(10)) + assert_raises(NotImplementedError, self.eval, 'x + y', + local_dict={'x': x, 'y': y}) + + def test_constant(self): + x = self.eval('1') + assert_equal(x, 1) + + def test_single_variable(self): + df = DataFrame(randn(10, 2)) + df2 = self.eval('df', local_dict={'df': df}) + assert_frame_equal(df, df2) + + def test_truediv(self): + s = np.array([1]) + ex = 's / 1' + d = {'s': s} + + if PY3: + res = self.eval(ex, truediv=False, local_dict=d) + assert_array_equal(res, np.array([1.0])) + + res = self.eval(ex, truediv=True, local_dict=d) + assert_array_equal(res, np.array([1.0])) + + res = self.eval('1 / 2', truediv=True) + expec = 0.5 + self.assertEqual(res, expec) + + res = self.eval('1 / 2', truediv=False) + expec = 0.5 + self.assertEqual(res, expec) + + res = self.eval('s / 2', truediv=False, local_dict={'s': s}) + expec = 0.5 + self.assertEqual(res, expec) + + res = self.eval('s / 2', truediv=True, local_dict={'s': s}) + expec = 0.5 + self.assertEqual(res, expec) + else: + res = 
self.eval(ex, truediv=False, local_dict=d) + assert_array_equal(res, np.array([1])) + + res = self.eval(ex, truediv=True, local_dict=d) + assert_array_equal(res, np.array([1.0])) + + res = self.eval('1 / 2', truediv=True) + expec = 0.5 + self.assertEqual(res, expec) + + res = self.eval('1 / 2', truediv=False) + expec = 0 + self.assertEqual(res, expec) + + res = self.eval('s / 2', truediv=False, local_dict={'s': s}) + expec = 0 + self.assertEqual(res, expec) + + res = self.eval('s / 2', truediv=True, local_dict={'s': s}) + expec = 0.5 + self.assertEqual(res, expec) + + def test_failing_subscript_with_name_error(self): + df = DataFrame(np.random.randn(5, 3)) + self.assertRaises(NameError, self.eval, 'df[x > 2] > 2', + local_dict={'df': df}) + + def test_lhs_expression_subscript(self): + df = DataFrame(np.random.randn(5, 3)) + result = self.eval('(df + 1)[df > 2]', local_dict={'df': df}) + expected = (df + 1)[df > 2] + assert_frame_equal(result, expected) + + def test_attr_expression(self): + df = DataFrame(np.random.randn(5, 3), columns=list('abc')) + expr1 = 'df.a < df.b' + expec1 = df.a < df.b + expr2 = 'df.a + df.b + df.c' + expec2 = df.a + df.b + df.c + expr3 = 'df.a + df.b + df.c[df.b < 0]' + expec3 = df.a + df.b + df.c[df.b < 0] + exprs = expr1, expr2, expr3 + expecs = expec1, expec2, expec3 + for e, expec in zip(exprs, expecs): + assert_series_equal(expec, self.eval(e, local_dict={'df': df})) + + def test_assignment_fails(self): + df = DataFrame(np.random.randn(5, 3), columns=list('abc')) + df2 = DataFrame(np.random.randn(5, 3)) + expr1 = 'df = df2' + self.assertRaises(NotImplementedError, self.eval, expr1, + local_dict={'df': df, 'df2': df2}) + + def test_basic_period_index_boolean_expression(self): + df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i') + + e = df < 2 + r = self.eval('df < 2', local_dict={'df': df}) + x = df < 2 + + assert_frame_equal(r, e) + assert_frame_equal(x, e) + + def test_basic_period_index_subscript_expression(self): + df = 
mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i') + r = self.eval('df[df < 2 + 3]', local_dict={'df': df}) + e = df[df < 2 + 3] + assert_frame_equal(r, e) + + def test_nested_period_index_subscript_expression(self): + df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i') + r = self.eval('df[df[df < 2] < 2] + df * 2', local_dict={'df': df}) + e = df[df[df < 2] < 2] + df * 2 + assert_frame_equal(r, e) + + def test_date_boolean(self): + df = DataFrame(randn(5, 3)) + df['dates1'] = date_range('1/1/2012', periods=5) + res = self.eval('df.dates1 < 20130101', local_dict={'df': df}, + engine=self.engine, parser=self.parser) + expec = df.dates1 < '20130101' + assert_series_equal(res, expec) + + def test_simple_in_ops(self): + if self.parser != 'python': + res = pd.eval('1 in [1, 2]', engine=self.engine, + parser=self.parser) + self.assertTrue(res) + + res = pd.eval('2 in (1, 2)', engine=self.engine, + parser=self.parser) + self.assertTrue(res) + + res = pd.eval('3 in (1, 2)', engine=self.engine, + parser=self.parser) + self.assertFalse(res) + + res = pd.eval('3 not in (1, 2)', engine=self.engine, + parser=self.parser) + self.assertTrue(res) + + res = pd.eval('[3] not in (1, 2)', engine=self.engine, + parser=self.parser) + self.assertTrue(res) + + res = pd.eval('[3] in ([3], 2)', engine=self.engine, + parser=self.parser) + self.assertTrue(res) + + res = pd.eval('[[3]] in [[[3]], 2]', engine=self.engine, + parser=self.parser) + self.assertTrue(res) + + res = pd.eval('(3,) in [(3,), 2]', engine=self.engine, + parser=self.parser) + self.assertTrue(res) + + res = pd.eval('(3,) not in [(3,), 2]', engine=self.engine, + parser=self.parser) + self.assertFalse(res) + + res = pd.eval('[(3,)] in [[(3,)], 2]', engine=self.engine, + parser=self.parser) + self.assertTrue(res) + else: + with tm.assertRaises(NotImplementedError): + pd.eval('1 in [1, 2]', engine=self.engine, parser=self.parser) + with tm.assertRaises(NotImplementedError): + pd.eval('2 in (1, 2)', 
engine=self.engine, parser=self.parser) + with tm.assertRaises(NotImplementedError): + pd.eval('3 in (1, 2)', engine=self.engine, parser=self.parser) + with tm.assertRaises(NotImplementedError): + pd.eval('3 not in (1, 2)', engine=self.engine, + parser=self.parser) + with tm.assertRaises(NotImplementedError): + pd.eval('[(3,)] in (1, 2, [(3,)])', engine=self.engine, + parser=self.parser) + with tm.assertRaises(NotImplementedError): + pd.eval('[3] not in (1, 2, [[3]])', engine=self.engine, + parser=self.parser) + + +class TestOperationsNumExprPython(TestOperationsNumExprPandas): + @classmethod + def setUpClass(cls): + if not _USE_NUMEXPR: + raise nose.SkipTest("numexpr engine not installed") + cls.engine = 'numexpr' + cls.parser = 'python' + cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms + cls.arith_ops = filter(lambda x: x not in ('in', 'not in'), + cls.arith_ops) + + def test_fails_and(self): + df = DataFrame(np.random.randn(5, 3)) + self.assertRaises(NotImplementedError, pd.eval, 'df > 2 and df > 3', + local_dict={'df': df}, parser=self.parser, + engine=self.engine) + + def test_fails_or(self): + df = DataFrame(np.random.randn(5, 3)) + self.assertRaises(NotImplementedError, pd.eval, 'df > 2 or df > 3', + local_dict={'df': df}, parser=self.parser, + engine=self.engine) + + def test_fails_not(self): + df = DataFrame(np.random.randn(5, 3)) + self.assertRaises(NotImplementedError, pd.eval, 'not df > 2', + local_dict={'df': df}, parser=self.parser, + engine=self.engine) + + def test_fails_ampersand(self): + df = DataFrame(np.random.randn(5, 3)) + ex = '(df + 2)[df > 1] > 0 & (df > 0)' + with tm.assertRaises(NotImplementedError): + pd.eval(ex, parser=self.parser, engine=self.engine) + + def test_fails_pipe(self): + df = DataFrame(np.random.randn(5, 3)) + ex = '(df + 2)[df > 1] > 0 | (df > 0)' + with tm.assertRaises(NotImplementedError): + pd.eval(ex, parser=self.parser, engine=self.engine) + + def test_bool_ops_with_constants(self): + for op, lhs, rhs in 
product(expr._bool_ops_syms, ('True', 'False'), + ('True', 'False')): + ex = '{0} {1} {2}'.format(lhs, op, rhs) + if op in ('and', 'or'): + with tm.assertRaises(NotImplementedError): + self.eval(ex) + else: + res = self.eval(ex) + exp = eval(ex) + self.assertEqual(res, exp) + + def test_simple_bool_ops(self): + for op, lhs, rhs in product(expr._bool_ops_syms, (True, False), + (True, False)): + ex = 'lhs {0} rhs'.format(op) + if op in ('and', 'or'): + with tm.assertRaises(NotImplementedError): + pd.eval(ex, engine=self.engine, parser=self.parser) + else: + res = pd.eval(ex, engine=self.engine, parser=self.parser) + exp = eval(ex) + self.assertEqual(res, exp) + + +class TestOperationsPythonPython(TestOperationsNumExprPython): + @classmethod + def setUpClass(cls): + cls.engine = cls.parser = 'python' + cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms + cls.arith_ops = filter(lambda x: x not in ('in', 'not in'), + cls.arith_ops) + + +class TestOperationsPythonPandas(TestOperationsNumExprPandas): + @classmethod + def setUpClass(cls): + cls.engine = 'python' + cls.parser = 'pandas' + cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms + + +_var_s = randn(10) + + +class TestScope(object): + def check_global_scope(self, e, engine, parser): + skip_if_no_ne(engine) + assert_array_equal(_var_s * 2, pd.eval(e, engine=engine, + parser=parser)) + + def test_global_scope(self): + e = '_var_s * 2' + for engine, parser in product(_engines, expr._parsers): + yield self.check_global_scope, e, engine, parser + + def check_no_new_locals(self, engine, parser): + skip_if_no_ne(engine) + x = 1 + lcls = locals().copy() + pd.eval('x + 1', local_dict=lcls, engine=engine, parser=parser) + lcls2 = locals().copy() + lcls2.pop('lcls') + assert_equal(lcls, lcls2) + + def test_no_new_locals(self): + for engine, parser in product(_engines, expr._parsers): + yield self.check_no_new_locals, engine, parser + + def check_no_new_globals(self, engine, parser): + skip_if_no_ne(engine) + x = 
1 + gbls = globals().copy() + pd.eval('x + 1', engine=engine, parser=parser) + gbls2 = globals().copy() + assert_equal(gbls, gbls2) + + def test_no_new_globals(self): + for engine, parser in product(_engines, expr._parsers): + yield self.check_no_new_globals, engine, parser + + +def test_invalid_engine(): + skip_if_no_ne() + assertRaisesRegexp(KeyError, 'Invalid engine \'asdf\' passed', + pd.eval, 'x + y', local_dict={'x': 1, 'y': 2}, + engine='asdf') + + +def test_invalid_parser(): + skip_if_no_ne() + assertRaisesRegexp(KeyError, 'Invalid parser \'asdf\' passed', + pd.eval, 'x + y', local_dict={'x': 1, 'y': 2}, + parser='asdf') + + +def check_is_expr_syntax(engine): + skip_if_no_ne(engine) + s = 1 + valid1 = 's + 1' + valid2 = '__y + _xx' + assert_true(expr.isexpr(valid1, check_names=False)) + assert_true(expr.isexpr(valid2, check_names=False)) + + +def check_is_expr_names(engine): + skip_if_no_ne(engine) + r, s = 1, 2 + valid = 's + r' + invalid = '__y + __x' + assert_true(expr.isexpr(valid, check_names=True)) + assert_false(expr.isexpr(invalid, check_names=True)) + + +def test_is_expr_syntax(): + for engine in _engines: + yield check_is_expr_syntax, engine + + +def test_is_expr_names(): + for engine in _engines: + yield check_is_expr_names, engine + + +_parsers = {'python': PythonExprVisitor, 'pytables': pytables.ExprVisitor, + 'pandas': PandasExprVisitor} + +def check_disallowed_nodes(engine, parser): + skip_if_no_ne(engine) + VisitorClass = _parsers[parser] + uns_ops = VisitorClass.unsupported_nodes + inst = VisitorClass('x + 1', engine, parser) + + for ops in uns_ops: + assert_raises(NotImplementedError, getattr(inst, ops)) + + +def test_disallowed_nodes(): + for engine, visitor in product(_parsers, repeat=2): + yield check_disallowed_nodes, engine, visitor + + +def check_syntax_error_exprs(engine, parser): + skip_if_no_ne(engine) + e = 's +' + assert_raises(SyntaxError, pd.eval, e, engine=engine, parser=parser) + + +def test_syntax_error_exprs(): + for 
engine, parser in ENGINES_PARSERS: + yield check_syntax_error_exprs, engine, parser + + +def check_name_error_exprs(engine, parser): + skip_if_no_ne(engine) + e = 's + t' + assert_raises(NameError, pd.eval, e, engine=engine, parser=parser) + + +def test_name_error_exprs(): + for engine, parser in ENGINES_PARSERS: + yield check_name_error_exprs, engine, parser + + +if __name__ == '__main__': + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff --git a/pandas/core/base.py b/pandas/core/base.py index a2f7f04053b9f..fb0d56113ede9 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -48,6 +48,7 @@ def __repr__(self): """ return str(self) + class PandasObject(StringMixin): """baseclass for various pandas objects""" diff --git a/pandas/core/common.py b/pandas/core/common.py index 34aaa08b57171..d3fa10abc7681 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py @@ -3,17 +3,25 @@ """ import re +import collections +import numbers import codecs import csv import sys +from datetime import timedelta + +from distutils.version import LooseVersion + from numpy.lib.format import read_array, write_array import numpy as np + import pandas.algos as algos import pandas.lib as lib import pandas.tslib as tslib from pandas import compat -from pandas.compat import StringIO, BytesIO, range, long, u, zip, map +from pandas.compat import (StringIO, BytesIO, range, long, u, zip, map, + string_types) from datetime import timedelta from pandas.core.config import get_option @@ -27,14 +35,18 @@ class AmbiguousIndexError(PandasError, KeyError): pass _POSSIBLY_CAST_DTYPES = set([np.dtype(t) - for t in ['M8[ns]', 'm8[ns]', 'O', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']]) + for t in ['M8[ns]', 'm8[ns]', 'O', 'int8', + 'uint8', 'int16', 'uint16', 'int32', + 'uint32', 'int64', 'uint64']]) _NS_DTYPE = np.dtype('M8[ns]') _TD_DTYPE = np.dtype('m8[ns]') _INT64_DTYPE = np.dtype(np.int64) _DATELIKE_DTYPES = 
set([np.dtype(t) for t in ['M8[ns]', 'm8[ns]']]) -# define abstract base classes to enable isinstance type checking on our objects + +# define abstract base classes to enable isinstance type checking on our +# objects def create_pandas_abc_type(name, attr, comp): @classmethod def _check(cls, inst): @@ -44,15 +56,22 @@ def _check(cls, inst): meta = type("ABCBase", (type,), dct) return meta(name, tuple(), dct) + ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",)) ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",)) -ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp", ('sparse_series', 'sparse_time_series')) -ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp", ('sparse_array', 'sparse_series')) +ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp", + ('sparse_series', + 'sparse_time_series')) +ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp", + ('sparse_array', 'sparse_series')) + class _ABCGeneric(type): def __instancecheck__(cls, inst): return hasattr(inst, "_data") + + ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {}) def isnull(obj): @@ -223,6 +242,35 @@ def notnull(obj): return -res +def _iterable_not_string(x): + return (isinstance(x, collections.Iterable) and + not isinstance(x, compat.string_types)) + + +def flatten(l): + """Flatten an arbitrarily nested sequence. + + Parameters + ---------- + l : sequence + The non string sequence to flatten + + Notes + ----- + This doesn't consider strings sequences. 
+ + Returns + ------- + flattened : generator + """ + for el in l: + if _iterable_not_string(el): + for s in flatten(el): + yield s + else: + yield el + + def mask_missing(arr, values_to_mask): """ Return a masking array of same size/shape as arr @@ -1657,7 +1705,7 @@ def is_bool(obj): def is_integer(obj): - return isinstance(obj, (int, long, np.integer)) + return isinstance(obj, (numbers.Integral, np.integer)) def is_float(obj): @@ -1665,7 +1713,7 @@ def is_float(obj): def is_complex(obj): - return isinstance(obj, (complex, np.complexfloating)) + return isinstance(obj, (numbers.Complex, np.complexfloating)) def is_iterator(obj): @@ -1674,7 +1722,7 @@ def is_iterator(obj): def is_number(obj): - return isinstance(obj, (np.number, int, long, float, complex)) + return isinstance(obj, (numbers.Number, np.number)) def is_integer_dtype(arr_or_dtype): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index f56b6bc00cf15..86565a3a1d9e5 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -28,15 +28,16 @@ _coerce_to_dtypes, _DATELIKE_DTYPES, is_list_like) from pandas.core.generic import NDFrame from pandas.core.index import Index, MultiIndex, _ensure_index -from pandas.core.indexing import (_NDFrameIndexer, _maybe_droplevels, - _convert_to_index_sliceable, _check_bool_indexer, - _maybe_convert_indices) +from pandas.core.indexing import (_maybe_droplevels, + _convert_to_index_sliceable, + _check_bool_indexer, _maybe_convert_indices) from pandas.core.internals import (BlockManager, create_block_manager_from_arrays, create_block_manager_from_blocks) from pandas.core.series import Series, _radd_compat -import pandas.core.expressions as expressions -from pandas.sparse.array import SparseArray +import pandas.computation.expressions as expressions +from pandas.computation.eval import eval as _eval +from pandas.computation.expr import _ensure_scope from pandas.compat.scipy import scoreatpercentile as _quantile from pandas.compat import(range, zip, lrange, lmap, 
lzip, StringIO, u, OrderedDict, raise_with_traceback) @@ -51,14 +52,12 @@ import pandas.core.datetools as datetools import pandas.core.common as com import pandas.core.format as fmt -import pandas.core.generic as generic import pandas.core.nanops as nanops import pandas.lib as lib -import pandas.tslib as tslib import pandas.algos as _algos -from pandas.core.config import get_option, set_option +from pandas.core.config import get_option #---------------------------------------------------------------------- # Docstring templates @@ -1898,6 +1897,155 @@ def _getitem_frame(self, key): raise ValueError('Must pass DataFrame with boolean values only') return self.where(key) + def _get_index_resolvers(self, axis): + # index or columns + axis_index = getattr(self, axis) + d = dict() + + for i, name in enumerate(axis_index.names): + if name is not None: + key = level = name + else: + # prefix with 'i' or 'c' depending on the input axis + # e.g., you must do ilevel_0 for the 0th level of an unnamed + # multiiindex + level_string = '{prefix}level_{i}'.format(prefix=axis[0], i=i) + key = level_string + level = i + + d[key] = Series(axis_index.get_level_values(level).values, + index=axis_index, name=level) + + # put the index/columns itself in the dict + d[axis] = axis_index + return d + + def query(self, expr, **kwargs): + """Query the columns of a frame with a boolean expression. + + Parameters + ---------- + expr : string + The query string to evaluate. The result of the evaluation of this + expression is first passed to :attr:`~pandas.DataFrame.loc` and if + that fails because of a multidimensional key (e.g., a DataFrame) + then the result will be passed to + :meth:`~pandas.DataFrame.__getitem__`. + kwargs : dict + See the documentation for :func:`~pandas.eval` for complete details + on the keyword arguments accepted by + :meth:`~pandas.DataFrame.query`. 
+ + Returns + ------- + q : DataFrame or Series + + Notes + ----- + This method uses the top-level :func:`~pandas.eval` function to + evaluate the passed query. + + The :meth:`~pandas.DataFrame.query` method uses a slightly + modified Python syntax by default. For example, the ``&`` and ``|`` + (bitwise) operators have the precedence of their boolean cousins, + :keyword:`and` and :keyword:`or`. This *is* syntactically valid Python, + however the semantics are different. + + You can change the semantics of the expression by passing the keyword + argument ``parser='python'``. This enforces the same semantics as + evaluation in Python space. Likewise, you can pass ``engine='python'`` + to evaluate an expression using Python itself as a backend. This is not + recommended as it is inefficient compared to using ``numexpr`` as the + engine. + + The :attr:`~pandas.DataFrame.index` and + :attr:`~pandas.DataFrame.columns` attributes of the + :class:`~pandas.DataFrame` instance is placed in the namespace by + default, which allows you to treat both the index and columns of the + frame as a column in the frame. + The identifier ``index`` is used for this variable, and you can also + use the name of the index to identify it in a query. + + For further details and examples see the ``query`` documentation in + :ref:`indexing <indexing.query>`. 
+ + See Also + -------- + pandas.eval + DataFrame.eval + + Examples + -------- + >>> from numpy.random import randn + >>> from pandas import DataFrame + >>> df = DataFrame(randn(10, 2), columns=list('ab')) + >>> df.query('a > b') + >>> df[df.a > df.b] # same result as the previous expression + """ + # need to go up at least 4 stack frames + # 4 expr.Scope + # 3 expr._ensure_scope + # 2 self.eval + # 1 self.query + # 0 self.query caller (implicit) + level = kwargs.setdefault('level', 4) + if level < 4: + raise ValueError("Going up fewer than 4 stack frames will not" + " capture the necessary variable scope for a " + "query expression") + + res = self.eval(expr, **kwargs) + + try: + return self.loc[res] + except ValueError: + # when res is multi-dimensional loc raises, but this is sometimes a + # valid query + return self[res] + + def eval(self, expr, **kwargs): + """Evaluate an expression in the context of the calling DataFrame + instance. + + Parameters + ---------- + expr : string + The expression string to evaluate. + kwargs : dict + See the documentation for :func:`~pandas.eval` for complete details + on the keyword arguments accepted by + :meth:`~pandas.DataFrame.query`. + + Returns + ------- + ret : ndarray, scalar, or pandas object + + See Also + -------- + pandas.DataFrame.query + pandas.eval + + Notes + ----- + For more details see the API documentation for :func:`~pandas.eval`. + For detailed examples see :ref:`enhancing performance with eval + <enhancingperf.eval>`. 
+ + Examples + -------- + >>> from numpy.random import randn + >>> from pandas import DataFrame + >>> df = DataFrame(randn(10, 2), columns=list('ab')) + >>> df.eval('a + b') + """ + resolvers = kwargs.pop('resolvers', None) + if resolvers is None: + index_resolvers = self._get_index_resolvers('index') + index_resolvers.update(self._get_index_resolvers('columns')) + resolvers = [self, index_resolvers] + kwargs['local_dict'] = _ensure_scope(resolvers=resolvers, **kwargs) + return _eval(expr, **kwargs) + def _slice(self, slobj, axis=0, raise_on_error=False): axis = self._get_block_manager_axis(axis) new_data = self._data.get_slice( @@ -4599,6 +4747,7 @@ def combineMult(self, other): DataFrame._setup_axes( ['index', 'columns'], info_axis=1, stat_axis=0, axes_are_reversed=True) + _EMPTY_SERIES = Series([]) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index c265d1590af95..11ce27b078b18 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -18,8 +18,7 @@ from pandas.sparse.array import _maybe_to_sparse, SparseArray import pandas.lib as lib import pandas.tslib as tslib -import pandas.core.expressions as expressions -from pandas.util.decorators import cache_readonly +import pandas.computation.expressions as expressions from pandas.tslib import Timestamp from pandas import compat diff --git a/pandas/core/series.py b/pandas/core/series.py index 893483f0f2636..beb398dfe6fd0 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -8,7 +8,6 @@ import operator from distutils.version import LooseVersion import types -import warnings from numpy import nan, ndarray import numpy as np @@ -18,8 +17,10 @@ _default_index, _maybe_promote, _maybe_upcast, _asarray_tuplesafe, is_integer_dtype, _NS_DTYPE, _TD_DTYPE, - _infer_dtype_from_scalar, is_list_like, _values_from_object, - _possibly_cast_to_datetime, _possibly_castable, _possibly_convert_platform, + _infer_dtype_from_scalar, is_list_like, + _values_from_object, + 
_possibly_cast_to_datetime, _possibly_castable, + _possibly_convert_platform, ABCSparseArray) from pandas.core.index import (Index, MultiIndex, InvalidIndexError, _ensure_index, _handle_legacy_indexes) @@ -29,7 +30,6 @@ from pandas.core import generic from pandas.core.internals import SingleBlockManager from pandas.core.categorical import Categorical -import pandas.core.expressions as expressions from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex, Period from pandas.tseries.offsets import DateOffset @@ -775,12 +775,9 @@ def put(self, *args, **kwargs): def __len__(self): return len(self._data) - @property - def size(self): - return self.__len__() - def view(self, dtype=None): - return self._constructor(self.values.view(dtype), index=self.index, name=self.name) + return self._constructor(self.values.view(dtype), index=self.index, + name=self.name) def __array__(self, result=None): """ the array interface, return my values """ @@ -790,7 +787,8 @@ def __array_wrap__(self, result): """ Gets called prior to a ufunc (and after) """ - return self._constructor(result, index=self.index, name=self.name, copy=False) + return self._constructor(result, index=self.index, name=self.name, + copy=False) def __contains__(self, key): return key in self.index diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index c8224f761ce17..b79408a1bf8d2 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2,12 +2,9 @@ High level interface to PyTables for reading and writing pandas data structures to disk """ -from __future__ import print_function # pylint: disable-msg=E1101,W0613,W0603 from datetime import datetime, date -from pandas.compat import map, range, zip, lrange, lmap, u -from pandas import compat import time import re import copy @@ -15,14 +12,13 @@ import warnings import numpy as np -import pandas from pandas import (Series, TimeSeries, DataFrame, Panel, Panel4D, Index, MultiIndex, Int64Index, Timestamp, 
_np_version_under1p7) from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel from pandas.sparse.array import BlockIndex, IntIndex from pandas.tseries.api import PeriodIndex, DatetimeIndex from pandas.core.base import StringMixin -from pandas.core.common import adjoin, is_list_like, pprint_thing +from pandas.core.common import adjoin, pprint_thing from pandas.core.algorithms import match, unique from pandas.core.categorical import Categorical from pandas.core.common import _asarray_tuplesafe @@ -33,8 +29,10 @@ import pandas.core.common as com from pandas.tools.merge import concat from pandas import compat +from pandas.compat import u, PY3, range, lrange from pandas.io.common import PerformanceWarning from pandas.core.config import get_option +from pandas.computation.pytables import Expr, maybe_expression import pandas.lib as lib import pandas.algos as algos @@ -59,11 +57,31 @@ def _ensure_decoded(s): def _ensure_encoding(encoding): # set the encoding if we need if encoding is None: - if compat.PY3: + if PY3: encoding = _default_encoding return encoding +Term = Expr + + +def _ensure_term(where): + """ + ensure that the where is a Term or a list of Term + this makes sure that we are capturing the scope of variables + that are passed + create the terms here with a frame_level=2 (we are 2 levels down) + """ + + # only consider list/tuple here as an ndarray is automaticaly a coordinate list + if isinstance(where, (list,tuple)): + where = [w if not maybe_expression(w) else Term(w, scope_level=2) + for w in where if w is not None ] + elif maybe_expression(where): + where = Term(where, scope_level=2) + return where + + class PossibleDataLossError(Exception): pass @@ -222,9 +240,12 @@ def get_store(path, **kwargs): Examples -------- + >>> from pandas import DataFrame + >>> from numpy.random import randn + >>> bar = DataFrame(randn(10, 4)) >>> with get_store('test.h5') as store: - >>> store['foo'] = bar # write to HDF5 - >>> bar = store['foo'] # retrieve + 
... store['foo'] = bar # write to HDF5 + ... bar = store['foo'] # retrieve """ store = None try: @@ -237,7 +258,8 @@ def get_store(path, **kwargs): # interface to/from ### -def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, append=None, **kwargs): +def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, + append=None, **kwargs): """ store this object, close it if we opened it """ if append: f = lambda store: store.append(key, value, **kwargs) @@ -245,7 +267,8 @@ def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None, app f = lambda store: store.put(key, value, **kwargs) if isinstance(path_or_buf, compat.string_types): - with get_store(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store: + with get_store(path_or_buf, mode=mode, complevel=complevel, + complib=complib) as store: f(store) else: f(path_or_buf) @@ -332,6 +355,9 @@ class HDFStore(StringMixin): Examples -------- + >>> from pandas import DataFrame + >>> from numpy.random import randn + >>> bar = DataFrame(randn(10, 4)) >>> store = HDFStore('test.h5') >>> store['foo'] = bar # write to HDF5 >>> bar = store['foo'] # retrieve @@ -341,9 +367,9 @@ class HDFStore(StringMixin): def __init__(self, path, mode=None, complevel=None, complib=None, fletcher32=False, **kwargs): try: - import tables as _ + import tables except ImportError: # pragma: no cover - raise Exception('HDFStore requires PyTables') + raise ImportError('HDFStore requires PyTables') self._path = path if mode is None: @@ -477,7 +503,7 @@ def open(self, mode='a'): self._handle = h5_open(self._path, self._mode) except IOError as e: # pragma: no cover if 'can not be written' in str(e): - print('Opening %s in read-only mode' % self._path) + print ('Opening %s in read-only mode' % self._path) self._handle = h5_open(self._path, 'r') else: raise @@ -523,7 +549,8 @@ def get(self, key): raise KeyError('No object named %s in the file' % key) return self._read_group(group) - 
def select(self, key, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close=False, **kwargs): + def select(self, key, where=None, start=None, stop=None, columns=None, + iterator=False, chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas object stored in file, optionally based on where criteria @@ -549,22 +576,28 @@ def select(self, key, where=None, start=None, stop=None, columns=None, iterator= raise KeyError('No object named %s in the file' % key) # create the storer and axes + where = _ensure_term(where) s = self._create_storer(group) s.infer_axes() # what we are actually going to do for a chunk def func(_start, _stop): - return s.read(where=where, start=_start, stop=_stop, columns=columns, **kwargs) + return s.read(where=where, start=_start, stop=_stop, + columns=columns, **kwargs) if iterator or chunksize is not None: if not s.is_table: raise TypeError( "can only use an iterator or chunksize on a table") - return TableIterator(self, func, nrows=s.nrows, start=start, stop=stop, chunksize=chunksize, auto_close=auto_close) + return TableIterator(self, func, nrows=s.nrows, start=start, + stop=stop, chunksize=chunksize, + auto_close=auto_close) - return TableIterator(self, func, nrows=s.nrows, start=start, stop=stop, auto_close=auto_close).get_values() + return TableIterator(self, func, nrows=s.nrows, start=start, stop=stop, + auto_close=auto_close).get_values() - def select_as_coordinates(self, key, where=None, start=None, stop=None, **kwargs): + def select_as_coordinates( + self, key, where=None, start=None, stop=None, **kwargs): """ return the selection as an Index @@ -575,6 +608,7 @@ def select_as_coordinates(self, key, where=None, start=None, stop=None, **kwargs start : integer (defaults to None), row number to start selection stop : integer (defaults to None), row number to stop selection """ + where = _ensure_term(where) return self.get_storer(key).read_coordinates(where=where, start=start, stop=stop, 
**kwargs) def unique(self, key, column, **kwargs): @@ -599,7 +633,9 @@ def select_column(self, key, column, **kwargs): """ return self.get_storer(key).read_column(column=column, **kwargs) - def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close=False, **kwargs): + def select_as_multiple(self, keys, where=None, selector=None, columns=None, + start=None, stop=None, iterator=False, + chunksize=None, auto_close=False, **kwargs): """ Retrieve pandas objects from multiple tables Parameters @@ -618,16 +654,19 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, star """ # default to single select + where = _ensure_term(where) if isinstance(keys, (list, tuple)) and len(keys) == 1: keys = keys[0] if isinstance(keys, compat.string_types): - return self.select(key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, **kwargs) + return self.select(key=keys, where=where, columns=columns, + start=start, stop=stop, iterator=iterator, + chunksize=chunksize, **kwargs) if not isinstance(keys, (list, tuple)): - raise Exception("keys must be a list/tuple") + raise TypeError("keys must be a list/tuple") - if len(keys) == 0: - raise Exception("keys must have a non-zero length") + if not len(keys): + raise ValueError("keys must have a non-zero length") if selector is None: selector = keys[0] @@ -642,7 +681,8 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, star raise TypeError("Invalid table [%s]" % k) if not t.is_table: raise TypeError( - "object [%s] is not a table, and cannot be used in all select as multiple" % t.pathname) + "object [%s] is not a table, and cannot be used in all select as multiple" % + t.pathname) if nrows is None: nrows = t.nrows @@ -655,7 +695,7 @@ def select_as_multiple(self, keys, where=None, selector=None, columns=None, star c = self.select_as_coordinates( selector, where, 
start=start, stop=stop) nrows = len(c) - except (Exception) as detail: + except Exception: raise ValueError("invalid selector [%s]" % selector) def func(_start, _stop): @@ -720,6 +760,7 @@ def remove(self, key, where=None, start=None, stop=None): raises KeyError if key is not a valid store """ + where = _ensure_term(where) try: s = self.get_storer(key) except: @@ -777,8 +818,8 @@ def append(self, key, value, format=None, append=True, columns=None, dropna=None data in the table, so be careful """ if columns is not None: - raise Exception( - "columns is not a supported keyword in append, try data_columns") + raise TypeError("columns is not a supported keyword in append, " + "try data_columns") if dropna is None: dropna = get_option("io.hdf.dropna_table") @@ -809,8 +850,9 @@ def append_to_multiple(self, d, value, selector, data_columns=None, axes=None, d """ if axes is not None: - raise Exception( - "axes is currently not accepted as a paremter to append_to_multiple; you can create the tables indepdently instead") + raise TypeError("axes is currently not accepted as a parameter to" + " append_to_multiple; you can create the " + "tables indepdently instead") if not isinstance(d, dict): raise ValueError( @@ -876,7 +918,7 @@ def create_table_index(self, key, **kwargs): # version requirements _tables() if not _table_supports_index: - raise Exception("PyTables >= 2.3 is required for table indexing") + raise ValueError("PyTables >= 2.3 is required for table indexing") s = self.get_storer(key) if s is None: @@ -930,7 +972,11 @@ def copy( """ new_store = HDFStore( - file, mode=mode, complib=complib, complevel=complevel, fletcher32 = fletcher32) + file, + mode=mode, + complib=complib, + complevel=complevel, + fletcher32=fletcher32) if keys is None: keys = list(self.keys()) if not isinstance(keys, (tuple, list)): @@ -1142,7 +1188,8 @@ class TableIterator(object): kwargs : the passed kwargs """ - def __init__(self, store, func, nrows, start=None, stop=None, chunksize=None, 
auto_close=False): + def __init__(self, store, func, nrows, start=None, stop=None, + chunksize=None, auto_close=False): self.store = store self.func = func self.nrows = nrows or 0 @@ -1251,7 +1298,12 @@ def set_table(self, table): def __unicode__(self): temp = tuple( - map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))) + map(pprint_thing, + (self.name, + self.cname, + self.axis, + self.pos, + self.kind))) return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp def __eq__(self, other): @@ -1361,9 +1413,7 @@ def validate_col(self, itemsize=None): """ validate this column: return the compared against itemsize """ # validate this column for string truncation (or reset to the max size) - dtype = getattr(self, 'dtype', None) if _ensure_decoded(self.kind) == u('string'): - c = self.col if c is not None: if itemsize is None: @@ -1467,7 +1517,8 @@ class DataCol(IndexCol): _info_fields = ['tz'] @classmethod - def create_for_block(cls, i=None, name=None, cname=None, version=None, **kwargs): + def create_for_block( + cls, i=None, name=None, cname=None, version=None, **kwargs): """ return a new datacol with the block i """ if cname is None: @@ -1487,11 +1538,12 @@ def create_for_block(cls, i=None, name=None, cname=None, version=None, **kwargs) return cls(name=name, cname=cname, **kwargs) - def __init__(self, values=None, kind=None, typ=None, cname=None, data=None, block=None, **kwargs): + def __init__(self, values=None, kind=None, typ=None, + cname=None, data=None, block=None, **kwargs): super(DataCol, self).__init__( values=values, kind=kind, typ=typ, cname=cname, **kwargs) self.dtype = None - self.dtype_attr = u("%s_dtype") % self.name + self.dtype_attr = u("%s_dtype" % self.name) self.set_data(data) def __unicode__(self): @@ -1540,7 +1592,8 @@ def set_kind(self): if self.typ is None: self.typ = getattr(self.description, self.cname, None) - def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, encoding=None, **kwargs): + def 
set_atom(self, block, existing_col, min_itemsize, + nan_rep, info, encoding=None, **kwargs): """ create and setup my atom from the block b """ self.values = list(block.items) @@ -1596,7 +1649,11 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, encoding=No # end up here ### elif inferred_type == 'string' or dtype == 'object': self.set_atom_string( - block, existing_col, min_itemsize, nan_rep, encoding) + block, + existing_col, + min_itemsize, + nan_rep, + encoding) else: self.set_atom_data(block) @@ -1605,7 +1662,8 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, encoding=No def get_atom_string(self, block, itemsize): return _tables().StringCol(itemsize=itemsize, shape=block.shape[0]) - def set_atom_string(self, block, existing_col, min_itemsize, nan_rep, encoding): + def set_atom_string( + self, block, existing_col, min_itemsize, nan_rep, encoding): # fill nan items with myself block = block.fillna(nan_rep)[0] data = block.values @@ -1701,13 +1759,13 @@ def validate_attr(self, append): if (existing_fields is not None and existing_fields != list(self.values)): raise ValueError("appended items do not match existing items" - " in table!") + " in table!") existing_dtype = getattr(self.attrs, self.dtype_attr, None) if (existing_dtype is not None and existing_dtype != self.dtype): raise ValueError("appended items dtype do not match existing items dtype" - " in table!") + " in table!") def convert(self, values, nan_rep, encoding): """ set the data from this selection (and convert to the correct dtype if we can) """ @@ -1855,6 +1913,9 @@ def __unicode__(self): return "%-12.12s (shape->%s)" % (self.pandas_type, s) return self.pandas_type + def __str__(self): + return self.__repr__() + def set_object_info(self): """ set my pandas type & version """ self.attrs.pandas_type = str(self.pandas_kind) @@ -2058,7 +2119,7 @@ def read_index(self, key): _, index = self.read_index_node(getattr(self.group, key)) return index else: # pragma: 
no cover - raise Exception('unrecognized index variety: %s' % variety) + raise TypeError('unrecognized index variety: %s' % variety) def write_index(self, key, index): if isinstance(index, MultiIndex): @@ -2241,7 +2302,7 @@ def write_array(self, key, value, items=None): warnings.warn(ws, PerformanceWarning) vlarr = self._handle.createVLArray(self.group, key, - _tables().ObjectAtom()) + _tables().ObjectAtom()) vlarr.append(value) elif value.dtype.type == np.datetime64: self._handle.createArray(self.group, key, value.view('i8')) @@ -2381,7 +2442,6 @@ def read(self, **kwargs): sdict = {} for name in items: key = 'sparse_frame_%s' % name - node = getattr(self.group, key) s = SparseFrameFixed(self.parent, getattr(self.group, key)) s.infer_axes() sdict[name] = s.read() @@ -2574,7 +2634,8 @@ def validate(self, other): oax = ov[i] if sax != oax: raise ValueError( - "invalid combinate of [%s] on appending data [%s] vs current table [%s]" % (c, sax, oax)) + "invalid combinate of [%s] on appending data [%s] vs current table [%s]" % + (c, sax, oax)) # should never get here raise Exception( @@ -2706,14 +2767,14 @@ def validate_min_itemsize(self, min_itemsize): continue if k not in q: raise ValueError( - "min_itemsize has the key [%s] which is not an axis or data_column" % k) + "min_itemsize has the key [%s] which is not an axis or data_column" % + k) @property def indexables(self): """ create/cache the indexables if they don't exist """ if self._indexables is None: - d = self.description self._indexables = [] # index columns @@ -2848,7 +2909,8 @@ def validate_data_columns(self, data_columns, min_itemsize): # return valid columns in the order of our axis return [c for c in data_columns if c in axis_labels] - def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, min_itemsize=None, **kwargs): + def create_axes(self, axes, obj, validate=True, nan_rep=None, + data_columns=None, min_itemsize=None, **kwargs): """ create and return the axes leagcy tables 
create an indexable column, indexable index, non-indexable fields @@ -2869,8 +2931,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, try: axes = _AXES_MAP[type(obj)] except: - raise TypeError( - "cannot properly create the storer for: [group->%s,value->%s]" % + raise TypeError("cannot properly create the storer for: [group->%s,value->%s]" % (self.group._v_name, type(obj))) # map axes to numbers @@ -2995,8 +3056,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, try: existing_col = existing_table.values_axes[i] except: - raise ValueError( - "Incompatible appended table [%s] with existing table [%s]" % + raise ValueError("Incompatible appended table [%s] with existing table [%s]" % (blocks, existing_table.values_axes)) else: existing_col = None @@ -3036,8 +3096,8 @@ def process_axes(self, obj, columns=None): obj = _reindex_axis(obj, axis, labels, columns) # apply the selection filters (but keep in the same order) - if self.selection.filter: - for field, op, filt in self.selection.filter: + if self.selection.filter is not None: + for field, op, filt in self.selection.filter.format(): def process_filter(field, filt): @@ -3070,7 +3130,8 @@ def process_filter(field, filt): return obj - def create_description(self, complib=None, complevel=None, fletcher32=False, expectedrows=None): + def create_description( + self, complib=None, complevel=None, fletcher32=False, expectedrows=None): """ create the description of the table from the axes & values """ # expected rows estimate @@ -3119,8 +3180,8 @@ def read_column(self, column, where=None, **kwargs): return False if where is not None: - raise Exception( - "read_column does not currently accept a where clause") + raise TypeError("read_column does not currently accept a where " + "clause") # find the axes for a in self.axes: @@ -3128,7 +3189,8 @@ def read_column(self, column, where=None, **kwargs): if not a.is_data_indexable: raise ValueError( - "column [%s] 
can not be extracted individually; it is not data indexable" % column) + "column [%s] can not be extracted individually; it is not data indexable" % + column) # column must be an indexable or a data column c = getattr(self.table.cols, column) @@ -3174,7 +3236,7 @@ class LegacyTable(Table): ndim = 3 def write(self, **kwargs): - raise Exception("write operations are not allowed on legacy tables!") + raise TypeError("write operations are not allowed on legacy tables!") def read(self, where=None, columns=None, **kwargs): """ we have n indexable columns, with an arbitrary number of data axes """ @@ -3418,15 +3480,14 @@ def write_data_chunk(self, indexes, mask, values): rows = rows[~mask.ravel().astype(bool)] except Exception as detail: - raise Exception("cannot create row-data -> %s" % str(detail)) + raise Exception("cannot create row-data -> %s" % detail) try: if len(rows): self.table.append(rows) self.table.flush() except Exception as detail: - raise Exception( - "tables cannot write this data -> %s" % str(detail)) + raise TypeError("tables cannot write this data -> %s" % detail) def delete(self, where=None, **kwargs): @@ -3626,9 +3687,9 @@ def get_attrs(self): self.levels = [] t = self.table self.index_axes = [a.infer(t) - for a in self.indexables if a.is_an_indexable] + for a in self.indexables if a.is_an_indexable] self.values_axes = [a.infer(t) - for a in self.indexables if not a.is_an_indexable] + for a in self.indexables if not a.is_an_indexable] self.data_columns = [a.name for a in self.values_axes] @property @@ -3755,7 +3816,7 @@ def _convert_index(index, encoding=None): index_name=index_name) if isinstance(index, MultiIndex): - raise Exception('MultiIndex not supported here!') + raise TypeError('MultiIndex not supported here!') inferred_type = lib.infer_dtype(index) @@ -3904,32 +3965,13 @@ def _need_convert(kind): return False -class Term(StringMixin): - - """create a term object that holds a field, op, and value - - Parameters - ---------- - field : dict, 
string term expression, or the field to operate (must be a valid index/column type of DataFrame/Panel) - op : a valid op (defaults to '=') (optional) - >, >=, <, <=, =, != (not equal) are allowed - value : a value or list of values (required) - queryables : a kinds map (dict of column name -> kind), or None i column is non-indexable - encoding : an encoding that will encode the query terms +class Coordinates(object): - Returns - ------- - a Term object + """ holds a returned coordinates list, useful to select the same rows from different tables - Examples - -------- - >>> Term(dict(field = 'index', op = '>', value = '20121114')) - >>> Term('index', '20121114') - >>> Term('index', '>', '20121114') - >>> Term('index', ['20121114','20121114']) - >>> Term('index', datetime(2012,11,14)) - >>> Term('major_axis>20121114') - >>> Term('minor_axis', ['A','U']) + coordinates : holds the array of coordinates + group : the source group + where : the source where """ _ops = ['<=', '<', '>=', '>', '!=', '==', '='] @@ -4134,23 +4176,13 @@ def stringify(value): return TermValue(v, stringify(v), u('string')) -class TermValue(object): - - """ hold a term value the we use to construct a condition/filter """ - def __init__(self, value, converted, kind): - self.value = value - self.converted = converted - self.kind = kind + def __len__(self): + return len(self.values) - def tostring(self, encoding): - """ quote the string if not encoded - else encode and return """ - if self.kind == u('string'): - if encoding is not None: - return self.converted - return '"%s"' % self.converted - return self.converted + def __getitem__(self, key): + """ return a new coordinates object, sliced by the key """ + return Coordinates(self.values[key], self.group, self.where) class Selection(object): @@ -4204,41 +4236,32 @@ def __init__(self, table, where=None, start=None, stop=None, **kwargs): self.terms = self.generate(where) # create the numexpr & the filter - if self.terms: - terms = [t for t in self.terms 
if t.condition is not None] - if len(terms): - self.condition = "(%s)" % ' & '.join( - [t.condition for t in terms]) - self.filter = [] - for t in self.terms: - if t.filter is not None: - self.filter.append(t.filter) + if self.terms is not None: + self.condition, self.filter = self.terms.evaluate() def generate(self, where): """ where can be a : dict,list,tuple,string """ if where is None: return None - if not isinstance(where, (list, tuple)): - where = [where] - else: - - # make this a list of we think that we only have a sigle term & no - # operands inside any terms - if not any([isinstance(w, (list, tuple, Term)) for w in where]): - - if not any([isinstance(w, compat.string_types) and Term._search.match(w) for w in where]): - where = [where] + q = self.table.queryables() + try: + return Expr(where, queryables=q, encoding=self.table.encoding) + except (NameError) as detail: - queryables = self.table.queryables() - return [Term(c, queryables=queryables, encoding=self.table.encoding) for c in where] + # raise a nice message, suggesting that the user should use data_columns + raise ValueError("The passed where expression: {0}\n" + " contains an invalid variable reference\n" + " all of the variable refrences must be a reference to\n" + " an axis (e.g. 
'index' or 'columns'), or a data_column\n" + " The currently defined references are: {1}\n".format(where,','.join(q.keys()))) def select(self): """ generate the selection """ if self.condition is not None: - return self.table.table.readWhere(self.condition, start=self.start, stop=self.stop) + return self.table.table.readWhere(self.condition.format(), start=self.start, stop=self.stop) elif self.coordinates is not None: return self.table.table.readCoordinates(self.coordinates) return self.table.table.read(start=self.start, stop=self.stop) @@ -4250,7 +4273,7 @@ def select_coords(self): if self.condition is None: return np.arange(self.table.nrows) - return self.table.table.getWhereList(self.condition, start=self.start, stop=self.stop, sort=True) + return self.table.table.getWhereList(self.condition.format(), start=self.start, stop=self.stop, sort=True) # utilities ### diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py index 34b2811876f30..1cffccea2289f 100644 --- a/pandas/io/tests/test_data.py +++ b/pandas/io/tests/test_data.py @@ -277,7 +277,7 @@ def setUpClass(cls): except ImportError: raise nose.SkipTest - with assert_produces_warning(): + with assert_produces_warning(FutureWarning): cls.aapl = web.Options('aapl') today = datetime.today() diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index 861b4dd7567a0..322b626acc0ad 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -1,10 +1,9 @@ -from __future__ import print_function -from pandas.compat import range, lrange, u import nose import unittest -import os import sys +import os import warnings +from contextlib import contextmanager import datetime import numpy as np @@ -23,9 +22,8 @@ assert_series_equal) from pandas import concat, Timestamp from pandas import compat, _np_version_under1p7 -from pandas.core import common as com - -from numpy.testing.decorators import slow +from pandas.compat import range, lrange, u +from 
pandas.util.testing import assert_produces_warning try: import tables @@ -42,12 +40,12 @@ # contextmanager to ensure the file cleanup def safe_remove(path): if path is not None: - import os try: os.remove(path) except: pass + def safe_close(store): try: if store is not None: @@ -55,7 +53,6 @@ def safe_close(store): except: pass -from contextlib import contextmanager @contextmanager def ensure_clean(path, mode='a', complevel=None, complib=None, @@ -82,6 +79,7 @@ def _maybe_remove(store, key): except: pass + def compat_assert_produces_warning(w,f): """ don't produce a warning under PY3 """ if compat.PY3: @@ -90,6 +88,7 @@ def compat_assert_produces_warning(w,f): with tm.assert_produces_warning(expected_warning=w): f() + class TestHDFStore(unittest.TestCase): def setUp(self): @@ -329,8 +328,8 @@ def test_contains(self): self.assert_('bar' not in store) # GH 2694 - with tm.assert_produces_warning(expected_warning=tables.NaturalNameWarning): - store['node())'] = tm.makeDataFrame() + warnings.filterwarnings('ignore', category=tables.NaturalNameWarning) + store['node())'] = tm.makeDataFrame() self.assert_('node())' in store) def test_versioning(self): @@ -751,7 +750,7 @@ def test_encoding(self): raise nose.SkipTest('system byteorder is not little, skipping test_encoding!') with ensure_clean(self.path) as store: - df = DataFrame(dict(A='foo',B='bar'),index=lrange(5)) + df = DataFrame(dict(A='foo',B='bar'),index=range(5)) df.loc[2,'A'] = np.nan df.loc[3,'B'] = np.nan _maybe_remove(store, 'df') @@ -887,16 +886,16 @@ def test_append_frame_column_oriented(self): expected = df.reindex(columns=['A']) tm.assert_frame_equal(expected, result) - # this isn't supported - self.assertRaises(TypeError, store.select, 'df1', ( - 'columns=A', Term('index', '>', df.index[4]))) - # selection on the non-indexable result = store.select( - 'df1', ('columns=A', Term('index', '=', df.index[0:4]))) + 'df1', ('columns=A', Term('index=df.index[0:4]'))) expected = df.reindex(columns=['A'], 
index=df.index[0:4]) tm.assert_frame_equal(expected, result) + # this isn't supported + self.assertRaises(TypeError, store.select, 'df1', ( + 'columns=A', Term('index>df.index[4]'))) + def test_append_with_different_block_ordering(self): #GH 4096; using same frames, but different block orderings @@ -905,7 +904,7 @@ def test_append_with_different_block_ordering(self): for i in range(10): df = DataFrame(np.random.randn(10,2),columns=list('AB')) - df['index'] = lrange(10) + df['index'] = range(10) df['index'] += i*10 df['int64'] = Series([1]*len(df),dtype='int64') df['int16'] = Series([1]*len(df),dtype='int16') @@ -1081,7 +1080,7 @@ def check_col(key,name,size): def check_col(key,name,size): self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size) - df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10)) + df = DataFrame(dict(A = 'foo', B = 'bar'),index=range(10)) # a min_itemsize that creates a data_column _maybe_remove(store, 'df') @@ -1134,7 +1133,7 @@ def test_append_with_data_columns(self): # data column searching (with an indexable and a data_columns) result = store.select( - 'df', [Term('B>0'), Term('index', '>', df.index[3])]) + 'df', [Term('B>0'), Term('index>df.index[3]')]) df_new = df.reindex(index=df.index[4:]) expected = df_new[df_new.B > 0] tm.assert_frame_equal(result, expected) @@ -1146,7 +1145,7 @@ def test_append_with_data_columns(self): df_new['string'][5:6] = 'bar' _maybe_remove(store, 'df') store.append('df', df_new, data_columns=['string']) - result = store.select('df', [Term('string', '=', 'foo')]) + result = store.select('df', [Term('string=foo')]) expected = df_new[df_new.string == 'foo'] tm.assert_frame_equal(result, expected) @@ -1192,14 +1191,14 @@ def check_col(key,name,size): _maybe_remove(store, 'df') store.append( 'df', df_new, data_columns=['A', 'B', 'string', 'string2']) - result = store.select('df', [Term('string', '=', 'foo'), Term( + result = store.select('df', [Term('string=foo'), Term( 
'string2=foo'), Term('A>0'), Term('B<0')]) expected = df_new[(df_new.string == 'foo') & ( df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)] tm.assert_frame_equal(result, expected) # yield an empty frame - result = store.select('df', [Term('string', '=', 'foo'), Term( + result = store.select('df', [Term('string=foo'), Term( 'string2=cool')]) expected = df_new[(df_new.string == 'foo') & ( df_new.string2 == 'cool')] @@ -1316,9 +1315,8 @@ def test_big_table_frame(self): raise nose.SkipTest('no big table frame') # create and write a big table - df = DataFrame(np.random.randn(2000 * 100, 100), - index=lrange(2000 * 100), - columns=['E%03d' % i for i in range(100)]) + df = DataFrame(np.random.randn(2000 * 100, 100), index=range( + 2000 * 100), columns=['E%03d' % i for i in range(100)]) for x in range(20): df['String%03d' % x] = 'string%03d' % x @@ -1328,8 +1326,9 @@ def test_big_table_frame(self): store.append('df', df) rows = store.root.df.table.nrows recons = store.select('df') + assert isinstance(recons, DataFrame) - print("\nbig_table frame [%s] -> %5.2f" % (rows, time.time() - x)) + print ("\nbig_table frame [%s] -> %5.2f" % (rows, time.time() - x)) def test_big_table2_frame(self): # this is a really big table: 1m rows x 60 float columns, 20 string, 20 datetime @@ -1340,15 +1339,14 @@ def test_big_table2_frame(self): print ("\nbig_table2 start") import time start_time = time.time() - df = DataFrame(np.random.randn(1000 * 1000, 60), - index=lrange(int(1000 * 1000)), - columns=['E%03d' % i for i in range(60)]) + df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int( + 1000 * 1000)), columns=['E%03d' % i for i in range(60)]) for x in range(20): df['String%03d' % x] = 'string%03d' % x for x in range(20): df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0) - print("\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f" + print ("\nbig_table2 frame (creation of df) [rows->%s] -> %5.2f" % (len(df.index), time.time() - start_time)) def 
f(chunksize): @@ -1359,9 +1357,9 @@ def f(chunksize): for c in [10000, 50000, 250000]: start_time = time.time() - print("big_table2 frame [chunk->%s]" % c) + print ("big_table2 frame [chunk->%s]" % c) rows = f(c) - print("big_table2 frame [rows->%s,chunk->%s] -> %5.2f" + print ("big_table2 frame [rows->%s,chunk->%s] -> %5.2f" % (rows, c, time.time() - start_time)) def test_big_put_frame(self): @@ -1370,23 +1368,23 @@ def test_big_put_frame(self): print ("\nbig_put start") import time start_time = time.time() - df = DataFrame(np.random.randn(1000 * 1000, 60), index=lrange(int( + df = DataFrame(np.random.randn(1000 * 1000, 60), index=range(int( 1000 * 1000)), columns=['E%03d' % i for i in range(60)]) for x in range(20): df['String%03d' % x] = 'string%03d' % x for x in range(20): df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0) - print("\nbig_put frame (creation of df) [rows->%s] -> %5.2f" + print ("\nbig_put frame (creation of df) [rows->%s] -> %5.2f" % (len(df.index), time.time() - start_time)) with ensure_clean(self.path, mode='w') as store: start_time = time.time() - store = HDFStore(fn, mode='w') + store = HDFStore(self.path, mode='w') store.put('df', df) - print(df.get_dtype_counts()) - print("big_put frame [shape->%s] -> %5.2f" + print (df.get_dtype_counts()) + print ("big_put frame [shape->%s] -> %5.2f" % (df.shape, time.time() - start_time)) def test_big_table_panel(self): @@ -1410,8 +1408,9 @@ def test_big_table_panel(self): store.append('wp', wp) rows = store.root.wp.table.nrows recons = store.select('wp') + assert isinstance(recons, Panel) - print("\nbig_table panel [%s] -> %5.2f" % (rows, time.time() - x)) + print ("\nbig_table panel [%s] -> %5.2f" % (rows, time.time() - x)) def test_append_diff_item_order(self): @@ -1654,7 +1653,6 @@ def test_table_values_dtypes_roundtrip(self): expected.sort() tm.assert_series_equal(result,expected) - def test_table_mixed_dtypes(self): # frame @@ -1713,7 +1711,7 @@ def 
test_unimplemented_dtypes_table_columns(self): # py3 ok for unicode if not compat.PY3: - l.append(('unicode', u('\u03c3'))) + l.append(('unicode', u('\\u03c3'))) ### currently not supported dtypes #### for n, f in l: @@ -1759,17 +1757,17 @@ def compare(a,b): assert_frame_equal(result,df) # select with tz aware - compare(store.select('df_tz',where=Term('A','>=',df.A[3])),df[df.A>=df.A[3]]) + compare(store.select('df_tz',where=Term('A>=df.A[3]')),df[df.A>=df.A[3]]) _maybe_remove(store, 'df_tz') - df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130103',tz='US/Eastern')),index=lrange(5)) + df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130103',tz='US/Eastern')),index=range(5)) store.append('df_tz',df) result = store['df_tz'] compare(result,df) assert_frame_equal(result,df) _maybe_remove(store, 'df_tz') - df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=lrange(5)) + df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5)) self.assertRaises(TypeError, store.append, 'df_tz', df) # this is ok @@ -1780,7 +1778,7 @@ def compare(a,b): assert_frame_equal(result,df) # can't append with diff timezone - df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=lrange(5)) + df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5)) self.assertRaises(ValueError, store.append, 'df_tz', df) # as index @@ -1866,16 +1864,16 @@ def test_append_with_timedelta(self): result = store.select('df',Term("C","<",-3*86400)) assert_frame_equal(result,df.iloc[3:]) - result = store.select('df',Term("C","<",'-3D')) + result = store.select('df',"C<'-3D'") assert_frame_equal(result,df.iloc[3:]) # a bit hacky here as we don't really deal with the NaT properly - result = store.select('df',Term("C","<",'-500000s')) + result = 
store.select('df',"C<'-500000s'") result = result.dropna(subset=['C']) assert_frame_equal(result,df.iloc[6:]) - result = store.select('df',Term("C","<",'-3.5D')) + result = store.select('df',"C<'-3.5D'") result = result.iloc[1:] assert_frame_equal(result,df.iloc[4:]) @@ -1927,14 +1925,14 @@ def test_remove_where(self): with ensure_clean(self.path) as store: # non-existance - crit1 = Term('index', '>', 'foo') + crit1 = Term('index>foo') self.assertRaises(KeyError, store.remove, 'a', [crit1]) # try to remove non-table (with crit) # non-table ok (where = None) wp = tm.makePanel() - store.put('wp', wp, format='t') - store.remove('wp', [('minor_axis', ['A', 'D'])]) + store.put('wp', wp, format='table') + store.remove('wp', ["minor_axis=['A', 'D']"]) rs = store.select('wp') expected = wp.reindex(minor_axis=['B', 'C']) assert_panel_equal(rs, expected) @@ -1966,8 +1964,8 @@ def test_remove_crit(self): # group row removal date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10]) - crit4 = Term('major_axis', date4) - store.put('wp3', wp, format='table') + crit4 = Term('major_axis=date4') + store.put('wp3', wp, format='t') n = store.remove('wp3', where=[crit4]) assert(n == 36) result = store.select('wp3') @@ -1978,8 +1976,8 @@ def test_remove_crit(self): store.put('wp', wp, format='table') date = wp.major_axis[len(wp.major_axis) // 2] - crit1 = Term('major_axis', '>', date) - crit2 = Term('minor_axis', ['A', 'D']) + crit1 = Term('major_axis>date') + crit2 = Term("minor_axis=['A', 'D']") n = store.remove('wp', where=[crit1]) assert(n == 56) @@ -1995,14 +1993,14 @@ def test_remove_crit(self): store.put('wp2', wp, format='table') date1 = wp.major_axis[1:3] - crit1 = Term('major_axis', date1) + crit1 = Term('major_axis=date1') store.remove('wp2', where=[crit1]) result = store.select('wp2') expected = wp.reindex(major_axis=wp.major_axis - date1) assert_panel_equal(result, expected) date2 = wp.major_axis[5] - crit2 = Term('major_axis', date2) + crit2 = Term('major_axis=date2') 
store.remove('wp2', where=[crit2]) result = store['wp2'] expected = wp.reindex( @@ -2010,7 +2008,7 @@ def test_remove_crit(self): assert_panel_equal(result, expected) date3 = [wp.major_axis[7], wp.major_axis[9]] - crit3 = Term('major_axis', date3) + crit3 = Term('major_axis=date3') store.remove('wp2', where=[crit3]) result = store['wp2'] expected = wp.reindex( @@ -2020,62 +2018,102 @@ def test_remove_crit(self): # corners store.put('wp4', wp, format='table') n = store.remove( - 'wp4', where=[Term('major_axis', '>', wp.major_axis[-1])]) + 'wp4', where=[Term('major_axis>wp.major_axis[-1]')]) result = store.select('wp4') assert_panel_equal(result, wp) - def test_terms(self): + def test_invalid_terms(self): with ensure_clean(self.path) as store: + df = tm.makeTimeDataFrame() + df['string'] = 'foo' + df.ix[0:4,'string'] = 'bar' wp = tm.makePanel() p4d = tm.makePanel4D() + store.put('df', df, format='table') store.put('wp', wp, format='table') store.put('p4d', p4d, format='table') # some invalid terms - terms = [ - ['minor', ['A', 'B']], - ['index', ['20121114']], - ['index', ['20121114', '20121114']], - ] - for t in terms: - self.assertRaises(Exception, store.select, 'wp', t) + self.assertRaises(ValueError, store.select, 'wp', "minor=['A', 'B']") + self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114']"]) + self.assertRaises(ValueError, store.select, 'wp', ["index=['20121114', '20121114']"]) + self.assertRaises(TypeError, Term) - self.assertRaises(Exception, Term.__init__) - self.assertRaises(Exception, Term.__init__, 'blah') - self.assertRaises(Exception, Term.__init__, 'index') - self.assertRaises(Exception, Term.__init__, 'index', '==') - self.assertRaises(Exception, Term.__init__, 'index', '>', 5) + # more invalid + self.assertRaises(ValueError, store.select, 'df','df.index[3]') + self.assertRaises(SyntaxError, store.select, 'df','index>') + self.assertRaises(ValueError, store.select, 'wp', "major_axis<'20000108' & minor_axis['A', 'B']") + + # from 
the docs + with tm.ensure_clean(self.path) as path: + dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10)) + dfq.to_hdf(path,'dfq',format='table',data_columns=True) + + # check ok + read_hdf(path,'dfq',where="index>Timestamp('20130104') & columns=['A', 'B']") + read_hdf(path,'dfq',where="A>0 or C>0") + + # catch the invalid reference + with tm.ensure_clean(self.path) as path: + dfq = DataFrame(np.random.randn(10,4),columns=list('ABCD'),index=date_range('20130101',periods=10)) + dfq.to_hdf(path,'dfq',format='table') + + self.assertRaises(ValueError, read_hdf, path,'dfq',where="A>0 or C>0") + + def test_terms(self): + + with ensure_clean(self.path) as store: + + wp = tm.makePanel() + p4d = tm.makePanel4D() + store.put('wp', wp, table=True) + store.put('p4d', p4d, table=True) # panel result = store.select('wp', [Term( - 'major_axis<20000108'), Term('minor_axis', '=', ['A', 'B'])]) + 'major_axis<"20000108"'), Term("minor_axis=['A', 'B']")]) expected = wp.truncate(after='20000108').reindex(minor=['A', 'B']) assert_panel_equal(result, expected) + # with deprecation + result = store.select('wp', [Term( + 'major_axis','<',"20000108"), Term("minor_axis=['A', 'B']")]) + expected = wp.truncate(after='20000108').reindex(minor=['A', 'B']) + tm.assert_panel_equal(result, expected) + # p4d - result = store.select('p4d', [Term('major_axis<20000108'), - Term('minor_axis', '=', ['A', 'B']), - Term('items', '=', ['ItemA', 'ItemB'])]) + result = store.select('p4d', [Term('major_axis<"20000108"'), + Term("minor_axis=['A', 'B']"), + Term("items=['ItemA', 'ItemB']")]) expected = p4d.truncate(after='20000108').reindex( minor=['A', 'B'], items=['ItemA', 'ItemB']) assert_panel4d_equal(result, expected) - # valid terms + # back compat invalid terms terms = [ dict(field='major_axis', op='>', value='20121114'), - ('major_axis', '20121114'), - ('major_axis', '>', '20121114'), - (('major_axis', ['20121114', '20121114']),), - ('major_axis', 
datetime.datetime(2012, 11, 14)), + [ dict(field='major_axis', op='>', value='20121114') ], + [ "minor_axis=['A','B']", dict(field='major_axis', op='>', value='20121114') ] + ] + for t in terms: + with tm.assert_produces_warning(expected_warning=DeprecationWarning): + Term(t) + + # valid terms + terms = [ + ('major_axis=20121114'), + ('major_axis>20121114'), + (("major_axis=['20121114', '20121114']"),), + ('major_axis=datetime.datetime(2012, 11, 14)'), 'major_axis> 20121114', 'major_axis >20121114', 'major_axis > 20121114', - (('minor_axis', ['A', 'B']),), - (('minor_axis', ['A', 'B']),), - ((('minor_axis', ['A', 'B']),),), - (('items', ['ItemA', 'ItemB']),), + (("minor_axis=['A', 'B']"),), + (("minor_axis=['A', 'B']"),), + ((("minor_axis==['A', 'B']"),),), + (("items=['ItemA', 'ItemB']"),), ('items=ItemA'), ] @@ -2085,13 +2123,53 @@ def test_terms(self): # valid for p4d only terms = [ - (('labels', '=', ['l1', 'l2']),), - Term('labels', '=', ['l1', 'l2']), + (("labels=['l1', 'l2']"),), + Term("labels=['l1', 'l2']"), ] for t in terms: store.select('p4d', t) + def test_term_compat(self): + with ensure_clean(self.path) as store: + + wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'], + major_axis=date_range('1/1/2000', periods=5), + minor_axis=['A', 'B', 'C', 'D']) + store.append('wp',wp) + + result = store.select('wp', [Term('major_axis>20000102'), + Term('minor_axis', '=', ['A','B']) ]) + expected = wp.loc[:,wp.major_axis>Timestamp('20000102'),['A','B']] + assert_panel_equal(result, expected) + + store.remove('wp', Term('major_axis>20000103')) + result = store.select('wp') + expected = wp.loc[:,wp.major_axis<=Timestamp('20000103'),:] + assert_panel_equal(result, expected) + + def test_same_name_scoping(self): + + with ensure_clean(self.path) as store: + + import pandas as pd + df = DataFrame(np.random.randn(20, 2),index=pd.date_range('20130101',periods=20)) + store.put('df', df, table=True) + expected = df[df.index>pd.Timestamp('20130105')] + + import 
datetime + result = store.select('df','index>datetime.datetime(2013,1,5)') + assert_frame_equal(result,expected) + + from datetime import datetime + + # technically an error, but allow it + result = store.select('df','index>datetime.datetime(2013,1,5)') + assert_frame_equal(result,expected) + + result = store.select('df','index>datetime(2013,1,5)') + assert_frame_equal(result,expected) + def test_series(self): s = tm.makeStringSeries() @@ -2211,7 +2289,7 @@ def test_index_types(self): self._check_roundtrip(ser, func) ser = Series(values, [datetime.datetime( - 2012, 1, 1), datetime.datetime(2012, 1, 2)]) + 2012, 1, 1), datetime.datetime(2012, 1, 2)]) self._check_roundtrip(ser, func) def test_timeseries_preepoch(self): @@ -2525,7 +2603,7 @@ def test_select(self): _maybe_remove(store, 'wp') store.append('wp', wp) items = ['Item%03d' % i for i in range(80)] - result = store.select('wp', Term('items', items)) + result = store.select('wp', Term('items=items')) expected = wp.reindex(items=items) assert_panel_equal(expected, result) @@ -2542,7 +2620,7 @@ def test_select(self): tm.assert_frame_equal(expected, result) # equivalentsly - result = store.select('df', [('columns', ['A', 'B'])]) + result = store.select('df', [("columns=['A', 'B']")]) expected = df.reindex(columns=['A', 'B']) tm.assert_frame_equal(expected, result) @@ -2575,7 +2653,8 @@ def test_select_dtypes(self): df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300))) _maybe_remove(store, 'df') store.append('df', df, data_columns=['ts', 'A']) - result = store.select('df', [Term('ts', '>=', Timestamp('2012-02-01'))]) + + result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")]) expected = df[df.ts >= Timestamp('2012-02-01')] tm.assert_frame_equal(expected, result) @@ -2602,7 +2681,7 @@ def test_select_dtypes(self): _maybe_remove(store, 'df_int') store.append('df_int', df) result = store.select( - 'df_int', [Term("index<10"), Term("columns", "=", ["A"])]) + 'df_int', 
[Term("index<10"), Term("columns=['A']")]) expected = df.reindex(index=list(df.index)[0:10],columns=['A']) tm.assert_frame_equal(expected, result) @@ -2612,7 +2691,7 @@ def test_select_dtypes(self): _maybe_remove(store, 'df_float') store.append('df_float', df) result = store.select( - 'df_float', [Term("index<10.0"), Term("columns", "=", ["A"])]) + 'df_float', [Term("index<10.0"), Term("columns=['A']")]) expected = df.reindex(index=list(df.index)[0:10],columns=['A']) tm.assert_frame_equal(expected, result) @@ -2622,36 +2701,36 @@ def test_select_with_many_inputs(self): df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300), - B=lrange(300), + B=range(300), users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)])) _maybe_remove(store, 'df') store.append('df', df, data_columns=['ts', 'A', 'B', 'users']) # regular select - result = store.select('df', [Term('ts', '>=', Timestamp('2012-02-01'))]) + result = store.select('df', [Term("ts>=Timestamp('2012-02-01')")]) expected = df[df.ts >= Timestamp('2012-02-01')] tm.assert_frame_equal(expected, result) # small selector - result = store.select('df', [Term('ts', '>=', Timestamp('2012-02-01')),Term('users',['a','b','c'])]) + result = store.select('df', [Term("ts>=Timestamp('2012-02-01') & users=['a','b','c']")]) expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(['a','b','c']) ] tm.assert_frame_equal(expected, result) # big selector along the columns selector = [ 'a','b','c' ] + [ 'a%03d' % i for i in range(60) ] - result = store.select('df', [Term('ts', '>=', Timestamp('2012-02-01')),Term('users',selector)]) + result = store.select('df', [Term("ts>=Timestamp('2012-02-01')"),Term('users=selector')]) expected = df[ (df.ts >= Timestamp('2012-02-01')) & df.users.isin(selector) ] tm.assert_frame_equal(expected, result) - selector = lrange(100,200) - result = store.select('df', [Term('B', selector)]) + selector = range(100,200) + result = store.select('df', 
[Term('B=selector')]) expected = df[ df.B.isin(selector) ] tm.assert_frame_equal(expected, result) self.assert_(len(result) == 100) # big selector along the index selector = Index(df.ts[0:100].values) - result = store.select('df', [Term('ts', selector)]) + result = store.select('df', [Term('ts=selector')]) expected = df[ df.ts.isin(selector.values) ] tm.assert_frame_equal(expected, result) self.assert_(len(result) == 100) @@ -2807,15 +2886,15 @@ def test_panel_select(self): store.put('wp', wp, format='table') date = wp.major_axis[len(wp.major_axis) // 2] - crit1 = ('major_axis', '>=', date) - crit2 = ('minor_axis', '=', ['A', 'D']) + crit1 = ('major_axis>=date') + crit2 = ("minor_axis=['A', 'D']") result = store.select('wp', [crit1, crit2]) expected = wp.truncate(before=date).reindex(minor=['A', 'D']) assert_panel_equal(result, expected) result = store.select( - 'wp', ['major_axis>=20000124', ('minor_axis', '=', ['A', 'B'])]) + 'wp', ['major_axis>="20000124"', ("minor_axis=['A', 'B']")]) expected = wp.truncate(before='20000124').reindex(minor=['A', 'B']) assert_panel_equal(result, expected) @@ -2827,9 +2906,9 @@ def test_frame_select(self): store.put('frame', df,format='table') date = df.index[len(df) // 2] - crit1 = ('index', '>=', date) - crit2 = ('columns', ['A', 'D']) - crit3 = ('columns', 'A') + crit1 = Term('index>=date') + crit2 = ("columns=['A', 'D']") + crit3 = ('columns=A') result = store.select('frame', [crit1, crit2]) expected = df.ix[date:, ['A', 'D']] @@ -2850,6 +2929,67 @@ def test_frame_select(self): # self.assertRaises(ValueError, store.select, # 'frame', [crit1, crit2]) + def test_frame_select_complex(self): + """ select via complex criteria """ + + df = tm.makeTimeDataFrame() + df['string'] = 'foo' + df.loc[df.index[0:4],'string'] = 'bar' + + with ensure_clean(self.path) as store: + store.put('df', df, table=True, data_columns=['string']) + + # empty + result = store.select('df', 'index>df.index[3] & string="bar"') + expected = 
df.loc[(df.index>df.index[3]) & (df.string=='bar')] + tm.assert_frame_equal(result, expected) + + result = store.select('df', 'index>df.index[3] & string="foo"') + expected = df.loc[(df.index>df.index[3]) & (df.string=='foo')] + tm.assert_frame_equal(result, expected) + + # or + result = store.select('df', 'index>df.index[3] | string="bar"') + expected = df.loc[(df.index>df.index[3]) | (df.string=='bar')] + tm.assert_frame_equal(result, expected) + + result = store.select('df', '(index>df.index[3] & index<=df.index[6]) | string="bar"') + expected = df.loc[((df.index>df.index[3]) & (df.index<=df.index[6])) | (df.string=='bar')] + tm.assert_frame_equal(result, expected) + + # invert + result = store.select('df', 'string!="bar"') + expected = df.loc[df.string!='bar'] + tm.assert_frame_equal(result, expected) + + # invert not implemented in numexpr :( + self.assertRaises(NotImplementedError, store.select, 'df', '~(string="bar")') + + # invert ok for filters + result = store.select('df', "~(columns=['A','B'])") + expected = df.loc[:,df.columns-['A','B']] + tm.assert_frame_equal(result, expected) + + # in + result = store.select('df', "index>df.index[3] & columns in ['A','B']") + expected = df.loc[df.index>df.index[3]].reindex(columns=['A','B']) + tm.assert_frame_equal(result, expected) + + def test_invalid_filtering(self): + + # can't use more than one filter (atm) + + df = tm.makeTimeDataFrame() + + with ensure_clean(self.path) as store: + store.put('df', df, table=True) + + # not implemented + self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A'] | columns=['B']") + + # in theory we could deal with this + self.assertRaises(NotImplementedError, store.select, 'df', "columns=['A','B'] & columns=['C']") + def test_string_select(self): # GH 2973 @@ -2898,7 +3038,6 @@ def test_string_select(self): expected = df[df.int!=2] assert_frame_equal(result,expected) - def test_read_column(self): df = tm.makeTimeDataFrame() @@ -2917,7 +3056,7 @@ def f(): # valid 
result = store.select_column('df', 'index') tm.assert_almost_equal(result.values, Series(df.index).values) - tm.assert_isinstance(result,Series) + self.assert_(isinstance(result,Series)) # not a data indexable column self.assertRaises( @@ -3116,18 +3255,11 @@ def test_select_as_multiple(self): tm.assert_frame_equal(result, expected) # multiple (diff selector) - try: - result = store.select_as_multiple(['df1', 'df2'], where=[Term( - 'index', '>', df2.index[4])], selector='df2') - expected = concat([df1, df2], axis=1) - expected = expected[5:] - tm.assert_frame_equal(result, expected) - except (Exception) as detail: - print("error in select_as_multiple %s" % str(detail)) - print("store: %s" % store) - print("df1: %s" % df1) - print("df2: %s" % df2) - + result = store.select_as_multiple(['df1', 'df2'], where=[Term( + 'index>df2.index[4]')], selector='df2') + expected = concat([df1, df2], axis=1) + expected = expected[5:] + tm.assert_frame_equal(result, expected) # test excpection for diff rows store.append('df3', tm.makeTimeDataFrame(nper=50)) @@ -3142,15 +3274,15 @@ def test_start_stop(self): store.append('df', df) result = store.select( - 'df', [Term("columns", "=", ["A"])], start=0, stop=5) + 'df', [Term("columns=['A']")], start=0, stop=5) expected = df.ix[0:4, ['A']] tm.assert_frame_equal(result, expected) # out of range result = store.select( - 'df', [Term("columns", "=", ["A"])], start=30, stop=40) + 'df', [Term("columns=['A']")], start=30, stop=40) assert(len(result) == 0) - tm.assert_isinstance(result, DataFrame) + assert(type(result) == DataFrame) def test_select_filter_corner(self): @@ -3161,7 +3293,7 @@ def test_select_filter_corner(self): with ensure_clean(self.path) as store: store.put('frame', df, format='table') - crit = Term('columns', df.columns[:75]) + crit = Term('columns=df.columns[:75]') result = store.select('frame', [crit]) tm.assert_frame_equal(result, df.ix[:, df.columns[:75]]) @@ -3190,7 +3322,6 @@ def _check_double_roundtrip(self, obj, 
comparator, compression=False, again = store['obj'] comparator(again, obj, **kwargs) - def _check_roundtrip_table(self, obj, comparator, compression=False): options = {} if compression: @@ -3296,6 +3427,7 @@ def test_pytables_native_read(self): try: store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native.h5'), 'r') d2 = store['detector/readout'] + assert isinstance(d2, DataFrame) finally: safe_close(store) @@ -3303,6 +3435,7 @@ def test_pytables_native_read(self): store = HDFStore(tm.get_data_path('legacy_hdf/pytables_native2.h5'), 'r') str(store) d1 = store['detector'] + assert isinstance(d1, DataFrame) finally: safe_close(store) @@ -3330,11 +3463,12 @@ def test_legacy_table_read(self): # old version warning with tm.assert_produces_warning(expected_warning=IncompatibilityWarning): self.assertRaises( - Exception, store.select, 'wp1', Term('minor_axis', '=', 'B')) + Exception, store.select, 'wp1', Term('minor_axis=B')) - with tm.assert_produces_warning(expected_warning=IncompatibilityWarning): df2 = store.select('df2') - store.select('df2', Term('index', '>', df2.index[2])) + result = store.select('df2', Term('index>df2.index[2]')) + expected = df2[df2.index > df2.index[2]] + assert_frame_equal(expected, result) finally: safe_close(store) @@ -3352,11 +3486,18 @@ def test_legacy_0_10_read(self): def test_legacy_0_11_read(self): # legacy from 0.11 try: - store = HDFStore(tm.get_data_path('legacy_hdf/legacy_table_0.11.h5'), 'r') + path = os.path.join('legacy_hdf', 'legacy_table_0.11.h5') + store = HDFStore(tm.get_data_path(path), 'r') str(store) + assert 'df' in store + assert 'df1' in store + assert 'mi' in store df = store.select('df') df1 = store.select('df1') mi = store.select('mi') + assert isinstance(df, DataFrame) + assert isinstance(df1, DataFrame) + assert isinstance(mi, DataFrame) finally: safe_close(store) @@ -3364,10 +3505,9 @@ def test_copy(self): def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs): try: - import os - if f 
is None: - f = tm.get_data_path('legacy_hdf/legacy_0.10.h5') + f = tm.get_data_path(os.path.join('legacy_hdf', + 'legacy_0.10.h5')) store = HDFStore(f, 'r') @@ -3380,7 +3520,7 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs): # check keys if keys is None: - keys = list(store.keys()) + keys = store.keys() self.assert_(set(keys) == set(tstore.keys())) # check indicies & nrows @@ -3437,6 +3577,7 @@ def test_legacy_table_write(self): df = DataFrame(dict(A = 'foo', B = 'bar'),index=lrange(10)) store.append('df', df, data_columns = ['B'], min_itemsize={'A' : 200 }) + store.append('wp', wp) store.close() @@ -3524,6 +3665,7 @@ def _test_sort(obj): else: raise ValueError('type not supported here') + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py index e2051eba7f42a..8c5764a3f59a6 100644 --- a/pandas/tests/test_common.py +++ b/pandas/tests/test_common.py @@ -4,15 +4,13 @@ import nose from nose.tools import assert_equal -import unittest import numpy as np from pandas.tslib import iNaT from pandas import Series, DataFrame, date_range, DatetimeIndex, Timestamp -import pandas.compat as compat +from pandas import compat from pandas.compat import range, long, lrange, lmap, u from pandas.core.common import notnull, isnull -import pandas.compat as compat import pandas.core.common as com import pandas.util.testing as tm import pandas.core.config as cf @@ -42,6 +40,7 @@ def __getitem__(self): assert(not is_seq(A())) + def test_notnull(): assert notnull(1.) 
assert not notnull(None) @@ -121,11 +120,13 @@ def test_isnull_datetime(): assert(mask[0]) assert(not mask[1:].any()) + def test_datetimeindex_from_empty_datetime64_array(): for unit in [ 'ms', 'us', 'ns' ]: idx = DatetimeIndex(np.array([], dtype='datetime64[%s]' % unit)) assert(len(idx) == 0) + def test_nan_to_nat_conversions(): df = DataFrame(dict({ @@ -144,6 +145,7 @@ def test_nan_to_nat_conversions(): if LooseVersion(np.__version__) >= '1.7.0': assert(s[8].value == np.datetime64('NaT').astype(np.int64)) + def test_any_none(): assert(com._any_none(1, 2, 3, None)) assert(not com._any_none(1, 2, 3, 4)) @@ -308,6 +310,7 @@ def test_ensure_int32(): result = com._ensure_int32(values) assert(result.dtype == np.int32) + def test_ensure_platform_int(): # verify that when we create certain types of indices diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index ff76c7c070946..f81620b897a4a 100644 --- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -4,31 +4,25 @@ import unittest import nose -import operator -from numpy import random, nan from numpy.random import randn + +import operator import numpy as np from numpy.testing import assert_array_equal -import pandas as pan -from pandas.core.api import DataFrame, Series, notnull, isnull -from pandas.core import expressions as expr +from pandas.core.api import DataFrame +from pandas.computation import expressions as expr -from pandas.util.testing import (assert_almost_equal, - assert_series_equal, - assert_frame_equal) +from pandas.util.testing import assert_series_equal, assert_frame_equal from pandas import compat -import pandas.util.testing as tm -import pandas.lib as lib - -from numpy.testing.decorators import slow if not expr._USE_NUMEXPR: - raise nose.SkipTest + raise nose.SkipTest("numexpr not available") + -_frame = DataFrame(np.random.randn(10000, 4), columns = list('ABCD'), dtype='float64') -_frame2 = DataFrame(np.random.randn(100, 4), columns = 
list('ABCD'), dtype='float64') +_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64') +_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64') _mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') }) _mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') }) _integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64') @@ -128,11 +122,11 @@ def testit(): result = expr.evaluate(op, op_str, f, f, use_numexpr=True) expected = expr.evaluate(op, op_str, f, f, use_numexpr=False) assert_array_equal(result,expected.values) - + result = expr._can_use_numexpr(op, op_str, f2, f2, 'evaluate') self.assert_(result == False) - + expr.set_use_numexpr(False) testit() expr.set_use_numexpr(True) @@ -149,7 +143,7 @@ def testit(): f11 = f f12 = f + 1 - + f21 = f2 f22 = f2 + 1 @@ -163,7 +157,7 @@ def testit(): result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True) expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False) assert_array_equal(result,expected.values) - + result = expr._can_use_numexpr(op, op_str, f21, f22, 'evaluate') self.assert_(result == False) @@ -180,7 +174,7 @@ def test_where(self): def testit(): for f in [ self.frame, self.frame2, self.mixed, self.mixed2 ]: - + for cond in [ True, False ]: c = np.empty(f.shape,dtype=np.bool_) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index a5c1941a7f2d3..423707e0016d8 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -11,6 +11,8 @@ import nose import functools import itertools +from itertools import product + from pandas.compat import( map, zip, range, long, lrange, lmap, lzip, OrderedDict, cPickle as pickle, u, StringIO @@ -18,7 +20,7 @@ from pandas import compat from numpy import 
random, nan -from numpy.random import randn +from numpy.random import randn, rand import numpy as np import numpy.ma as ma from numpy.testing import assert_array_equal @@ -30,7 +32,7 @@ import pandas.core.format as fmt import pandas.core.datetools as datetools from pandas.core.api import (DataFrame, Index, Series, notnull, isnull, - MultiIndex, DatetimeIndex, Timestamp, Period) + MultiIndex, DatetimeIndex, Timestamp) from pandas import date_range import pandas as pd from pandas.io.parsers import read_csv @@ -40,10 +42,14 @@ assert_series_equal, assert_frame_equal, assertRaisesRegexp, + assertRaises, makeCustomDataframe as mkdf, ensure_clean) from pandas.core.indexing import IndexingError from pandas.core.common import PandasError +from pandas.compat import OrderedDict +from pandas.computation.expr import Expr +import pandas.computation as comp import pandas.util.testing as tm import pandas.lib as lib @@ -81,6 +87,7 @@ def _check_mixed_float(df, dtype = None): if dtypes.get('D'): assert(df.dtypes['D'] == dtypes['D']) + def _check_mixed_int(df, dtype = None): dtypes = dict(A = 'int32', B = 'uint64', C = 'uint8', D = 'int64') if isinstance(dtype, compat.string_types): @@ -97,8 +104,6 @@ def _check_mixed_int(df, dtype = None): assert(df.dtypes['D'] == dtypes['D']) - - class CheckIndexing(object): _multiprocess_can_split_ = True @@ -122,6 +127,14 @@ def test_getitem(self): with assertRaisesRegexp(KeyError, 'no item named random'): self.frame['random'] + df = self.frame.copy() + df['$10'] = randn(len(df)) + ad = randn(len(df)) + df['@awesome_domain'] = ad + self.assertRaises(KeyError, df.__getitem__, 'df["$10"]') + res = df['@awesome_domain'] + assert_array_equal(ad, res.values) + def test_getitem_dupe_cols(self): df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b']) try: @@ -2119,7 +2132,6 @@ def test_constructor_cast_failure(self): # this is ok df['foo2'] = np.ones((4,2)).tolist() - def test_constructor_dtype_nocast_view(self): df = DataFrame([[1, 2]]) 
should_be_view = DataFrame(df, dtype=df[0].dtype) @@ -3166,7 +3178,6 @@ def test_constructor_single_value(self): with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'): DataFrame('a', [1, 2], ['a', 'c'], float) - def test_constructor_with_datetimes(self): intname = np.dtype(np.int_).name floatname = np.dtype(np.float_).name @@ -5238,8 +5249,6 @@ def make_dtnat_arr(n,nnat=None): _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2), path,rnlvl=2,cnlvl=2) - - def test_to_csv_from_csv_w_some_infs(self): # test roundtrip with inf, -inf, nan, as full columns and mix @@ -8100,6 +8109,7 @@ def test_mask_edge_case_1xN_frame(self): #---------------------------------------------------------------------- # Transposing + def test_transpose(self): frame = self.frame dft = frame.T @@ -8228,7 +8238,6 @@ def test_diff(self): assert_series_equal(the_diff['A'], tf['A'] - tf['A'].shift(1)) - def test_diff_mixed_dtype(self): df = DataFrame(np.random.randn(5, 3)) df['A'] = np.array([1, 2, 3, 4, 5], dtype=object) @@ -10137,7 +10146,6 @@ def test_unstack_dtypes(self): expected = Series({'float64' : 2, 'object' : 2}) assert_series_equal(result, expected) - def test_reset_index(self): stacked = self.frame.stack()[::2] stacked = DataFrame({'foo': stacked, 'bar': stacked}) @@ -11106,10 +11114,632 @@ def test_isin_with_string_scalar(self): with tm.assertRaises(TypeError): df.isin('aaa') + +def skip_if_no_ne(engine='numexpr'): + if engine == 'numexpr': + try: + import numexpr as ne + except ImportError: + raise nose.SkipTest("cannot query engine numexpr when numexpr not " + "installed") + + +def skip_if_no_pandas_parser(parser): + if parser != 'pandas': + raise nose.SkipTest("cannot evaluate with parser {0!r}".format(parser)) + + +class TestDataFrameQueryWithMultiIndex(object): + def check_query_with_named_multiindex(self, parser, engine): + skip_if_no_ne(engine) + a = tm.choice(['red', 'green'], size=10) + b = tm.choice(['eggs', 'ham'], size=10) + index = 
MultiIndex.from_arrays([a, b], names=['color', 'food']) + df = DataFrame(randn(10, 2), index=index) + ind = Series(df.index.get_level_values('color').values, index=index, + name='color') + + # equality + #import ipdb; ipdb.set_trace() + res1 = df.query('color == "red"', parser=parser, engine=engine) + res2 = df.query('"red" == color', parser=parser, engine=engine) + exp = df[ind == 'red'] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + # inequality + res1 = df.query('color != "red"', parser=parser, engine=engine) + res2 = df.query('"red" != color', parser=parser, engine=engine) + exp = df[ind != 'red'] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('color == ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] == color', parser=parser, engine=engine) + exp = df[ind.isin(['red'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + res1 = df.query('color != ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] != color', parser=parser, engine=engine) + exp = df[~ind.isin(['red'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["red"] in color', parser=parser, engine=engine) + res2 = df.query('"red" in color', parser=parser, engine=engine) + exp = df[ind.isin(['red'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + res1 = df.query('["red"] not in color', parser=parser, engine=engine) + res2 = df.query('"red" not in color', parser=parser, engine=engine) + exp = df[~ind.isin(['red'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + def test_query_with_named_multiindex(self): + for parser, engine in product(['pandas'], ENGINES): + yield self.check_query_with_named_multiindex, parser, engine + + def check_query_with_unnamed_multiindex(self, parser, engine): + skip_if_no_ne(engine) + a = tm.choice(['red', 'green'], size=10) + b = 
tm.choice(['eggs', 'ham'], size=10) + index = MultiIndex.from_arrays([a, b]) + df = DataFrame(randn(10, 2), index=index) + ind = Series(df.index.get_level_values(0).values, index=index) + + res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine) + res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine) + exp = df[ind == 'red'] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + # inequality + res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine) + res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine) + exp = df[ind != 'red'] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine) + exp = df[ind.isin(['red'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine) + res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine) + exp = df[~ind.isin(['red'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine) + res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine) + exp = df[ind.isin(['red'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine) + res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine) + exp = df[~ind.isin(['red'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + #### LEVEL 1 #### + ind = Series(df.index.get_level_values(1).values, index=index) + res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine) + res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine) + exp = df[ind == 'eggs'] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) 
+ + # inequality + res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine) + res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine) + exp = df[ind != 'eggs'] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + # list equality (really just set membership) + res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine) + res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine) + exp = df[ind.isin(['eggs'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine) + res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine) + exp = df[~ind.isin(['eggs'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + # in/not in ops + res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine) + res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine) + exp = df[ind.isin(['eggs'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine) + res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine) + exp = df[~ind.isin(['eggs'])] + assert_frame_equal(res1, exp) + assert_frame_equal(res2, exp) + + def test_query_with_unnamed_multiindex(self): + for parser, engine in product(['pandas'], ENGINES): + yield self.check_query_with_unnamed_multiindex, parser, engine + + def check_query_with_partially_named_multiindex(self, parser, engine): + skip_if_no_ne(engine) + a = tm.choice(['red', 'green'], size=10) + b = np.arange(10) + index = MultiIndex.from_arrays([a, b]) + index.names = [None, 'rating'] + df = DataFrame(randn(10, 2), index=index) + res = df.query('rating == 1', parser=parser, engine=engine) + ind = Series(df.index.get_level_values('rating').values, index=index, + name='rating') + exp = df[ind == 1] + assert_frame_equal(res, exp) + + res = df.query('rating != 1', parser=parser, engine=engine) + 
ind = Series(df.index.get_level_values('rating').values, index=index, + name='rating') + exp = df[ind != 1] + assert_frame_equal(res, exp) + + res = df.query('ilevel_0 == "red"', parser=parser, engine=engine) + ind = Series(df.index.get_level_values(0).values, index=index) + exp = df[ind == "red"] + assert_frame_equal(res, exp) + + res = df.query('ilevel_0 != "red"', parser=parser, engine=engine) + ind = Series(df.index.get_level_values(0).values, index=index) + exp = df[ind != "red"] + assert_frame_equal(res, exp) + + def test_query_with_partially_named_multiindex(self): + for parser, engine in product(['pandas'], ENGINES): + yield self.check_query_with_partially_named_multiindex, parser, engine + + +class TestDataFrameQueryNumExprPandas(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.engine = 'numexpr' + cls.parser = 'pandas' + skip_if_no_ne() + + @classmethod + def tearDownClass(cls): + del cls.engine, cls.parser + + def test_date_query_method(self): + engine, parser = self.engine, self.parser + df = DataFrame(randn(5, 3)) + df['dates1'] = date_range('1/1/2012', periods=5) + df['dates2'] = date_range('1/1/2013', periods=5) + df['dates3'] = date_range('1/1/2014', periods=5) + res = df.query('dates1 < 20130101 < dates3', engine=engine, + parser=parser) + expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)] + assert_frame_equal(res, expec) + + def test_query_scope(self): + engine, parser = self.engine, self.parser + from pandas.computation.common import NameResolutionError + + df = DataFrame({"i": lrange(10), "+": lrange(3, 13), + "r": lrange(4, 14)}) + i, s = 5, 6 + self.assertRaises(NameResolutionError, df.query, 'i < 5', + engine=engine, parser=parser, local_dict={'i': i}) + self.assertRaises(SyntaxError, df.query, 'i - +', engine=engine, + parser=parser) + self.assertRaises(NameResolutionError, df.query, 'i == s', + engine=engine, parser=parser, local_dict={'i': i, + 's': s}) + + def test_query_scope_index(self): + engine, parser = 
self.engine, self.parser + from pandas.computation.common import NameResolutionError + df = DataFrame(np.random.randint(10, size=(10, 3)), + index=Index(range(10), name='blob'), + columns=['a', 'b', 'c']) + from numpy import sin + df.index.name = 'sin' + self.assertRaises(NameResolutionError, df.query, 'sin > 5', + engine=engine, parser=parser, local_dict={'sin': + sin}) + + def test_query(self): + engine, parser = self.engine, self.parser + df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c']) + + assert_frame_equal(df.query('a < b', engine=engine, parser=parser), + df[df.a < df.b]) + assert_frame_equal(df.query('a + b > b * c', engine=engine, + parser=parser), + df[df.a + df.b > df.b * df.c]) + + local_dict = dict(df.iteritems()) + local_dict.update({'df': df}) + self.assertRaises(NameError, df.query, 'a < d & b < f', + local_dict=local_dict, engine=engine, parser=parser) + + # make sure that it's not just because we didn't pass the locals in + self.assertRaises(AssertionError, self.assertRaises, NameError, + df.query, 'a < b', local_dict={'df': df}, + engine=engine, parser=parser) + + def test_query_index_with_name(self): + engine, parser = self.engine, self.parser + df = DataFrame(np.random.randint(10, size=(10, 3)), + index=Index(range(10), name='blob'), + columns=['a', 'b', 'c']) + res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser) + expec = df[(df.index < 5) & (df.a < df.b)] + assert_frame_equal(res, expec) + + res = df.query('blob < b', engine=engine, parser=parser) + expec = df[df.index < df.b] + + assert_frame_equal(res, expec) + + def test_query_index_without_name(self): + engine, parser = self.engine, self.parser + df = DataFrame(np.random.randint(10, size=(10, 3)), + index=range(10), columns=['a', 'b', 'c']) + + # "index" should refer to the index + res = df.query('index < b', engine=engine, parser=parser) + expec = df[df.index < df.b] + assert_frame_equal(res, expec) + + # test against a scalar + res = df.query('index < 5', 
engine=engine, parser=parser) + expec = df[df.index < 5] + assert_frame_equal(res, expec) + + def test_nested_scope(self): + engine = self.engine + parser = self.parser + # smoke test + x = 1 + result = pd.eval('x + 1', engine=engine, parser=parser) + self.assertEqual(result, 2) + + df = DataFrame(np.random.randn(5, 3)) + df2 = DataFrame(np.random.randn(5, 3)) + expected = df[(df>0) & (df2>0)] + + result = df.query('(df>0) & (df2>0)', engine=engine, parser=parser) + assert_frame_equal(result, expected) + + result = pd.eval('df[(df > 0) and (df2 > 0)]', engine=engine, + parser=parser) + assert_frame_equal(result, expected) + + result = pd.eval('df[(df > 0) and (df2 > 0) and df[df > 0] > 0]', + engine=engine, parser=parser) + expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)] + assert_frame_equal(result, expected) + + result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser) + expected = df.query('(df>0) & (df2>0)', engine=engine, parser=parser) + assert_frame_equal(result, expected) + + def test_local_syntax(self): + skip_if_no_pandas_parser(self.parser) + + from pandas.computation.common import NameResolutionError + + engine, parser = self.engine, self.parser + df = DataFrame(randn(100, 10), columns=list('abcdefghij')) + b = 1 + expect = df[df.a < b] + result = df.query('a < @b', engine=engine, parser=parser) + assert_frame_equal(result, expect) + + # scope issue with self.assertRaises so just catch it and let it pass + try: + df.query('a < @b', engine=engine, parser=parser) + except NameResolutionError: + pass + + del b + expect = df[df.a < df.b] + result = df.query('a < b', engine=engine, parser=parser) + assert_frame_equal(result, expect) + + def test_chained_cmp_and_in(self): + skip_if_no_pandas_parser(self.parser) + engine, parser = self.engine, self.parser + cols = list('abc') + df = DataFrame(randn(100, len(cols)), columns=cols) + res = df.query('a < b < c and a not in b not in c', engine=engine, + parser=parser) + ind = (df.a < df.b) & 
(df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b) + expec = df[ind] + assert_frame_equal(res, expec) + + +class TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas): + @classmethod + def setUpClass(cls): + cls.engine = 'numexpr' + cls.parser = 'python' + skip_if_no_ne(cls.engine) + cls.frame = _frame.copy() + + @classmethod + def tearDownClass(cls): + del cls.frame, cls.engine, cls.parser + + def test_date_query_method(self): + engine, parser = self.engine, self.parser + df = DataFrame(randn(5, 3)) + df['dates1'] = date_range('1/1/2012', periods=5) + df['dates2'] = date_range('1/1/2013', periods=5) + df['dates3'] = date_range('1/1/2014', periods=5) + res = df.query('(df.dates1 < 20130101) & (20130101 < df.dates3)', + engine=engine, parser=parser) + expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)] + assert_frame_equal(res, expec) + + def test_nested_scope(self): + engine = self.engine + parser = self.parser + # smoke test + x = 1 + result = pd.eval('x + 1', engine=engine, parser=parser) + self.assertEqual(result, 2) + + df = DataFrame(np.random.randn(5, 3)) + df2 = DataFrame(np.random.randn(5, 3)) + expected = df[(df>0) & (df2>0)] + + result = df.query('(df>0) & (df2>0)', engine=engine, parser=parser) + assert_frame_equal(result, expected) + + result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine, + parser=parser) + assert_frame_equal(result, expected) + + result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]', + engine=engine, parser=parser) + expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)] + assert_frame_equal(result, expected) + + result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser) + expected = df.query('(df>0) & (df2>0)', engine=engine, parser=parser) + assert_frame_equal(result, expected) + + +class TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas): + @classmethod + def setUpClass(cls): + cls.engine = 'python' + cls.parser = 'pandas' + cls.frame = _frame.copy() + + @classmethod + 
def tearDownClass(cls): + del cls.frame, cls.engine, cls.parser + + +class TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython): + @classmethod + def setUpClass(cls): + cls.engine = cls.parser = 'python' + cls.frame = _frame.copy() + + @classmethod + def tearDownClass(cls): + del cls.frame, cls.engine, cls.parser + + +PARSERS = 'python', 'pandas' +ENGINES = 'python', 'numexpr' + + +class TestDataFrameQueryStrings(object): + def check_str_query_method(self, parser, engine): + skip_if_no_ne(engine) + df = DataFrame(randn(10, 1), columns=['b']) + df['strings'] = Series(list('aabbccddee')) + expect = df[df.strings == 'a'] + + if parser != 'pandas': + col = 'strings' + lst = '"a"' + + lhs = [col] * 2 + [lst] * 2 + rhs = lhs[::-1] + + eq, ne = '==', '!=' + ops = 2 * ([eq] + [ne]) + + for lhs, op, rhs in zip(lhs, ops, rhs): + ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs) + assertRaises(NotImplementedError, df.query, ex, engine=engine, + parser=parser, local_dict={'strings': df.strings}) + else: + res = df.query('"a" == strings', engine=engine, parser=parser) + assert_frame_equal(res, expect) + + res = df.query('strings == "a"', engine=engine, parser=parser) + assert_frame_equal(res, expect) + assert_frame_equal(res, df[df.strings.isin(['a'])]) + + expect = df[df.strings != 'a'] + res = df.query('strings != "a"', engine=engine, parser=parser) + assert_frame_equal(res, expect) + + res = df.query('"a" != strings', engine=engine, parser=parser) + assert_frame_equal(res, expect) + assert_frame_equal(res, df[~df.strings.isin(['a'])]) + + def test_str_query_method(self): + for parser, engine in product(PARSERS, ENGINES): + yield self.check_str_query_method, parser, engine + + def test_str_list_query_method(self): + for parser, engine in product(PARSERS, ENGINES): + yield self.check_str_list_query_method, parser, engine + + def check_str_list_query_method(self, parser, engine): + skip_if_no_ne(engine) + df = DataFrame(randn(10, 1), columns=['b']) + 
df['strings'] = Series(list('aabbccddee')) + expect = df[df.strings.isin(['a', 'b'])] + + if parser != 'pandas': + col = 'strings' + lst = '["a", "b"]' + + lhs = [col] * 2 + [lst] * 2 + rhs = lhs[::-1] + + eq, ne = '==', '!=' + ops = 2 * ([eq] + [ne]) + + for lhs, op, rhs in zip(lhs, ops, rhs): + ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs) + assertRaises(NotImplementedError, df.query, ex, engine=engine, + parser=parser, local_dict={'strings': df.strings}) + else: + res = df.query('strings == ["a", "b"]', engine=engine, + parser=parser) + assert_frame_equal(res, expect) + + res = df.query('["a", "b"] == strings', engine=engine, + parser=parser) + assert_frame_equal(res, expect) + + expect = df[~df.strings.isin(['a', 'b'])] + + res = df.query('strings != ["a", "b"]', engine=engine, + parser=parser) + assert_frame_equal(res, expect) + + res = df.query('["a", "b"] != strings', engine=engine, + parser=parser) + assert_frame_equal(res, expect) + + def check_query_with_string_columns(self, parser, engine): + skip_if_no_ne(engine) + df = DataFrame({'a': list('aaaabbbbcccc'), + 'b': list('aabbccddeeff'), + 'c': np.random.randint(5, size=12), + 'd': np.random.randint(9, size=12)}) + if parser == 'pandas': + res = df.query('a in b', parser=parser, engine=engine) + expec = df[df.a.isin(df.b)] + assert_frame_equal(res, expec) + + res = df.query('a in b and c < d', parser=parser, engine=engine) + expec = df[df.a.isin(df.b) & (df.c < df.d)] + assert_frame_equal(res, expec) + else: + with assertRaises(NotImplementedError): + df.query('a in b', parser=parser, engine=engine) + + with assertRaises(NotImplementedError): + df.query('a in b and c < d', parser=parser, engine=engine) + + def test_query_with_string_columns(self): + for parser, engine in product(PARSERS, ENGINES): + yield self.check_query_with_string_columns, parser, engine + + def check_object_array_eq_ne(self, parser, engine): + skip_if_no_ne(engine) + df = DataFrame({'a': list('aaaabbbbcccc'), + 'b': 
list('aabbccddeeff'), + 'c': np.random.randint(5, size=12), + 'd': np.random.randint(9, size=12)}) + res = df.query('a == b', parser=parser, engine=engine) + exp = df[df.a == df.b] + assert_frame_equal(res, exp) + + res = df.query('a != b', parser=parser, engine=engine) + exp = df[df.a != df.b] + assert_frame_equal(res, exp) + + def test_object_array_eq_ne(self): + for parser, engine in product(PARSERS, ENGINES): + yield self.check_object_array_eq_ne, parser, engine + + +class TestDataFrameEvalNumExprPandas(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.engine = 'numexpr' + cls.parser = 'pandas' + skip_if_no_ne() + + @classmethod + def tearDownClass(cls): + del cls.engine, cls.parser + + def setUp(self): + self.frame = DataFrame(randn(10, 3), columns=list('abc')) + + def tearDown(self): + del self.frame + + def test_simple_expr(self): + res = self.frame.eval('a + b', engine=self.engine, parser=self.parser) + expect = self.frame.a + self.frame.b + assert_series_equal(res, expect) + + def test_bool_arith_expr(self): + res = self.frame.eval('a[a < 1] + b', engine=self.engine, + parser=self.parser) + expect = self.frame.a[self.frame.a < 1] + self.frame.b + assert_series_equal(res, expect) + + +class TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas): + @classmethod + def setUpClass(cls): + cls.engine = 'numexpr' + cls.parser = 'python' + skip_if_no_ne() + + @classmethod + def tearDownClass(cls): + del cls.engine, cls.parser + + +class TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas): + @classmethod + def setUpClass(cls): + cls.engine = 'python' + cls.parser = 'pandas' + + @classmethod + def tearDownClass(cls): + del cls.engine, cls.parser + + +class TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython): + @classmethod + def setUpClass(cls): + cls.engine = cls.parser = 'python' + + @classmethod + def tearDownClass(cls): + del cls.engine, cls.parser + + if __name__ == '__main__': - # unittest.main() - import nose - # 
nose.runmodule(argv=[__file__,'-vvs','-x', '--ipdb-failure'], - # exit=False) nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 1572ca481d8a4..8646d261306ca 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -926,7 +926,8 @@ def join(self, other, how='left', level=None, return_indexers=False): See Index.join """ if (not isinstance(other, DatetimeIndex) and len(other) > 0 and - other.inferred_type != 'mixed-integer'): + other.inferred_type not in ('floating', 'mixed-integer', + 'mixed-integer-float', 'mixed')): try: other = DatetimeIndex(other) except TypeError: diff --git a/pandas/util/testing.py b/pandas/util/testing.py index abc13fb2ad9ee..0718dc8926011 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -15,7 +15,7 @@ from contextlib import contextmanager from distutils.version import LooseVersion -from numpy.random import randn +from numpy.random import randn, rand import numpy as np from pandas.core.common import isnull, _is_sequence @@ -27,14 +27,14 @@ import pandas.compat as compat from pandas.compat import( map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter, - raise_with_traceback + raise_with_traceback, httplib ) from pandas import bdate_range from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex -from pandas.io.common import urlopen, HTTPException +from pandas.io.common import urlopen Index = index.Index MultiIndex = index.MultiIndex @@ -48,6 +48,10 @@ _RAISE_NETWORK_ERROR_DEFAULT = False +def randbool(size=(), p=0.5): + return rand(*size) <= p + + def rands(n): choices = string.ascii_letters + string.digits return ''.join(random.choice(choices) for _ in range(n)) @@ -58,10 +62,17 @@ def randu(n): choices += string.digits return ''.join([random.choice(choices) for _ in range(n)]) + +def choice(x, size=10): + """sample with replacement; uniform over the input""" + 
try: + return np.random.choice(x, size=size) + except AttributeError: + return np.random.randint(len(x), size=size).choose(x) + #------------------------------------------------------------------------------ # Console debugging tools - def debug(f, *args, **kwargs): from pdb import Pdb as OldPdb try: @@ -752,7 +763,7 @@ def dec(f): return wrapper -_network_error_classes = IOError, HTTPException +_network_error_classes = IOError, httplib.HTTPException @optional_args @@ -796,13 +807,13 @@ def network(t, raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, >>> import nose >>> @network ... def test_network(): - ... with urlopen("rabbit://bonanza.com") as f: - ... pass + ... with urlopen("rabbit://bonanza.com") as f: + ... pass ... >>> try: - ... test_network() + ... test_network() ... except nose.SkipTest: - ... print "SKIPPING!" + ... print("SKIPPING!") ... SKIPPING! @@ -811,8 +822,8 @@ def network(t, raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, >>> @network(raise_on_error=True) ... def test_network(): - ... with urlopen("complaint://deadparrot.com") as f: - ... pass + ... with urlopen("complaint://deadparrot.com") as f: + ... pass ... 
>>> test_network() Traceback (most recent call last): diff --git a/setup.py b/setup.py index b7df339daf75a..ffd6089bdc88d 100755 --- a/setup.py +++ b/setup.py @@ -83,7 +83,7 @@ except ImportError: cython = False -from os.path import splitext, basename, join as pjoin +from os.path import join as pjoin class build_ext(_build_ext): @@ -506,6 +506,8 @@ def pxd(name): maintainer=AUTHOR, packages=['pandas', 'pandas.compat', + 'pandas.computation', + 'pandas.computation.tests', 'pandas.core', 'pandas.io', 'pandas.rpy', diff --git a/vb_suite/binary_ops.py b/vb_suite/binary_ops.py index 54774344520c9..3f076f9f922a3 100644 --- a/vb_suite/binary_ops.py +++ b/vb_suite/binary_ops.py @@ -21,7 +21,7 @@ start_date=datetime(2012, 1, 1)) setup = common_setup + """ -import pandas.core.expressions as expr +import pandas.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_numexpr_threads(1) @@ -32,7 +32,7 @@ start_date=datetime(2013, 2, 26)) setup = common_setup + """ -import pandas.core.expressions as expr +import pandas.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_use_numexpr(False) @@ -53,7 +53,7 @@ start_date=datetime(2012, 1, 1)) setup = common_setup + """ -import pandas.core.expressions as expr +import pandas.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_numexpr_threads(1) @@ -63,7 +63,7 @@ start_date=datetime(2013, 2, 26)) setup = common_setup + """ -import pandas.core.expressions as expr +import pandas.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_use_numexpr(False) @@ -84,7 +84,7 @@ start_date=datetime(2012, 1, 1)) setup = common_setup + """ -import pandas.core.expressions as expr +import pandas.computation.expressions as expr df = 
DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_numexpr_threads(1) @@ -94,7 +94,7 @@ start_date=datetime(2013, 2, 26)) setup = common_setup + """ -import pandas.core.expressions as expr +import pandas.computation.expressions as expr df = DataFrame(np.random.randn(20000, 100)) df2 = DataFrame(np.random.randn(20000, 100)) expr.set_use_numexpr(False) diff --git a/vb_suite/eval.py b/vb_suite/eval.py new file mode 100644 index 0000000000000..c666cd431cbb4 --- /dev/null +++ b/vb_suite/eval.py @@ -0,0 +1,114 @@ +from vbench.benchmark import Benchmark +from datetime import datetime + +common_setup = """from pandas_vb_common import * +import pandas as pd +df = DataFrame(np.random.randn(20000, 100)) +df2 = DataFrame(np.random.randn(20000, 100)) +df3 = DataFrame(np.random.randn(20000, 100)) +df4 = DataFrame(np.random.randn(20000, 100)) +""" + +setup = common_setup + """ +import pandas.computation.expressions as expr +expr.set_numexpr_threads(1) +""" + +SECTION = 'Eval' + +#---------------------------------------------------------------------- +# binary ops + +#---------------------------------------------------------------------- +# add +eval_frame_add_all_threads = \ + Benchmark("pd.eval('df + df2 + df3 + df4')", common_setup, + name='eval_frame_add_all_threads', + start_date=datetime(2013, 7, 21)) + + + +eval_frame_add_one_thread = \ + Benchmark("pd.eval('df + df2 + df3 + df4')", setup, + name='eval_frame_add_one_thread', + start_date=datetime(2013, 7, 26)) + +eval_frame_add_python = \ + Benchmark("pd.eval('df + df2 + df3 + df4', engine='python')", common_setup, + name='eval_frame_add_python', start_date=datetime(2013, 7, 21)) + +eval_frame_add_python_one_thread = \ + Benchmark("pd.eval('df + df2 + df3 + df4', engine='python')", setup, + name='eval_frame_add_python_one_thread', + start_date=datetime(2013, 7, 26)) +#---------------------------------------------------------------------- +# mult + +eval_frame_mult_all_threads = \ + 
Benchmark("pd.eval('df * df2 * df3 * df4')", common_setup, + name='eval_frame_mult_all_threads', + start_date=datetime(2012, 7, 21)) + +eval_frame_mult_one_thread = \ + Benchmark("pd.eval('df * df2 * df3 * df4')", setup, + name='eval_frame_mult_one_thread', + start_date=datetime(2012, 7, 26)) + +eval_frame_mult_python = \ + Benchmark("pdl.eval('df * df2 * df3 * df4', engine='python')", + common_setup, + name='eval_frame_mult_python', start_date=datetime(2013, 7, 21)) + +eval_frame_mult_python_one_thread = \ + Benchmark("pd.eval('df * df2 * df3 * df4', engine='python')", setup, + name='eval_frame_mult_python_one_thread', + start_date=datetime(2012, 7, 26)) + +#---------------------------------------------------------------------- +# multi and + +eval_frame_and_all_threads = \ + Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)')", + common_setup, + name='eval_frame_and_all_threads', + start_date=datetime(2012, 7, 21)) + +eval_frame_and_one_thread = \ + Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)')", setup, + name='eval_frame_and_one_thread', + start_date=datetime(2012, 7, 26)) + +setup = common_setup +eval_frame_and_python = \ + Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)', engine='python')", + common_setup, name='eval_frame_and_python', + start_date=datetime(2013, 7, 21)) + +eval_frame_and_one_thread = \ + Benchmark("pd.eval('(df > 0) & (df2 > 0) & (df3 > 0) & (df4 > 0)', engine='python')", + setup, + name='eval_frame_and_python_one_thread', + start_date=datetime(2012, 7, 26)) + +#-------------------------------------------------------------------- +# chained comp +eval_frame_chained_cmp_all_threads = \ + Benchmark("pd.eval('df < df2 < df3 < df4')", common_setup, + name='eval_frame_chained_cmp_all_threads', + start_date=datetime(2012, 7, 21)) + +eval_frame_chained_cmp_one_thread = \ + Benchmark("pd.eval('df < df2 < df3 < df4')", setup, + name='eval_frame_chained_cmp_one_thread', + start_date=datetime(2012, 7, 
26)) + +setup = common_setup +eval_frame_chained_cmp_python = \ + Benchmark("pd.eval('df < df2 < df3 < df4', engine='python')", + common_setup, name='eval_frame_chained_cmp_python', + start_date=datetime(2013, 7, 26)) + +eval_frame_chained_cmp_one_thread = \ + Benchmark("pd.eval('df < df2 < df3 < df4', engine='python')", setup, + name='eval_frame_chained_cmp_python_one_thread', + start_date=datetime(2012, 7, 26)) diff --git a/vb_suite/indexing.py b/vb_suite/indexing.py index 1264ae053ffca..beefec256ed81 100644 --- a/vb_suite/indexing.py +++ b/vb_suite/indexing.py @@ -106,7 +106,7 @@ start_date=datetime(2012, 1, 1)) setup = common_setup + """ -import pandas.core.expressions as expr +import pandas.computation.expressions as expr df = DataFrame(np.random.randn(50000, 100)) df2 = DataFrame(np.random.randn(50000, 100)) expr.set_numexpr_threads(1) @@ -118,7 +118,7 @@ setup = common_setup + """ -import pandas.core.expressions as expr +import pandas.computation.expressions as expr df = DataFrame(np.random.randn(50000, 100)) df2 = DataFrame(np.random.randn(50000, 100)) expr.set_use_numexpr(False) diff --git a/vb_suite/suite.py b/vb_suite/suite.py index ca83855c2a109..f3c8dfe3032e0 100644 --- a/vb_suite/suite.py +++ b/vb_suite/suite.py @@ -23,7 +23,8 @@ 'sparse', 'reshape', 'stat_ops', - 'timeseries'] + 'timeseries', + 'eval'] by_module = {} benchmarks = []
closes #3393 closes #2560 cc @jreback # `eval` Roadmap ## PyTables allows natural syntax for queries, with in-line variables allowed some examples `"ts>=Timestamp('2012-02-01') & users=['a','b','c']"` `['A > 0']` `'(index>df.index[3] & index<=df.index[6]) | string="bar"'` ``` dates=pd.date_range('20130101',periods=5) 'index>dates' ``` Todos: - [x] Release Notes - [x] v0.13.txt examples (and link to docs) - [x] update docs / need new examples - [x] API changes (disallow dict, show deprecations for separated expressions) - [x] tests for `|` and `~` operators - [x] tests for invalid filter expressions - [x] code clean up, maybe create new `Expr` base class to opt in/out of allowed operations (e.g. `Numexpr` doesn't want to allow certain operations, but `PyTables` needs them, so maybe define in the base class for simplicity, and just not allow them (checked in `visit`), which right now doesn't allow a `generic_visitor`) - [x] can `ops.Value` be equiv of `ops.Constant`? ## `query` method - [x] a few more doc examples - [x] add a section in `query` docstring to show use of `__getitem__` and then add to `indexing.rst` - [x] `query` docstring ## `DataFrame.eval` method - [x] implement query in terms of this - [x] tests ## Documentation - [x] **RELEASE NOTES** - [x] v0.13.txt examples & link to docs - [x] document `in` and `not in` - [x] enhancing performance section - [x] `eval` docstring - [x] Warn users that this is _not_ a magic bullet - E.g., `x = 3; y = 2; pd.eval('x // y', engine='python')` is many times slower than the same operation in actual Python. 
## Engines - [x] `python` engine for testing (basically a restricted eval, should make sure nothing dangerous a la `eval('os.system("sudo rm -rf /")')`) is possible here - [x] `numexpr` engine ## Parsers - [x] `python` parser is currrently not failing on complex bool ops involving subscripting like it should (potentially could `eval` the compiled AST instead of dispatching to function calls) - [x] preparse should handle subscripting - [x] `pandas` parser should handle subscripting, i.e., `df.query(...) == pd.eval('df[...]')` - [x] `pandas` parser - [x] `python` parser - [x] `pytables` engine and parser (internal) Should python be the default engine for small frames? ## Functions - [x] toplevel `isexpr` - [x] move `eval` to top-level pandas ## Error Handling - [x] syntax error handling - [x] name error handling - [x] ugly stack overflow when an expression is not in frame...issue with variable resolution in `expr.py`. should fail fast. ## Alignment - Algorithm 1. flatten the tree of terms 2. resolve the variables, special casing only numpy arrays/scalars/single unary operations (probably room for more abstraction here) 3. align by joining on the indices using the current broadcasting behavior 4. update the original namespace with the newly aligned version of the underlying numpy array 5. reconstruct the final object with the aforementioned joined indices and type - [x] unary alignment - [x] binary alignment ## Scope - [x] add `@a` syntax for referencing local variables with the same name as a column - [x] deal with global scoping issues (not sure if this is tested well enough...which means probably not) - [x] allow `Expr` objects to be passed to `eval` (`Expr` will pull in the local and global variables from the calling stack frame) ## Miscellaneous - [x] support for `MultiIndex` queries - [x] **remove instance testing methods once series ndframe is merged** - [x] `str` ops (implemented using `isin`) - [x] basic `Timestamp` and `datetime` functionality - [x] vbench! 
- [x] single variable or constant evaluation e.g., `pd.eval('1')` or `pd.eval('df')` - [x] reconstruct the final object to return the expected output type, e.g., `df + df2` should return a `DataFrame` if `df` and `df2` are both `DataFrame` objects. - [x] change `Expression` to `Expr` - [x] add `truediv` keyword to `Expr`, default to `True` - [x] allow `and`, `or` and `not` aliases for the operators `&`, `|` and `~`, respectively - [x] chained comparisons - [x] attribute access syntax e.g., `df.A` ## Operators - [x] `%` - [x] `**` - [x] unary ops don't play well when they are operands of binary ops ## Tests - [ ] `PeriodIndex` objects are untested at large (a few scattered tests for basic things) - [x] add tests for strange column names e.g., `df['@awesome_domain']`, `df['df["$10"]']` - [x] Python 3 handles complex scalars differently, so test that - [x] **revert equality testing in `align.py` once #4379 is merged** - [x] boolean ops fail with a `DataFrame` with a `PeriodIndex` in the columns - [x] test alignment of `DatetimeIndex` objects - [x] rewrite a few tests in the python evaluator to make sure the `//` operator works as it should (it works, but there may be edge cases that I haven't though of yet) - [x] `//` `numexpr` only supports scalar evaluation here: variables are not allowed so this will always eval in python space (half-assed it a bit in the tests here need to fix that) - [x] truediv keyword tests - [x] add tests for nans - [x] fix `**` associativity issue in testing ## Syntax (Easier) - [x] handle `in`, `not in` ( ~~was thinking~~ implemented as `a in b` -> `b.isin(a)` and `a not in b` -> `not a in b` -> `~b.isin(a)`) ## Syntax (More difficult) - reductions (`sum` and `prod`) - math ops (sin, cos, tan, sqrt, etc.), i.e., whatever `numexpr` supports - ~~function calls~~ (we're not building a Python interpreter here :wink:) ## Optimizations There's a quite a bit of recursion going on here, where maybe something stack-based might be more efficient. 
For example, an expression like `df['f in g and a < b < (c ** 2 + b ** 2 - a ** 3) and d in b not in e']` will make 3 recursive calls to eval (not including the top level call). One for the first `in` expression and two for the `in`/`not in` chain. A stack-based implementation of the parser (instead of recursively visiting nodes) could possible shave off a few milliseconds. Not sure if this is worth it though. ## Others - ~~alignment for `NDFrame`~~ #4336 - ~~replace current `evaluate` function~~
https://api.github.com/repos/pandas-dev/pandas/pulls/4164
2013-07-08T20:32:22Z
2013-09-16T18:59:14Z
2013-09-16T18:59:14Z
2014-07-14T06:34:32Z
ENH: add expression evaluation functionality via eval
closes #3393. cc @jreback # `eval` Roadmap ## PyTables allows natural syntax for queries, with in-line variables allowed some examples `"ts>=Timestamp('2012-02-01') & users=['a','b','c']"` `['A > 0']` ``` dates=pd.date_range('20130101',periods=5) 'index>dates' ``` Todos: - [ ] update docs / need new examples - [ ] API changes (disallow dict, show deprecations for separated expressions) - [ ] tests for `|` and `~` operators, and invalid filter expressions - [ ] code clean up, maybe create new `Expr` base class to opt in/out of allowed operations (e.g. `Numexpr` doesn't want to allow certain operations, but `PyTables` needs them, so maybe define in the base class for simplicity, and just not allow them (checked in `visit`), which right now doesn't allow a `generic_visitor`) - [ ] can `ops.Value` be equiv of `ops.Constant`? ## Documentation - [ ] enhancing performance section - [x] `eval` docstring - [ ] Warn users that this is _not_ a magic bullet - E.g., `x = 3; y = 2; pd.eval('x // y', engine='python')` is 1000 times slower than the same operation in actual Python. ## Engines - [x] python engine for testing (basically a restricted eval, should make sure nothing dangerous a la `eval('os.system("sudo rm -rf /")')`) is possible here - [x] numexpr engine Should python be the default engine for small frames? ## Functions - [x] toplevel `isexpr` - [x] move `eval` to top-level pandas ## Error Handling - [x] syntax error handling - [x] name error handling ## Alignment - Algorithm 1. flatten the tree of terms 2. resolve the variables, special casing only numpy arrays/scalars/single unary operations (probably room for more abstraction here) 3. align by joining on the indices using the current broadcasting behavior 4. update the original namespace with the newly aligned version of the underlying numpy array 5. 
reconstruct the final object with the aforementioned joined indices and type - [ ] punt to `'python'` engine when a `Series` and `DataFrame` both have `DatetimeIndexes` (I would rather not do this and just enforce the to-be-deprecated index-aligning-if-both-indices-are-datetimeindex behavior) - [ ] `PeriodIndex`es don't work (well, they won't pass my tests) because of a stack overflow bug when joining a `DatetimeIndex` and a `PeriodIndex` (that bug is slated for 0.13, but a design decision needs to be made) - [ ] alignment for `NDFrame` (is this really necessary?) - [x] unary alignment - [x] binary alignment ## Scope - [x] deal with global scoping issues (not sure if this is tested well enough...which means probably not) - [x] allow `Expr` objects to be passed to `eval` (`Expr` will pull in the local and global variables from the calling stack frame) ## Miscellaneous - [ ] replace current `evaluate` function - [ ] `Expr` in `NDFrame.__getitem__` cc @jreback - [x] reconstruct the final object to return the expected output type, e.g., `df + df2` should return a `DataFrame` if `df` and `df2` are both `DataFrame` objects. - [x] change `Expression` to `Expr` - [x] add `truediv` keyword to `Expr`, default to `True` ## Operators - [x] `%` - [x] `**` - [x] unary ops don't play well when they are operands of binary ops ## Tests - [ ] `DatetimeIndex` and `PeriodIndex` objects are untested because of a few join and alignment issues. 
- [ ] rewrite a few tests in the python evaluator to make sure the `//` operator works as it should (it works, but there may be edge cases that I haven't though of yet) - [ ] `//` `numexpr` only supports scalar evaluation here: variables are not allowed so this will always eval in python space (half-assed it a bit in the tests here need to fix that) - [x] truediv keyword tests - [x] add tests for nans - [x] fix `**` associativity issue in testing ## Near Future - allow `and`, `or` and `not` aliases for the operators `&`, `|` and `~`, respectively - attribute accessing e.g., `df.A` syntax - reductions (`sum` and `prod`) - math ops (sin, cos, tan, sqrt, etc.) ## Far Future - non-python indexing (or just punt to python)
https://api.github.com/repos/pandas-dev/pandas/pulls/4162
2013-07-08T18:16:45Z
2013-07-08T20:11:27Z
2013-07-08T20:11:27Z
2013-09-17T20:53:44Z
DOC: Improve excel docs
diff --git a/doc/source/io.rst b/doc/source/io.rst index bc15aa102dcec..f61eb54888d1c 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1494,7 +1494,8 @@ any pickled pandas object (or any other pickled object) from file: Excel files ----------- -The ``read_excel`` method can read an Excel 2003 file using the ``xlrd`` Python +The ``read_excel`` method can read Excel 2003 (``.xls``) and +Excel 2007 (``.xlsx``) files using the ``xlrd`` Python module and use the same parsing code as the above to convert tabular data into a DataFrame. See the :ref:`cookbook<cookbook.excel>` for some advanced strategies @@ -1516,9 +1517,6 @@ advanced strategies read_excel('path_to_file.xls', 'Sheet1', index_col=None, na_values=['NA']) -To read sheets from an Excel 2007 file, you can pass a filename with a ``.xlsx`` -extension, in which case the ``openpyxl`` module will be used to read the file. - It is often the case that users will insert columns to do temporary computations in Excel and you may not want to read in those columns. `read_excel` takes a `parse_cols` keyword to allow you to specify a subset of columns to parse. diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 03b547fcd47b7..a691075844f8f 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -37,6 +37,11 @@ def read_excel(path_or_buf, sheetname, kind=None, **kwds): column ranges (e.g. "A:E" or "A,C,E:F") na_values : list-like, default None List of additional strings to recognize as NA/NaN + keep_default_na : bool, default True + If na_values are specified and keep_default_na is False the default NaN + values are overridden, otherwise they're appended to + verbose : boolean, default False + Indicate number of NA values placed in non-numeric columns Returns ------- @@ -101,6 +106,11 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0, column ranges (e.g. 
"A:E" or "A,C,E:F") na_values : list-like, default None List of additional strings to recognize as NA/NaN + keep_default_na : bool, default True + If na_values are specified and keep_default_na is False the default NaN + values are overridden, otherwise they're appended to + verbose : boolean, default False + Indicate number of NA values placed in non-numeric columns Returns -------
I simply updated the docs to reflect that `xlrd` handles reading of both `.xls` and `.xlsx` files, and I updated docstrings affected functions by #4131 so that their docstrings stand on their own.
https://api.github.com/repos/pandas-dev/pandas/pulls/4154
2013-07-07T04:40:23Z
2013-07-10T13:09:33Z
2013-07-10T13:09:33Z
2014-07-16T08:18:02Z
ENH: add correct versions of numpy installs
diff --git a/.gitignore b/.gitignore index 9a0794373cfdc..3da165e07c77c 100644 --- a/.gitignore +++ b/.gitignore @@ -36,4 +36,5 @@ pandas/io/*.json .idea/pandas.iml .build_cache_dir .vagrant -*.whl \ No newline at end of file +*.whl +**/wheelhouse/* diff --git a/.travis.yml b/.travis.yml index ff292767011cf..3d3d90eb47129 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,13 +10,13 @@ env: matrix: include: - python: 2.7 - env: NOSE_ARGS="slow and not network" LOCALE_OVERRIDE="zh_CN.GB18030" FULL_DEPS=true + env: NOSE_ARGS="slow and not network" LOCALE_OVERRIDE="zh_CN.GB18030" FULL_DEPS=true JOB_TAG=_LOCALE - python: 2.7 env: NOSE_ARGS="not slow" FULL_DEPS=true - python: 3.2 env: NOSE_ARGS="not slow" FULL_DEPS=true - python: 3.3 - env: NOSE_ARGS="not slow" + env: NOSE_ARGS="not slow" FULL_DEPS=true # allow importing from site-packages, # so apt-get python-x works for system pythons diff --git a/ci/install.sh b/ci/install.sh index 8145e2e74e893..5c681a707ce49 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -15,85 +15,42 @@ echo "inside $0" -# Install Dependencie +# Install Dependencies # as of pip 1.4rc2, wheel files are still being broken regularly, this is a known good # commit. 
should revert to pypi when a final release is out pip install -I git+https://github.com/pypa/pip@42102e9deaea99db08b681d06906c2945f6f95e2#egg=pip -pip Install -I https://bitbucket.org/pypa/setuptools/downloads/setuptools-0.8b6.tar.gz +pv="${TRAVIS_PYTHON_VERSION:0:1}" +[ "$pv" == "2" ] && pv="" +[ "$pv" == "2" ] && DISTRIBUTE_VERSION="==0.6.35" + +pip install -I distribute${DISTRIBUTE_VERSION} pip install wheel # comment this line to disable the fetching of wheel files -#PIP_ARGS+=" -I --use-wheel --find-links=https://cache27-pypandas.rhcloud.com/" +PIP_ARGS+=" -I --use-wheel --find-links=http://cache27diy-cpycloud.rhcloud.com/${TRAVIS_PYTHON_VERSION}${JOB_TAG}/" # Force virtualenv to accpet system_site_packages rm -f $VIRTUAL_ENV/lib/python$TRAVIS_PYTHON_VERSION/no-global-site-packages.txt -if [ x"$LOCALE_OVERRIDE" != x"" ]; then + +if [ -n "$LOCALE_OVERRIDE" ]; then # make sure the locale is available # probably useless, since you would need to relogin sudo locale-gen "$LOCALE_OVERRIDE" -fi; - -#scipy is not included in the cached venv -if [ x"$FULL_DEPS" == x"true" ] ; then - # for pytables gets the lib as well - time sudo apt-get $APT_ARGS install libhdf5-serial-dev - - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then - time sudo apt-get $APT_ARGS install python3-bs4 - elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - time sudo apt-get $APT_ARGS install python-bs4 - fi - - if [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then - time sudo apt-get $APT_ARGS install python3-scipy - elif [ ${TRAVIS_PYTHON_VERSION} == "2.7" ]; then - time sudo apt-get $APT_ARGS install python-scipy - fi fi -# Hard Deps -for dep in nose 'python-dateutil' 'pytz>=2013a' 'cython==0.19.1'; do - time pip install $PIP_ARGS $dep -done - -if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3 - time pip install $PIP_ARGS numpy==1.7.1 -elif [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then - # sudo apt-get $APT_ARGS install python3-numpy; # 1.6.2 or precise - time pip install $PIP_ARGS 
numpy==1.6.1 -else - time pip install $PIP_ARGS numpy==1.6.1 -fi +time pip install $PIP_ARGS -r ci/requirements-${TRAVIS_PYTHON_VERSION}${JOB_TAG}.txt # Optional Deps if [ x"$FULL_DEPS" == x"true" ]; then echo "Installing FULL_DEPS" + # for pytables gets the lib as well + time sudo apt-get $APT_ARGS install libhdf5-serial-dev + time sudo apt-get $APT_ARGS install python${pv}-bs4 + time sudo apt-get $APT_ARGS install python${pv}-scipy + + time sudo apt-get $APT_ARGS remove python${pv}-lxml - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - time pip install $PIP_ARGS xlwt - time pip install $PIP_ARGS bottleneck==0.6.0 - time pip install $PIP_ARGS numexpr==2.1 - time pip install $PIP_ARGS tables==2.3.1 - else - time pip install $PIP_ARGS numexpr==2.1 - time pip install $PIP_ARGS tables==3.0.0 - fi - - time pip install $PIP_ARGS matplotlib==1.2.1 - time pip install $PIP_ARGS openpyxl - time pip install $PIP_ARGS xlrd>=0.9.0 - time pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r=' - time pip install $PIP_ARGS patsy - time pip install $PIP_ARGS html5lib - - if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then - time sudo apt-get $APT_ARGS remove python3-lxml - elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - time sudo apt-get $APT_ARGS remove python-lxml - fi - - pip install $PIP_ARGS lxml # fool statsmodels into thinking pandas was already installed # so it won't refuse to install itself. 
diff --git a/ci/requirements-2.6.txt b/ci/requirements-2.6.txt new file mode 100644 index 0000000000000..3873f56fa6070 --- /dev/null +++ b/ci/requirements-2.6.txt @@ -0,0 +1,4 @@ +numpy==1.6.1 +cython==0.19.1 +python-dateutil==2.1 +pytz==2013b diff --git a/ci/requirements-2.7.txt b/ci/requirements-2.7.txt new file mode 100644 index 0000000000000..797066a0f1699 --- /dev/null +++ b/ci/requirements-2.7.txt @@ -0,0 +1,15 @@ +python-dateutil==2.1 +pytz==2013b +xlwt==0.7.5 +numpy==1.7.1 +cython==0.19.1 +bottleneck==0.6.0 +numexpr==2.1 +tables==2.3.1 +matplotlib==1.2.1 +openpyxl==1.6.2 +xlrd==0.9.2 +patsy==0.1.0 +html5lib==1.0b2 +lxml==3.2.1 +http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r= diff --git a/ci/requirements-2.7_LOCALE.txt b/ci/requirements-2.7_LOCALE.txt new file mode 100644 index 0000000000000..9c9306bdf1872 --- /dev/null +++ b/ci/requirements-2.7_LOCALE.txt @@ -0,0 +1,14 @@ +python-dateutil +pytz==2013b +xlwt==0.7.5 +openpyxl==1.6.2 +xlrd==0.9.2 +numpy==1.6.1 +cython==0.19.1 +bottleneck==0.6.0 +numexpr==2.1 +tables==2.3.1 +matplotlib==1.2.1 +patsy==0.1.0 +html5lib==1.0b2 +lxml==3.2.1 diff --git a/ci/requirements-3.2.txt b/ci/requirements-3.2.txt new file mode 100644 index 0000000000000..c72ccb06f5167 --- /dev/null +++ b/ci/requirements-3.2.txt @@ -0,0 +1,12 @@ +python-dateutil==2.1 +pytz==2013b +openpyxl==1.6.2 +xlrd==0.9.2 +html5lib==1.0b2 +numpy==1.6.2 +cython==0.19.1 +numexpr==2.1 +tables==3.0.0 +matplotlib==1.2.1 +patsy==0.1.0 +lxml==3.2.1 diff --git a/ci/requirements-3.3.txt b/ci/requirements-3.3.txt new file mode 100644 index 0000000000000..c00c51f4ab7d2 --- /dev/null +++ b/ci/requirements-3.3.txt @@ -0,0 +1,12 @@ +python-dateutil==2.1 +pytz==2013b +openpyxl==1.6.2 +xlrd==0.9.2 +html5lib==1.0b2 +numpy==1.7.1 +cython==0.19.1 +numexpr==2.1 +tables==3.0.0 +matplotlib==1.2.1 +patsy==0.1.0 +lxml==3.2.1 diff --git a/ci/speedpack/Vagrantfile b/ci/speedpack/Vagrantfile index 
d4d718548e7c8..ec939b7c0a937 100644 --- a/ci/speedpack/Vagrantfile +++ b/ci/speedpack/Vagrantfile @@ -7,6 +7,7 @@ Vagrant.configure("2") do |config| # config.vbguest.auto_update = true # config.vbguest.no_remote = true + config.vm.synced_folder File.expand_path("..", Dir.pwd), "/reqf" config.vm.synced_folder "wheelhouse", "/wheelhouse" config.vm.provider :virtualbox do |vb| diff --git a/ci/speedpack/build.sh b/ci/speedpack/build.sh index 93a7e83b97161..39994fb3f30d6 100755 --- a/ci/speedpack/build.sh +++ b/ci/speedpack/build.sh @@ -6,93 +6,71 @@ # # Runtime can be up to an hour or more. -echo "Running build.sh..." -set -x - -WHEEL_DIR=/wheelhouse -VERSIONS="2.6 2.7 3.2 3.3" -SCRIPT_FILE="/tmp/run.sh" -PARALLEL=false +echo "Building wheels..." -export PIP_ARGS=" --download-cache /tmp -w $WHEEL_DIR --use-wheel --find-links=$WHEEL_DIR" +# print a trace for everything; RTFM +set -x +# install and update some basics apt-get update apt-get install python-software-properties git -y apt-add-repository ppa:fkrull/deadsnakes -y apt-get update +# install some deps and virtualenv apt-get install python-pip libfreetype6-dev libpng12-dev -y pip install virtualenv apt-get install libhdf5-serial-dev g++ -y +apt-get build-dep python-lxml -y +export PYTHONIOENCODING='utf-8' -function generate_wheels { - VER=$1 - set -x +function generate_wheels() { + # get the requirements file + local reqfile="$1" - if [ x"$VIRTUAL_ENV" != x"" ]; then - deactivate - fi + # get the python version + local TAG=$(echo $reqfile | grep -Po "(\d\.?[\d\-](_\w+)?)") - cd ~/ - sudo rm -Rf venv-$VER - virtualenv -p python$VER venv-$VER - source venv-$VER/bin/activate + # base dir for wheel dirs + local WHEELSTREET=/wheelhouse + local WHEELHOUSE="$WHEELSTREET/$TAG" - pip install -I --download-cache /tmp git+https://github.com/pypa/pip@42102e9d#egg=pip - pip install -I --download-cache /tmp https://bitbucket.org/pypa/setuptools/downloads/setuptools-0.8b6.tar.gz - pip install -I --download-cache /tmp wheel + 
local PY_VER="${TAG:0:3}" + local PY_MAJOR="${PY_VER:0:1}" + local PIP_ARGS="--use-wheel --find-links=$WHEELHOUSE --download-cache /tmp" - export INCLUDE_PATH=/usr/include/python$VER/ - export C_INCLUDE_PATH=/usr/include/python$VER/ - pip wheel $PIP_ARGS cython==0.19.1 - pip install --use-wheel --find-links=$WHEEL_DIR cython==0.19.1 + # install the python version if not installed + apt-get install python$PY_VER python$PY_VER-dev -y - pip wheel $PIP_ARGS numpy==1.6.1 - pip wheel $PIP_ARGS numpy==1.7.1 - pip install --use-wheel --find-links=$WHEEL_DIR numpy==1.7.1 - pip wheel $PIP_ARGS bottleneck==0.6.0 + # create a new virtualenv + rm -Rf /tmp/venv + virtualenv -p python$PY_VER /tmp/venv + source /tmp/venv/bin/activate - pip wheel $PIP_ARGS numexpr==1.4.2 - pip install --use-wheel --find-links=$WHEEL_DIR numexpr==1.4.2 - pip wheel $PIP_ARGS tables==2.3.1 - pip wheel $PIP_ARGS tables==2.4.0 + # install pip setuptools + pip install -I --download-cache /tmp 'git+https://github.com/pypa/pip@42102e9d#egg=pip' + DISTRIBUTE_VERSION= + if [ "${PY_MAJOR}" == "2" ]; then + DISTRIBUTE_VERSION="==0.6.35" + fi + pip install -I --download-cache /tmp distribute${DISTRIBUTE_VERSION} + pip install -I --download-cache /tmp wheel - pip uninstall numexpr -y - pip wheel $PIP_ARGS numexpr==2.1 - pip install --use-wheel --find-links=$WHEEL_DIR numexpr==2.1 - pip wheel $PIP_ARGS tables==3.0.0 - pip uninstall numexpr -y + # make the dir if it doesn't exist + mkdir -p $WHEELHOUSE - pip wheel $PIP_ARGS matplotlib==1.2.1 + # put the requirements file in the wheelhouse + cp $reqfile $WHEELHOUSE + + # install and build the wheels + cat $reqfile | while read N; do + pip wheel $PIP_ARGS --wheel-dir=$WHEELHOUSE $N + pip install $PIP_ARGS --no-index $N + done } -for VER in $VERSIONS ; do - apt-get install python$VER python$VER-dev -y +for reqfile in $(ls -1 /reqf/requirements-*.*); do + generate_wheels "$reqfile" done - -if $PARALLEL; then - echo '#!/bin/bash' > $SCRIPT_FILE - echo "export 
WHEEL_DIR=$WHEEL_DIR" >> $SCRIPT_FILE - echo "export PIP_ARGS='$PIP_ARGS'">> $SCRIPT_FILE - - declare -f generate_wheels >> $SCRIPT_FILE - echo 'generate_wheels $1' >> $SCRIPT_FILE - chmod u+x $SCRIPT_FILE - - pip install -I --download-cache /tmp git+https://github.com/pypa/pip@42102e9d#egg=pip - pip install --download-cache /tmp --no-install wheel - pip install --download-cache /tmp --no-install https://bitbucket.org/pypa/setuptools/downloads/setuptools-0.8b6.tar.gz - - for VER in 2.6 2.7 3.2 3.3 ; do - $SCRIPT_FILE $VER & - done - - wait - -else - for VER in 2.6 2.7 3.2 3.3 ; do - generate_wheels $VER - done -fi diff --git a/ci/speedpack/wheelhouse/placeholder b/ci/speedpack/wheelhouse/placeholder deleted file mode 100644 index e69de29bb2d1d..0000000000000
closes #4142. should be able to finish up tomorrow, just need to put things on the server and test
https://api.github.com/repos/pandas-dev/pandas/pulls/4153
2013-07-07T04:38:21Z
2013-07-08T04:15:07Z
2013-07-08T04:15:07Z
2014-07-16T08:17:58Z
ENH: Melt with MultiIndex columns
diff --git a/doc/source/release.rst b/doc/source/release.rst index f4d61e70e94b3..b827af2173412 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -77,6 +77,7 @@ pandas 0.12 to specify custom column names of the returned DataFrame (:issue:`3649`), thanks @hoechenberger. If ``var_name`` is not specified and ``dataframe.columns.name`` is not None, then this will be used as the ``var_name`` (:issue:`4144`). + Also support for MultiIndex columns. - clipboard functions use pyperclip (no dependencies on Windows, alternative dependencies offered for Linux) (:issue:`3837`). - Plotting functions now raise a ``TypeError`` before trying to plot anything diff --git a/pandas/core/index.py b/pandas/core/index.py index a3aa0804bcfe2..7b20d791c6593 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -1653,7 +1653,9 @@ def get_level_values(self, level): num = self._get_level_number(level) unique_vals = self.levels[num] # .values labels = self.labels[num] - return unique_vals.take(labels) + values = unique_vals.take(labels) + values.name = self.names[num] + return values def format(self, space=2, sparsify=None, adjoin=True, names=False, na_rep='NaN', formatter=None): diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index e9d5fe124fc74..1b3aa0f962e10 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -5,19 +5,20 @@ import numpy as np +import six + from pandas.core.series import Series from pandas.core.frame import DataFrame from pandas.core.categorical import Categorical from pandas.core.common import (notnull, _ensure_platform_int, _maybe_promote, - _maybe_upcast, isnull) + isnull) from pandas.core.groupby import (get_group_index, _compress_group_index, decons_group_index) import pandas.core.common as com import pandas.algos as algos -from pandas import lib -from pandas.core.index import MultiIndex, Index +from pandas.core.index import MultiIndex class ReshapeError(Exception): @@ -35,21 +36,26 @@ class _Unstacker(object): 
Examples -------- + >>> import pandas as pd + >>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), + ... ('two', 'a'), ('two', 'b')]) + >>> s = pd.Series(np.arange(1.0, 5.0), index=index) >>> s - one a 1. - one b 2. - two a 3. - two b 4. + one a 1 + b 2 + two a 3 + b 4 + dtype: float64 >>> s.unstack(level=-1) a b - one 1. 2. - two 3. 4. + one 1 2 + two 3 4 >>> s.unstack(level=0) one two - a 1. 2. - b 3. 4. + a 1 2 + b 3 4 Returns ------- @@ -159,7 +165,7 @@ def get_result(self): values[j] = orig_values[i] else: index = index.take(self.unique_groups) - + return DataFrame(values, index=index, columns=columns) def get_new_values(self): @@ -601,7 +607,7 @@ def _stack_multi_columns(frame, level=-1, dropna=True): def melt(frame, id_vars=None, value_vars=None, - var_name=None, value_name='value'): + var_name=None, value_name='value', col_level=None): """ "Unpivots" a DataFrame from wide format to long format, optionally leaving id variables set @@ -613,27 +619,47 @@ def melt(frame, id_vars=None, value_vars=None, value_vars : tuple, list, or ndarray var_name : scalar, if None uses frame.column.name or 'variable' value_name : scalar, default 'value' + col_level : scalar, if columns are a MultiIndex then use this level to melt Examples -------- + >>> import pandas as pd + >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'}, + ... 'B': {0: 1, 1: 3, 2: 5}, + ... 'C': {0: 2, 1: 4, 2: 6}}) + >>> df - A B C - a 1 2 - b 3 4 - c 5 6 + A B C + 0 a 1 2 + 1 b 3 4 + 2 c 5 6 >>> melt(df, id_vars=['A'], value_vars=['B']) - A variable value - a B 1 - b B 3 - c B 5 - + A variable value + 0 a B 1 + 1 b B 3 + 2 c B 5 + >>> melt(df, id_vars=['A'], value_vars=['B'], ... 
var_name='myVarname', value_name='myValname') - A myVarname myValname - a B 1 - b B 3 - c B 5 + A myVarname myValname + 0 a B 1 + 1 b B 3 + 2 c B 5 + + >>> df.columns = [list('ABC'), list('DEF')] + + >>> melt(df, col_level=0, id_vars=['A'], value_vars=['B']) + A variable value + 0 a B 1 + 1 b B 3 + 2 c B 5 + + >>> melt(df, id_vars=[('A', 'D')], value_vars=[('B', 'E')]) + (A, D) variable_0 variable_1 value + 0 a B E 1 + 1 b B E 3 + 2 c B E 5 """ # TODO: what about the existing index? @@ -652,8 +678,21 @@ def melt(frame, id_vars=None, value_vars=None, else: frame = frame.copy() + if col_level is not None: # allow list or other? + frame.columns = frame.columns.get_level_values(col_level) # frame is a copy + if var_name is None: - var_name = frame.columns.name if frame.columns.name is not None else 'variable' + if isinstance(frame.columns, MultiIndex): + if len(frame.columns.names) == len(set(frame.columns.names)): + var_name = frame.columns.names + else: + var_name = ['variable_%s' % i for i in + xrange(len(frame.columns.names))] + else: + var_name = [frame.columns.name if frame.columns.name is not None + else 'variable'] + if isinstance(var_name, six.string_types): + var_name = [var_name] N, K = frame.shape K -= len(id_vars) @@ -662,11 +701,13 @@ def melt(frame, id_vars=None, value_vars=None, for col in id_vars: mdata[col] = np.tile(frame.pop(col).values, K) - mcolumns = id_vars + [var_name, value_name] + mcolumns = id_vars + var_name + [value_name] mdata[value_name] = frame.values.ravel('F') - mdata[var_name] = np.asarray(frame.columns).repeat(N) - + for i, col in enumerate(var_name): + # asanyarray will keep the columns as an Index + mdata[col] = np.asanyarray(frame.columns.get_level_values(i)).repeat(N) + return DataFrame(mdata, columns=mcolumns) @@ -683,13 +724,16 @@ def lreshape(data, groups, dropna=True, label=None): Examples -------- + >>> import pandas as pd + >>> data = pd.DataFrame({'hr1': [514, 573], 'hr2': [545, 526], + ... 
'team': ['Red Sox', 'Yankees'], + ... 'year1': [2007, 2008], 'year2': [2008, 2008]}) >>> data hr1 hr2 team year1 year2 0 514 545 Red Sox 2007 2008 1 573 526 Yankees 2007 2008 - >>> pd.lreshape(data, {'year': ['year1', 'year2'], - 'hr': ['hr1', 'hr2']}) + >>> pd.lreshape(data, {'year': ['year1', 'year2'], 'hr': ['hr1', 'hr2']}) team hr year 0 Red Sox 514 2007 1 Yankees 573 2007 diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py index d9808ab48ca41..33533104919db 100644 --- a/pandas/tests/test_index.py +++ b/pandas/tests/test_index.py @@ -1029,6 +1029,8 @@ def test_get_level_values(self): expected = ['foo', 'foo', 'bar', 'baz', 'qux', 'qux'] self.assert_(np.array_equal(result, expected)) + self.assertEquals(result.name, 'first') + result = self.index.get_level_values('first') expected = self.index.get_level_values(0) self.assert_(np.array_equal(result, expected)) diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py index 09c63746c8d4b..b24e097238a70 100644 --- a/pandas/tests/test_reshape.py +++ b/pandas/tests/test_reshape.py @@ -10,6 +10,7 @@ import nose from pandas import DataFrame +import pandas as pd from numpy import nan import numpy as np @@ -30,6 +31,12 @@ def setUp(self): self.var_name = 'var' self.value_name = 'val' + self.df1 = pd.DataFrame([[ 1.067683, -1.110463, 0.20867 ], + [-1.321405, 0.368915, -1.055342], + [-0.807333, 0.08298 , -0.873361]]) + self.df1.columns = [list('ABC'), list('abc')] + self.df1.columns.names = ['CAP', 'low'] + def test_default_col_names(self): result = melt(self.df) self.assertEqual(result.columns.tolist(), ['variable', 'value']) @@ -128,6 +135,17 @@ def test_custom_var_and_value_name(self): result20 = melt(self.df) self.assertEqual(result20.columns.tolist(), ['foo', 'value']) + def test_col_level(self): + res1 = melt(self.df1, col_level=0) + res2 = melt(self.df1, col_level='CAP') + self.assertEqual(res1.columns.tolist(), ['CAP', 'value']) + self.assertEqual(res1.columns.tolist(), ['CAP', 
'value']) + + def test_multiindex(self): + res = pd.melt(self.df1) + self.assertEqual(res.columns.tolist(), ['CAP', 'low', 'value']) + + class TestConvertDummies(unittest.TestCase): def test_convert_dummies(self): df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
No idea if there's actually a market for this, but anyhow: ``` In [1]: df = pd.DataFrame([[ 1.067683, -1.110463, 0.20867 ], [-1.321405, 0.368915, -1.055342], [-0.807333, 0.08298 , -0.873361]]) In [2]: df.columns = [list('ABC'), list('abc')] In [3]: df.columns.names = ['CAP', 'low'] In [4]: df Out[4]: CAP A B C low a b c 0 1.067683 -1.110463 0.208670 1 -1.321405 0.368915 -1.055342 2 -0.807333 0.082980 -0.873361 In [5]: pd.melt(df, col_level=0) Out[5]: CAP value 0 A 1.067683 1 A -1.321405 2 A -0.807333 3 B -1.110463 4 B 0.368915 5 B 0.082980 6 C 0.208670 7 C -1.055342 8 C -0.873361 In [6]: pd.melt(df,) Out[6]: CAP low value 0 A a 1.067683 1 A a -1.321405 2 A a -0.807333 3 B b -1.110463 4 B b 0.368915 5 B b 0.082980 6 C c 0.208670 7 C c -1.055342 8 C c -0.873361 In [7]: df.columns.names = [None, None] In [8]: pd.melt(df,) Out[8]: variable_0 variable_1 value 0 A a 1.067683 1 A a -1.321405 2 A a -0.807333 3 B b -1.110463 4 B b 0.368915 5 B b 0.082980 6 C c 0.208670 7 C c -1.055342 8 C c -0.873361 ``` Also includes fix for `get_level_values` and name attribute. cc #4144
https://api.github.com/repos/pandas-dev/pandas/pulls/4150
2013-07-06T17:08:19Z
2013-07-12T13:16:57Z
2013-07-12T13:16:57Z
2014-06-12T13:34:21Z
TST: additional test case for GH4146
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 7cd31b8f04b3a..a4174c236c26a 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -1094,8 +1094,12 @@ def test_mi_access(self): # GH 4146, not returning a block manager when selecting a unique index # from a duplicate index expected = DataFrame([['a',1,1]],index=['A1'],columns=['h1','h3','h5'],).T - df3 = df2['A'] - result = df3['A1'] + result = df2['A']['A1'] + assert_frame_equal(result,expected) + + # selecting a non_unique from the 2nd level + expected = DataFrame([['d',4,4],['e',5,5]],index=Index(['B2','B2'],name='sub'),columns=['h1','h3','h5'],).T + result = df2['A']['B2'] assert_frame_equal(result,expected) if __name__ == '__main__':
https://api.github.com/repos/pandas-dev/pandas/pulls/4149
2013-07-06T13:27:51Z
2013-07-06T13:43:13Z
2013-07-06T13:43:13Z
2014-07-16T08:17:52Z
BUG: (GH4145/4146) Fixed bugs in multi-index selection with column multi index duplicates
diff --git a/doc/source/release.rst b/doc/source/release.rst index 7a271688c318b..dce1a25cf434b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -318,6 +318,8 @@ pandas 0.12 iterated over when regex=False (:issue:`4115`) - Fixed bug in ``convert_objects(convert_numeric=True)`` where a mixed numeric and object Series/Frame was not converting properly (:issue:`4119`) + - Fixed bugs in multi-index selection with column multi-index and duplicates + (:issue:`4145`, :issue:`4146`) pandas 0.11.0 diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index 27c12fcd2e8eb..fea7f3153b8a6 100644 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -608,7 +608,7 @@ def _convert_to_indexer(self, obj, axis=0): mask = check == -1 if mask.any(): raise KeyError('%s not in index' % objarr[mask]) - + return indexer else: @@ -1100,9 +1100,14 @@ def _check_slice_bounds(slobj, values): def _maybe_droplevels(index, key): # drop levels + original_index = index if isinstance(key, tuple): for _ in key: - index = index.droplevel(0) + try: + index = index.droplevel(0) + except: + # we have dropped too much, so back out + return original_index else: index = index.droplevel(0) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 99af2d7becb39..f23a89635aaf2 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1660,18 +1660,23 @@ def get(self, item): # duplicate index but only a single result if com.is_integer(indexer): + b, loc = ref_locs[indexer] - return b.iget(loc) + values = [ b.iget(loc) ] + index = Index([ self.items[indexer] ]) + + # we have a multiple result, potentially across blocks else: - # we have a multiple result, potentially across blocks values = [ block.iget(i) for block, i in ref_locs[indexer] ] index = self.items[indexer] - axes = [ index ] + self.axes[1:] - blocks = form_blocks(values, index, axes) - mgr = BlockManager(blocks, axes) - mgr._consolidate_inplace() - return mgr + + # create and return a 
new block manager + axes = [ index ] + self.axes[1:] + blocks = form_blocks(values, index, axes) + mgr = BlockManager(blocks, axes) + mgr._consolidate_inplace() + return mgr def iget(self, i): item = self.items[i] diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py index 8b6bf1ed7f651..7cd31b8f04b3a 100644 --- a/pandas/tests/test_indexing.py +++ b/pandas/tests/test_indexing.py @@ -2,6 +2,7 @@ import unittest import nose import itertools +from StringIO import StringIO from numpy import random, nan from numpy.random import randn @@ -45,7 +46,7 @@ def _get_value(f, i, values=False): # check agains values if values: return f.values[i] - + # this is equiv of f[col][row]..... #v = f #for a in reversed(i): @@ -70,7 +71,7 @@ def _get_result(obj, method, key, axis): xp = getattr(obj, method).__getitem__(_axify(obj,key,axis)) except: xp = getattr(obj, method).__getitem__(key) - + return xp def _axify(obj, key, axis): @@ -127,11 +128,11 @@ def setUp(self): setattr(self,o,d) def check_values(self, f, func, values = False): - + if f is None: return axes = f.axes indicies = itertools.product(*axes) - + for i in indicies: result = getattr(f,func)[i] @@ -194,7 +195,7 @@ def _print(result, error = None): if fails is True: if result == 'fail': result = 'ok (fail)' - + if not result.startswith('ok'): raise AssertionError(_print(result)) @@ -212,7 +213,7 @@ def _print(result, error = None): result = 'ok (%s)' % type(detail).__name__ _print(result) return - + result = type(detail).__name__ raise AssertionError(_print(result, error = detail)) @@ -244,14 +245,14 @@ def _print(result, error = None): obj = d[t] if obj is not None: obj = obj.copy() - + k2 = key2 _eq(t, o, a, obj, key1, k2) def test_at_and_iat_get(self): def _check(f, func, values = False): - + if f is not None: indicies = _generate_indices(f, values) for i in indicies: @@ -260,7 +261,7 @@ def _check(f, func, values = False): assert_almost_equal(result, expected) for o in self._objs: - + d = 
getattr(self,o) # iat @@ -274,11 +275,11 @@ def _check(f, func, values = False): _check(d['labels'],'at') _check(d['ts'], 'at') _check(d['floats'],'at') - + def test_at_and_iat_set(self): def _check(f, func, values = False): - + if f is not None: indicies = _generate_indices(f, values) for i in indicies: @@ -287,7 +288,7 @@ def _check(f, func, values = False): assert_almost_equal(expected, 1) for t in self._objs: - + d = getattr(self,t) _check(d['ints'],'iat',values=True) @@ -302,12 +303,12 @@ def _check(f, func, values = False): _check(d['floats'],'at') def test_at_timestamp(self): - + # as timestamp is not a tuple! dates = date_range('1/1/2000', periods=8) df = DataFrame(randn(8, 4), index=dates, columns=['A', 'B', 'C', 'D']) s = df['A'] - + result = s.at[dates[5]] xp = s.values[5] self.assert_(result == xp) @@ -320,7 +321,7 @@ def test_iloc_getitem_int(self): # integer self.check_result('integer', 'iloc', 2, 'ix', { 0 : 4, 1: 6, 2: 8 }, typs = ['ints']) self.check_result('integer', 'iloc', 2, 'indexer', 2, typs = ['labels','mixed','ts','floats','empty'], fails = IndexError) - + def test_iloc_getitem_neg_int(self): # neg integer @@ -332,7 +333,7 @@ def test_iloc_getitem_list_int(self): # list of ints self.check_result('list int', 'iloc', [0,1,2], 'ix', { 0 : [0,2,4], 1 : [0,3,6], 2: [0,4,8] }, typs = ['ints']) self.check_result('list int', 'iloc', [0,1,2], 'indexer', [0,1,2], typs = ['labels','mixed','ts','floats','empty'], fails = IndexError) - + def test_iloc_getitem_dups(self): # no dups in panel (bug?) 
@@ -378,7 +379,7 @@ def test_iloc_setitem(self): assert_frame_equal(result, expected) def test_iloc_multiindex(self): - df = DataFrame(np.random.randn(3, 3), + df = DataFrame(np.random.randn(3, 3), columns=[[2,2,4],[6,8,10]], index=[[4,4,8],[8,10,12]]) @@ -415,7 +416,7 @@ def test_loc_getitem_label_out_of_range(self): # out of range label self.check_result('label range', 'loc', 'f', 'ix', 'f', typs = ['ints','labels','mixed','ts','floats'], fails=KeyError) - + def test_loc_getitem_label_list(self): # list of labels @@ -426,7 +427,7 @@ def test_loc_getitem_label_list(self): self.check_result('list lbl', 'loc', ['A','B','C'], 'ix', ['A','B','C'], typs = ['labels'], axes=1) self.check_result('list lbl', 'loc', ['Z','Y','W'], 'ix', ['Z','Y','W'], typs = ['labels'], axes=2) self.check_result('list lbl', 'loc', [2,8,'null'], 'ix', [2,8,'null'], typs = ['mixed'], axes=0) - self.check_result('list lbl', 'loc', [Timestamp('20130102'),Timestamp('20130103')], 'ix', + self.check_result('list lbl', 'loc', [Timestamp('20130102'),Timestamp('20130103')], 'ix', [Timestamp('20130102'),Timestamp('20130103')], typs = ['ts'], axes=0) # fails @@ -434,7 +435,7 @@ def test_loc_getitem_label_list(self): self.check_result('list lbl', 'loc', [0,2,3], 'ix', [0,2,3], typs = ['ints'], axes=0, fails = KeyError) self.check_result('list lbl', 'loc', [3,6,7], 'ix', [3,6,9], typs = ['ints'], axes=1, fails = KeyError) self.check_result('list lbl', 'loc', [4,8,10], 'ix', [4,8,12], typs = ['ints'], axes=2, fails = KeyError) - + # array like self.check_result('array like', 'loc', Series(index=[0,2,4]).index, 'ix', [0,2,4], typs = ['ints'], axes=0) self.check_result('array like', 'loc', Series(index=[3,6,9]).index, 'ix', [3,6,9], typs = ['ints'], axes=1) @@ -449,10 +450,10 @@ def test_loc_getitem_bool(self): def test_loc_getitem_int_slice(self): - # int slices in int + # int slices in int self.check_result('int slice1', 'loc', slice(2,4), 'ix', { 0 : [2,4], 1: [3,6], 2: [4,8] }, typs = ['ints'], 
fails=KeyError) - # ok + # ok self.check_result('int slice2', 'loc', slice(2,4), 'ix', [2,4], typs = ['ints'], axes = 0) self.check_result('int slice2', 'loc', slice(3,6), 'ix', [3,6], typs = ['ints'], axes = 1) self.check_result('int slice2', 'loc', slice(4,8), 'ix', [4,8], typs = ['ints'], axes = 2) @@ -589,7 +590,7 @@ def test_iloc_getitem_frame(self): result = df.iloc[s.index] expected = df.ix[[2,4,6,8]] assert_frame_equal(result, expected) - + # out-of-bounds slice self.assertRaises(IndexError, df.iloc.__getitem__, tuple([slice(None),slice(1,5,None)])) self.assertRaises(IndexError, df.iloc.__getitem__, tuple([slice(None),slice(-5,3,None)])) @@ -648,7 +649,7 @@ def test_iloc_multiindex(self): ['A', 'A', 'B']], index=[['i', 'i', 'j', 'k'], ['X', 'X', 'Y','Y']]) - mi_int = DataFrame(np.random.randn(3, 3), + mi_int = DataFrame(np.random.randn(3, 3), columns=[[2,2,4],[6,8,10]], index=[[4,4,8],[8,10,12]]) @@ -679,7 +680,7 @@ def test_loc_multiindex(self): ['A', 'A', 'B']], index=[['i', 'i', 'j'], ['X', 'X', 'Y']]) - mi_int = DataFrame(np.random.randn(3, 3), + mi_int = DataFrame(np.random.randn(3, 3), columns=[[2,2,4],[6,8,10]], index=[[4,4,8],[8,10,12]]) @@ -749,7 +750,7 @@ def test_xs_multiindex(self): assert_frame_equal(result, expected) def test_setitem_dtype_upcast(self): - + # GH3216 df = DataFrame([{"a": 1}, {"a": 3, "b": 2}]) df['c'] = np.nan @@ -761,7 +762,7 @@ def test_setitem_dtype_upcast(self): def test_setitem_iloc(self): - + # setitem with an iloc list df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"], columns=["A", "B", "C"]) df.iloc[[0,1],[1,2]] @@ -830,20 +831,20 @@ def test_indexing_mixed_frame_bug(self): self.assert_(df.iloc[0,2] == '-----') #if I look at df, then element [0,2] equals '_'. If instead I type df.ix[idx,'test'], I get '-----', finally by typing df.iloc[0,2] I get '_'. 
- + def test_set_index_nan(self): # GH 3586 - df = DataFrame({'PRuid': {17: 'nonQC', 18: 'nonQC', 19: 'nonQC', 20: '10', 21: '11', 22: '12', 23: '13', - 24: '24', 25: '35', 26: '46', 27: '47', 28: '48', 29: '59', 30: '10'}, - 'QC': {17: 0.0, 18: 0.0, 19: 0.0, 20: nan, 21: nan, 22: nan, 23: nan, 24: 1.0, 25: nan, - 26: nan, 27: nan, 28: nan, 29: nan, 30: nan}, - 'data': {17: 7.9544899999999998, 18: 8.0142609999999994, 19: 7.8591520000000008, 20: 0.86140349999999999, + df = DataFrame({'PRuid': {17: 'nonQC', 18: 'nonQC', 19: 'nonQC', 20: '10', 21: '11', 22: '12', 23: '13', + 24: '24', 25: '35', 26: '46', 27: '47', 28: '48', 29: '59', 30: '10'}, + 'QC': {17: 0.0, 18: 0.0, 19: 0.0, 20: nan, 21: nan, 22: nan, 23: nan, 24: 1.0, 25: nan, + 26: nan, 27: nan, 28: nan, 29: nan, 30: nan}, + 'data': {17: 7.9544899999999998, 18: 8.0142609999999994, 19: 7.8591520000000008, 20: 0.86140349999999999, 21: 0.87853110000000001, 22: 0.8427041999999999, 23: 0.78587700000000005, 24: 0.73062459999999996, - 25: 0.81668560000000001, 26: 0.81927080000000008, 27: 0.80705009999999999, 28: 0.81440240000000008, - 29: 0.80140849999999997, 30: 0.81307740000000006}, - 'year': {17: 2006, 18: 2007, 19: 2008, 20: 1985, 21: 1985, 22: 1985, 23: 1985, + 25: 0.81668560000000001, 26: 0.81927080000000008, 27: 0.80705009999999999, 28: 0.81440240000000008, + 29: 0.80140849999999997, 30: 0.81307740000000006}, + 'year': {17: 2006, 18: 2007, 19: 2008, 20: 1985, 21: 1985, 22: 1985, 23: 1985, 24: 1985, 25: 1985, 26: 1985, 27: 1985, 28: 1985, 29: 1985, 30: 1986}}).reset_index() result = df.set_index(['year','PRuid','QC']).reset_index().reindex(columns=df.columns) @@ -871,7 +872,7 @@ def test_iloc_panel_issue(self): self.assert_(p.iloc[1, 1, :3].shape == (3,)) self.assert_(p.iloc[1, :3, 1].shape == (3,)) self.assert_(p.iloc[:3, 1, 1].shape == (3,)) - + def test_multi_assign(self): # GH 3626, an assignement of a sub-df to a df @@ -892,7 +893,7 @@ def test_multi_assign(self): 'PF':[0,0,0,0,1,1], 
'col1':Series([0,1,4,6,8,10]), 'col2':[12,7,16,np.nan,20,22]}) - + # frame on rhs df2.ix[mask, cols]= dft.ix[mask, cols] @@ -1006,7 +1007,7 @@ def test_non_unique_loc(self): ## https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs # these are going to raise becuase the we are non monotonic - df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3]) + df = DataFrame({'A' : [1,2,3,4,5,6], 'B' : [3,4,5,6,7,8]}, index = [0,1,0,1,2,3]) self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,None)])) self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(0,None)])) self.assertRaises(KeyError, df.loc.__getitem__, tuple([slice(1,2)])) @@ -1066,6 +1067,36 @@ def test_iloc_non_unique_indexing(self): result = df2.loc[idx] assert_frame_equal(result, expected) + def test_mi_access(self): + + # GH 4145 + data = """h1 main h3 sub h5 +0 a A 1 A1 1 +1 b B 2 B1 2 +2 c B 3 A1 3 +3 d A 4 B2 4 +4 e A 5 B2 5 +5 f B 6 A2 6 +""" + + df = pd.read_csv(StringIO(data),sep='\s+',index_col=0) + df2 = df.set_index(['main', 'sub']).T.sort_index(1) + index = Index(['h1','h3','h5']) + columns = MultiIndex.from_tuples([('A','A1')],names=['main','sub']) + expected = DataFrame([['a',1,1]],index=columns,columns=index).T + + result = df2.loc[:,('A','A1')] + assert_frame_equal(result,expected) + + result = df2[('A','A1')] + assert_frame_equal(result,expected) + + # GH 4146, not returning a block manager when selecting a unique index + # from a duplicate index + expected = DataFrame([['a',1,1]],index=['A1'],columns=['h1','h3','h5'],).T + df3 = df2['A'] + result = df3['A1'] + assert_frame_equal(result,expected) if __name__ == '__main__': import nose
closes #4145, #4146 ``` In [1]: data = """h1 main h3 sub h5 ...: 0 a A 1 A1 1 ...: 1 b B 2 B1 2 ...: 2 c B 3 A1 3 ...: 3 d A 4 B2 4 ...: 4 e A 5 B2 5 ...: 5 f B 6 A2 6""" In [2]: df = pd.read_csv(StringIO(data),sep='\s+',index_col=0) In [3]: df2 = df.set_index(['main', 'sub']).T.sort_index(1) In [4]: df2.loc[:,('A','A1')] Out[4]: main A sub A1 h1 a h3 1 h5 1 In [5]: df2[('A','A1')] Out[5]: main A sub A1 h1 a h3 1 h5 1 In [6]: df2['A']['A1'] Out[6]: A1 h1 a h3 1 h5 1 ``` This should NOT work, as this is tryingo select 2 different columns ('A' is ok, but 'A1' fails which is correct) ``` In [7]: df2[['A','A1']] KeyError: "['A1'] not in index" ``` This is ok though ``` In [8]: df2[[('A','A1')]] Out[8]: main A sub A1 h1 a h3 1 h5 1 ```
https://api.github.com/repos/pandas-dev/pandas/pulls/4148
2013-07-06T13:03:12Z
2013-07-06T13:21:40Z
2013-07-06T13:21:40Z
2014-06-15T09:47:51Z
ENH melt uses column name if available
diff --git a/doc/source/release.rst b/doc/source/release.rst index 7a271688c318b..a2f2d5308ff4d 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -75,13 +75,14 @@ pandas 0.12 - Simplified the API and added a describe method to Categorical - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name`` to specify custom column names of the returned DataFrame (:issue:`3649`), - thanks @hoechenberger + thanks @hoechenberger. If ``var_name`` is not specified and ``dataframe.columns.name`` + is not None, then this will be used as the ``var_name`` (:issue:`4144`). - clipboard functions use pyperclip (no dependencies on Windows, alternative dependencies offered for Linux) (:issue:`3837`). - Plotting functions now raise a ``TypeError`` before trying to plot anything if the associated objects have have a dtype of ``object`` (:issue:`1818`, - :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object arrays to - numeric arrays if possible so that you can still plot, for example, an + :issue:`3572`, :issue:`3911`, :issue:`3912`), but they will try to convert object + arrays to numeric arrays if possible so that you can still plot, for example, an object array with floats. This happens before any drawing takes place which elimnates any spurious plots from showing up. - Added Faq section on repr display options, to help users customize their setup. 
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py index 2cbeb1cf58a8f..e9d5fe124fc74 100644 --- a/pandas/core/reshape.py +++ b/pandas/core/reshape.py @@ -601,7 +601,7 @@ def _stack_multi_columns(frame, level=-1, dropna=True): def melt(frame, id_vars=None, value_vars=None, - var_name='variable', value_name='value'): + var_name=None, value_name='value'): """ "Unpivots" a DataFrame from wide format to long format, optionally leaving id variables set @@ -611,8 +611,8 @@ def melt(frame, id_vars=None, value_vars=None, frame : DataFrame id_vars : tuple, list, or ndarray value_vars : tuple, list, or ndarray - var_name : scalar - value_name : scalar + var_name : scalar, if None uses frame.column.name or 'variable' + value_name : scalar, default 'value' Examples -------- @@ -634,6 +634,7 @@ def melt(frame, id_vars=None, value_vars=None, a B 1 b B 3 c B 5 + """ # TODO: what about the existing index? if id_vars is not None: @@ -651,6 +652,9 @@ def melt(frame, id_vars=None, value_vars=None, else: frame = frame.copy() + if var_name is None: + var_name = frame.columns.name if frame.columns.name is not None else 'variable' + N, K = frame.shape K -= len(id_vars) diff --git a/pandas/tests/test_reshape.py b/pandas/tests/test_reshape.py index 6e5f6bffd7544..09c63746c8d4b 100644 --- a/pandas/tests/test_reshape.py +++ b/pandas/tests/test_reshape.py @@ -20,105 +20,141 @@ _multiprocess_can_split_ = True -def test_melt(): - df = tm.makeTimeDataFrame()[:10] - df['id1'] = (df['A'] > 0).astype(np.int64) - df['id2'] = (df['B'] > 0).astype(np.int64) - - var_name = 'var' - value_name = 'val' - - # Default column names - result = melt(df) - result1 = melt(df, id_vars=['id1']) - result2 = melt(df, id_vars=['id1', 'id2']) - result3 = melt(df, id_vars=['id1', 'id2'], - value_vars='A') - result4 = melt(df, id_vars=['id1', 'id2'], - value_vars=['A', 'B']) - - expected4 = DataFrame({'id1': df['id1'].tolist() * 2, - 'id2': df['id2'].tolist() * 2, - 'variable': ['A']*10 + ['B']*10, - 'value': 
df['A'].tolist() + df['B'].tolist()}, - columns=['id1', 'id2', 'variable', 'value']) - tm.assert_frame_equal(result4, expected4) - - # Supply custom name for the 'variable' column - result5 = melt(df, var_name=var_name) - result6 = melt(df, id_vars=['id1'], var_name=var_name) - result7 = melt(df, id_vars=['id1', 'id2'], var_name=var_name) - result8 = melt(df, id_vars=['id1', 'id2'], - value_vars='A', var_name=var_name) - result9 = melt(df, id_vars=['id1', 'id2'], - value_vars=['A', 'B'], var_name=var_name) - - expected9 = DataFrame({'id1': df['id1'].tolist() * 2, - 'id2': df['id2'].tolist() * 2, - var_name: ['A']*10 + ['B']*10, - 'value': df['A'].tolist() + df['B'].tolist()}, - columns=['id1', 'id2', var_name, 'value']) - tm.assert_frame_equal(result9, expected9) - - # Supply custom name for the 'value' column - result10 = melt(df, value_name=value_name) - result11 = melt(df, id_vars=['id1'], value_name=value_name) - result12 = melt(df, id_vars=['id1', 'id2'], value_name=value_name) - result13 = melt(df, id_vars=['id1', 'id2'], - value_vars='A', value_name=value_name) - result14 = melt(df, id_vars=['id1', 'id2'], - value_vars=['A', 'B'], value_name=value_name) - - expected14 = DataFrame({'id1': df['id1'].tolist() * 2, - 'id2': df['id2'].tolist() * 2, - 'variable': ['A']*10 + ['B']*10, - value_name: df['A'].tolist() + df['B'].tolist()}, - columns=['id1', 'id2', 'variable', value_name]) - tm.assert_frame_equal(result14, expected14) - - # Supply custom names for the 'variable' and 'value' columns - result15 = melt(df, var_name=var_name, value_name=value_name) - result16 = melt(df, id_vars=['id1'], var_name=var_name, value_name=value_name) - result17 = melt(df, id_vars=['id1', 'id2'], - var_name=var_name, value_name=value_name) - result18 = melt(df, id_vars=['id1', 'id2'], - value_vars='A', var_name=var_name, value_name=value_name) - result19 = melt(df, id_vars=['id1', 'id2'], - value_vars=['A', 'B'], var_name=var_name, value_name=value_name) - - expected19 = 
DataFrame({'id1': df['id1'].tolist() * 2, - 'id2': df['id2'].tolist() * 2, - var_name: ['A']*10 + ['B']*10, - value_name: df['A'].tolist() + df['B'].tolist()}, - columns=['id1', 'id2', var_name, value_name]) - tm.assert_frame_equal(result19, expected19) - -def test_convert_dummies(): - df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', - 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', - 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) - - result = convert_dummies(df, ['A', 'B']) - result2 = convert_dummies(df, ['A', 'B'], prefix_sep='.') - - expected = DataFrame({'A_foo': [1, 0, 1, 0, 1, 0, 1, 1], - 'A_bar': [0, 1, 0, 1, 0, 1, 0, 0], - 'B_one': [1, 1, 0, 0, 0, 0, 1, 0], - 'B_two': [0, 0, 1, 0, 1, 1, 0, 0], - 'B_three': [0, 0, 0, 1, 0, 0, 0, 1], - 'C': df['C'].values, - 'D': df['D'].values}, - columns=result.columns, dtype=float) - expected2 = expected.rename(columns=lambda x: x.replace('_', '.')) - - tm.assert_frame_equal(result, expected) - tm.assert_frame_equal(result2, expected2) - - -class Test_lreshape(unittest.TestCase): +class TestMelt(unittest.TestCase): + + def setUp(self): + self.df = tm.makeTimeDataFrame()[:10] + self.df['id1'] = (self.df['A'] > 0).astype(np.int64) + self.df['id2'] = (self.df['B'] > 0).astype(np.int64) + + self.var_name = 'var' + self.value_name = 'val' + + def test_default_col_names(self): + result = melt(self.df) + self.assertEqual(result.columns.tolist(), ['variable', 'value']) + + result1 = melt(self.df, id_vars=['id1']) + self.assertEqual(result1.columns.tolist(), ['id1', 'variable', 'value']) + + result2 = melt(self.df, id_vars=['id1', 'id2']) + self.assertEqual(result2.columns.tolist(), ['id1', 'id2', 'variable', 'value']) + + def test_value_vars(self): + result3 = melt(self.df, id_vars=['id1', 'id2'], value_vars='A') + self.assertEqual(len(result3), 10) + + result4 = melt(self.df, id_vars=['id1', 'id2'], value_vars=['A', 'B']) + expected4 = DataFrame({'id1': 
self.df['id1'].tolist() * 2, + 'id2': self.df['id2'].tolist() * 2, + 'variable': ['A']*10 + ['B']*10, + 'value': self.df['A'].tolist() + self.df['B'].tolist()}, + columns=['id1', 'id2', 'variable', 'value']) + tm.assert_frame_equal(result4, expected4) + + def test_custom_var_name(self): + result5 = melt(self.df, var_name=self.var_name) + self.assertEqual(result5.columns.tolist(), ['var', 'value']) + + result6 = melt(self.df, id_vars=['id1'], var_name=self.var_name) + self.assertEqual(result6.columns.tolist(), ['id1', 'var', 'value']) + + result7 = melt(self.df, id_vars=['id1', 'id2'], var_name=self.var_name) + self.assertEqual(result7.columns.tolist(), ['id1', 'id2', 'var', 'value']) + + result8 = melt(self.df, id_vars=['id1', 'id2'], + value_vars='A', var_name=self.var_name) + self.assertEqual(result8.columns.tolist(), ['id1', 'id2', 'var', 'value']) + + result9 = melt(self.df, id_vars=['id1', 'id2'], + value_vars=['A', 'B'], var_name=self.var_name) + expected9 = DataFrame({'id1': self.df['id1'].tolist() * 2, + 'id2': self.df['id2'].tolist() * 2, + self.var_name: ['A']*10 + ['B']*10, + 'value': self.df['A'].tolist() + self.df['B'].tolist()}, + columns=['id1', 'id2', self.var_name, 'value']) + tm.assert_frame_equal(result9, expected9) + + def test_custom_value_name(self): + result10 = melt(self.df, value_name=self.value_name) + self.assertEqual(result10.columns.tolist(), ['variable', 'val']) + + result11 = melt(self.df, id_vars=['id1'], value_name=self.value_name) + self.assertEqual(result11.columns.tolist(), ['id1', 'variable', 'val']) + + result12 = melt(self.df, id_vars=['id1', 'id2'], value_name=self.value_name) + self.assertEqual(result12.columns.tolist(), ['id1', 'id2', 'variable', 'val']) + + result13 = melt(self.df, id_vars=['id1', 'id2'], + value_vars='A', value_name=self.value_name) + self.assertEqual(result13.columns.tolist(), ['id1', 'id2', 'variable', 'val']) + + result14 = melt(self.df, id_vars=['id1', 'id2'], + value_vars=['A', 'B'], 
value_name=self.value_name) + expected14 = DataFrame({'id1': self.df['id1'].tolist() * 2, + 'id2': self.df['id2'].tolist() * 2, + 'variable': ['A']*10 + ['B']*10, + self.value_name: self.df['A'].tolist() + self.df['B'].tolist()}, + columns=['id1', 'id2', 'variable', self.value_name]) + tm.assert_frame_equal(result14, expected14) + + def test_custom_var_and_value_name(self): + + result15 = melt(self.df, var_name=self.var_name, value_name=self.value_name) + self.assertEqual(result15.columns.tolist(), ['var', 'val']) + + result16 = melt(self.df, id_vars=['id1'], var_name=self.var_name, value_name=self.value_name) + self.assertEqual(result16.columns.tolist(), ['id1', 'var', 'val']) + + result17 = melt(self.df, id_vars=['id1', 'id2'], + var_name=self.var_name, value_name=self.value_name) + self.assertEqual(result17.columns.tolist(), ['id1', 'id2', 'var', 'val']) + + result18 = melt(df, id_vars=['id1', 'id2'], + value_vars='A', var_name=self.var_name, value_name=self.value_name) + self.assertEqual(result18.columns.tolist(), ['id1', 'id2', 'var', 'val']) + + result19 = melt(self.df, id_vars=['id1', 'id2'], + value_vars=['A', 'B'], var_name=self.var_name, value_name=self.value_name) + expected19 = DataFrame({'id1': self.df['id1'].tolist() * 2, + 'id2': self.df['id2'].tolist() * 2, + var_name: ['A']*10 + ['B']*10, + value_name: self.df['A'].tolist() + self.df['B'].tolist()}, + columns=['id1', 'id2', self.var_name, self.value_name]) + tm.assert_frame_equal(result19, expected19) + + def test_custom_var_and_value_name(self): + self.df.columns.name = 'foo' + result20 = melt(self.df) + self.assertEqual(result20.columns.tolist(), ['foo', 'value']) + +class TestConvertDummies(unittest.TestCase): + def test_convert_dummies(self): + df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', + 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'three', + 'two', 'two', 'one', 'three'], + 'C': np.random.randn(8), + 'D': np.random.randn(8)}) + + result = convert_dummies(df, ['A', 'B']) + 
result2 = convert_dummies(df, ['A', 'B'], prefix_sep='.') + + expected = DataFrame({'A_foo': [1, 0, 1, 0, 1, 0, 1, 1], + 'A_bar': [0, 1, 0, 1, 0, 1, 0, 0], + 'B_one': [1, 1, 0, 0, 0, 0, 1, 0], + 'B_two': [0, 0, 1, 0, 1, 1, 0, 0], + 'B_three': [0, 0, 0, 1, 0, 0, 0, 1], + 'C': df['C'].values, + 'D': df['D'].values}, + columns=result.columns, dtype=float) + expected2 = expected.rename(columns=lambda x: x.replace('_', '.')) + + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(result2, expected2) + + +class TestLreshape(unittest.TestCase): def test_pairs(self): data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008',
Currently melt allows you to manually specify the val_name and var_name, this changes the default behaviour to grab the column name if one is available. ``` In [1]: df = pd.DataFrame({'a': {0: 1.0676829999999999, 1: -1.3214049999999999, 2: -0.80733299999999997}, 'b': {0: -1.110463, 1: 0.36891499999999999, 2: 0.082979999999999998}, 'c': {0: 0.20866999999999999, 1: -1.055342, 2: -0.87336100000000005}}) In [2]: df.columns.name = 'foo' In [3]: df Out[3]: foo a b c 0 1.067683 -1.110463 0.208670 1 -1.321405 0.368915 -1.055342 2 -0.807333 0.082980 -0.873361 In [4]: pd.melt(df) # before this would have columns ['variable', 'value'] Out[4]: foo value 0 a 1.067683 1 a -1.321405 2 a -0.807333 3 b -1.110463 4 b 0.368915 5 b 0.082980 6 c 0.208670 7 c -1.055342 8 c -0.873361 Also, cleaned up all the tests (and added some more) in test_reshape. ```
https://api.github.com/repos/pandas-dev/pandas/pulls/4144
2013-07-06T00:08:11Z
2013-07-06T13:42:08Z
2013-07-06T13:42:08Z
2014-06-13T05:42:28Z
BLD: install pip serially
diff --git a/ci/install.sh b/ci/install.sh index 54de404854af8..4795ac8f4f59d 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -53,8 +53,9 @@ if [ x"$FULL_DEPS" == x"true" ] ; then fi # Hard Deps -time pip install $PIP_ARGS nose python-dateutil pytz>=2013a -time pip install $PIP_ARGS cython==0.19.1 +for dep in nose 'python-dateutil' 'pytz>=2013a' 'cython==0.19.1'; do + time pip install $PIP_ARGS $dep +done if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3 time pip install $PIP_ARGS numpy==1.7.1
closes #4132.
https://api.github.com/repos/pandas-dev/pandas/pulls/4141
2013-07-05T20:50:54Z
2013-07-05T22:38:54Z
2013-07-05T22:38:54Z
2014-07-16T08:17:43Z
BUG: Make ExcelFile.parse pass kwds to TextParser
diff --git a/pandas/io/excel.py b/pandas/io/excel.py index f61db447f2cfc..03b547fcd47b7 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -126,7 +126,8 @@ def parse(self, sheetname, header=0, skiprows=None, skip_footer=0, na_values=na_values, thousands=thousands, chunksize=chunksize, - skip_footer=skip_footer) + skip_footer=skip_footer, + **kwds) def _should_parse(self, i, parse_cols): @@ -163,7 +164,8 @@ def _excel2num(x): def _parse_excel(self, sheetname, header=0, skiprows=None, skip_footer=0, index_col=None, has_index_names=None, parse_cols=None, parse_dates=False, date_parser=None, - na_values=None, thousands=None, chunksize=None): + na_values=None, thousands=None, chunksize=None, + **kwds): from xlrd import (xldate_as_tuple, XL_CELL_DATE, XL_CELL_ERROR, XL_CELL_BOOLEAN) @@ -206,7 +208,8 @@ def _parse_excel(self, sheetname, header=0, skiprows=None, date_parser=date_parser, skiprows=skiprows, skip_footer=skip_footer, - chunksize=chunksize) + chunksize=chunksize, + **kwds) return parser.read() diff --git a/pandas/io/tests/data/test2.xlsx b/pandas/io/tests/data/test2.xlsx new file mode 100644 index 0000000000000..441db5e55e666 Binary files /dev/null and b/pandas/io/tests/data/test2.xlsx differ diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py index 489546557b938..baf6966530772 100644 --- a/pandas/io/tests/test_excel.py +++ b/pandas/io/tests/test_excel.py @@ -182,6 +182,22 @@ def test_excel_cell_error_na(self): expected = DataFrame([[np.nan]], columns=['Test']) tm.assert_frame_equal(parsed, expected) + def test_excel_passes_na(self): + _skip_if_no_xlrd() + + excel_data = ExcelFile(os.path.join(self.dirpath, 'test2.xlsx')) + parsed = excel_data.parse('Sheet1', keep_default_na=False, + na_values=['apple']) + expected = DataFrame([['NA'], [1], ['NA'], [np.nan], ['rabbit']], + columns=['Test']) + tm.assert_frame_equal(parsed, expected) + + parsed = excel_data.parse('Sheet1', keep_default_na=True, + na_values=['apple']) + expected = 
DataFrame([[np.nan], [1], [np.nan], [np.nan], ['rabbit']], + columns=['Test']) + tm.assert_frame_equal(parsed, expected) + def test_excel_table(self): _skip_if_no_xlrd()
Fixes #4131
https://api.github.com/repos/pandas-dev/pandas/pulls/4139
2013-07-05T17:06:56Z
2013-07-06T13:01:17Z
2013-07-06T13:01:17Z
2014-07-16T08:17:42Z
BUG: period idx str map incorrectly returns a str repr of itself when ind.map(str) is called
diff --git a/doc/source/release.rst b/doc/source/release.rst index 7c09c2a6f16ac..62d3201eb3a58 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -48,6 +48,8 @@ pandas 0.13 with a different block ordering (:issue:`4096`) - The ``by`` argument now works correctly with the ``layout`` argument (:issue:`4102`, :issue:`4014`) in ``*.hist`` plotting methods + - Fixed bug in ``PeriodIndex.map`` where using ``str`` would return the str + representation of the index (:issue:`4136`) pandas 0.12 =========== diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index 7f63c545c5664..a8b61f338d044 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -24,6 +24,9 @@ Bug Fixes - The ``by`` argument now works correctly with the ``layout`` argument (:issue:`4102`, :issue:`4014`) in ``*.hist`` plotting methods + - Fixed bug in ``PeriodIndex.map`` where using ``str`` would return the str + representation of the index (:issue:`4136`) + See the :ref:`full release notes <release>` or issue tracker on GitHub for a complete list. 
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 2db32b14e2eb3..4fec590dddd14 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -469,7 +469,6 @@ def dt64arr_to_periodarr(data, freq, tz): # --- Period index sketch - def _period_index_cmp(opname): """ Wrap comparison operations to convert datetime-like to datetime64 @@ -493,6 +492,7 @@ def wrapper(self, other): return result return wrapper + class PeriodIndex(Int64Index): """ Immutable ndarray holding ordinal values indicating regular periods in @@ -791,10 +791,12 @@ def to_datetime(self, dayfirst=False): # Especially important for group-by functionality def map(self, f): try: - return f(self) - except: - values = self._get_object_array() - return _algos.arrmap_object(values, f) + result = f(self) + if not isinstance(result, np.ndarray): + raise TypeError + return result + except Exception: + return _algos.arrmap_object(self.asobject, f) def _get_object_array(self): freq = self.freq @@ -1169,6 +1171,7 @@ def __setstate__(self, state): else: # pragma: no cover np.ndarray.__setstate__(self, state) + def _get_ordinal_range(start, end, periods, freq): if com._count_not_none(start, end, periods) < 2: raise ValueError('Must specify 2 of start, end, periods') diff --git a/pandas/tseries/tests/test_period.py b/pandas/tseries/tests/test_period.py index 01c984ec2b07d..9fd5e6bf5f3e9 100644 --- a/pandas/tseries/tests/test_period.py +++ b/pandas/tseries/tests/test_period.py @@ -27,6 +27,7 @@ from pandas import Series, TimeSeries, DataFrame from pandas.util.testing import assert_series_equal, assert_almost_equal import pandas.util.testing as tm +from pandas.util import py3compat from numpy.testing import assert_array_equal @@ -1990,7 +1991,30 @@ def test_map(self): result = index.map(lambda x: x.ordinal) exp = [x.ordinal for x in index] - self.assert_(np.array_equal(result, exp)) + assert_array_equal(result, exp) + + def test_map_with_string_constructor(self): + raw = [2005, 2007, 2009] 
+ index = PeriodIndex(raw, freq='A') + types = str, + if not py3compat.PY3: + types += unicode, + + for t in types: + expected = np.array(map(t, raw), dtype=object) + res = index.map(t) + + # should return an array + self.assert_(isinstance(res, np.ndarray)) + + # preserve element types + self.assert_(all(isinstance(resi, t) for resi in res)) + + # dtype should be object + self.assertEqual(res.dtype, np.dtype('object').type) + + # lastly, values should compare equal + assert_array_equal(res, expected) def test_convert_array_of_periods(self): rng = period_range('1/1/2000', periods=20, freq='D')
https://api.github.com/repos/pandas-dev/pandas/pulls/4136
2013-07-05T16:07:44Z
2013-07-25T14:11:10Z
2013-07-25T14:11:10Z
2014-06-21T20:41:46Z
BLD: use wheel packages to reduce travis-ci build times.
diff --git a/.gitignore b/.gitignore index f12847a80edaf..9a0794373cfdc 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,5 @@ pandas/io/*.json .idea/pandas.iml .build_cache_dir +.vagrant +*.whl \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 30f09deefd93a..ff292767011cf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -10,13 +10,13 @@ env: matrix: include: - python: 2.7 - env: NOSE_ARGS="slow and not network" LOCALE_OVERRIDE="zh_CN.GB18030" FULL_DEPS=true UPLOAD=false + env: NOSE_ARGS="slow and not network" LOCALE_OVERRIDE="zh_CN.GB18030" FULL_DEPS=true - python: 2.7 - env: NOSE_ARGS="not slow" FULL_DEPS=true UPLOAD=true + env: NOSE_ARGS="not slow" FULL_DEPS=true - python: 3.2 - env: NOSE_ARGS="not slow" FULL_DEPS=true UPLOAD=true + env: NOSE_ARGS="not slow" FULL_DEPS=true - python: 3.3 - env: NOSE_ARGS="not slow" UPLOAD=true + env: NOSE_ARGS="not slow" # allow importing from site-packages, # so apt-get python-x works for system pythons diff --git a/ci/install.sh b/ci/install.sh index 60ea5643c6ad2..54de404854af8 100755 --- a/ci/install.sh +++ b/ci/install.sh @@ -14,11 +14,18 @@ # echo "inside $0" + # Install Dependencie -SITE_PKG_DIR=$VIRTUAL_ENV/lib/python$TRAVIS_PYTHON_VERSION/site-packages -echo "Using SITE_PKG_DIR: $SITE_PKG_DIR" +# as of pip 1.4rc2, wheel files are still being broken regularly, this is a known good +# commit. 
should revert to pypi when a final release is out +pip install -I git+https://github.com/pypa/pip@42102e9deaea99db08b681d06906c2945f6f95e2#egg=pip +pip Install -I https://bitbucket.org/pypa/setuptools/downloads/setuptools-0.8b6.tar.gz +pip install wheel + +# comment this line to disable the fetching of wheel files +PIP_ARGS+=" -I --use-wheel --find-links=https://cache27-pypandas.rhcloud.com/" -# workaround for travis ignoring system_site_packages in travis.yml +# Force virtualenv to accpet system_site_packages rm -f $VIRTUAL_ENV/lib/python$TRAVIS_PYTHON_VERSION/no-global-site-packages.txt if [ x"$LOCALE_OVERRIDE" != x"" ]; then @@ -30,75 +37,73 @@ fi; #scipy is not included in the cached venv if [ x"$FULL_DEPS" == x"true" ] ; then # for pytables gets the lib as well - sudo apt-get $APT_ARGS install libhdf5-serial-dev + time sudo apt-get $APT_ARGS install libhdf5-serial-dev if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then - sudo apt-get $APT_ARGS install python3-bs4 + time sudo apt-get $APT_ARGS install python3-bs4 elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - sudo apt-get $APT_ARGS install python-bs4 + time sudo apt-get $APT_ARGS install python-bs4 fi if [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then - sudo apt-get $APT_ARGS install python3-scipy + time sudo apt-get $APT_ARGS install python3-scipy elif [ ${TRAVIS_PYTHON_VERSION} == "2.7" ]; then - sudo apt-get $APT_ARGS install python-scipy + time sudo apt-get $APT_ARGS install python-scipy fi fi # Hard Deps -pip install $PIP_ARGS nose python-dateutil pytz -pip install $PIP_ARGS cython +time pip install $PIP_ARGS nose python-dateutil pytz>=2013a +time pip install $PIP_ARGS cython==0.19.1 if [ ${TRAVIS_PYTHON_VERSION} == "3.3" ]; then # should be >=3,3 - pip install $PIP_ARGS numpy==1.7.0 + time pip install $PIP_ARGS numpy==1.7.1 elif [ ${TRAVIS_PYTHON_VERSION} == "3.2" ]; then # sudo apt-get $APT_ARGS install python3-numpy; # 1.6.2 or precise - pip install $PIP_ARGS numpy==1.6.1 + time pip install $PIP_ARGS 
numpy==1.6.1 else - pip install $PIP_ARGS numpy==1.6.1 + time pip install $PIP_ARGS numpy==1.6.1 fi # Optional Deps if [ x"$FULL_DEPS" == x"true" ]; then echo "Installing FULL_DEPS" - pip install $PIP_ARGS cython if [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - pip install $PIP_ARGS xlwt - pip install $PIP_ARGS bottleneck - pip install $PIP_ARGS numexpr==2.0.1 - pip install $PIP_ARGS tables==2.3.1 + time pip install $PIP_ARGS xlwt + time pip install $PIP_ARGS bottleneck==0.6.0 + time pip install $PIP_ARGS numexpr==2.1 + time pip install $PIP_ARGS tables==2.3.1 else - pip install $PIP_ARGS numexpr - pip install $PIP_ARGS tables + time pip install $PIP_ARGS numexpr==2.1 + time pip install $PIP_ARGS tables==3.0.0 fi - pip install $PIP_ARGS matplotlib - pip install $PIP_ARGS openpyxl - pip install $PIP_ARGS xlrd>=0.9.0 - pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r=' - pip install $PIP_ARGS patsy - pip install $PIP_ARGS html5lib + time pip install $PIP_ARGS matplotlib==1.2.1 + time pip install $PIP_ARGS openpyxl + time pip install $PIP_ARGS xlrd>=0.9.0 + time pip install $PIP_ARGS 'http://downloads.sourceforge.net/project/pytseries/scikits.timeseries/0.91.3/scikits.timeseries-0.91.3.tar.gz?r=' + time pip install $PIP_ARGS patsy + time pip install $PIP_ARGS html5lib if [ ${TRAVIS_PYTHON_VERSION:0:1} == "3" ]; then - sudo apt-get $APT_ARGS remove python3-lxml + time sudo apt-get $APT_ARGS remove python3-lxml elif [ ${TRAVIS_PYTHON_VERSION:0:1} == "2" ]; then - sudo apt-get $APT_ARGS remove python-lxml + time sudo apt-get $APT_ARGS remove python-lxml fi - pip install $PIP_ARGS lxml # fool statsmodels into thinking pandas was already installed # so it won't refuse to install itself. 
mkdir $SITE_PKG_DIR/pandas touch $SITE_PKG_DIR/pandas/__init__.py echo "version='0.10.0-phony'" > $SITE_PKG_DIR/pandas/version.py - pip install $PIP_ARGS git+git://github.com/statsmodels/statsmodels@c9062e43b8a5f7385537ca95#egg=statsmodels + time pip install $PIP_ARGS git+git://github.com/statsmodels/statsmodels@c9062e43b8a5f7385537ca95#egg=statsmodels rm -Rf $SITE_PKG_DIR/pandas # scrub phoney pandas fi # build pandas -python setup.py build_ext install +time python setup.py build_ext install true diff --git a/ci/speedpack/Vagrantfile b/ci/speedpack/Vagrantfile new file mode 100644 index 0000000000000..d4d718548e7c8 --- /dev/null +++ b/ci/speedpack/Vagrantfile @@ -0,0 +1,21 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : +Vagrant.configure("2") do |config| + config.vm.box = "precise64" + config.vm.box_url = "http://files.vagrantup.com/precise64.box" + +# config.vbguest.auto_update = true +# config.vbguest.no_remote = true + + config.vm.synced_folder "wheelhouse", "/wheelhouse" + + config.vm.provider :virtualbox do |vb| + vb.customize ["modifyvm", :id, "--cpus", "4"] + vb.customize ["modifyvm", :id, "--memory", "2048"] + vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] + vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"] + end + + config.vm.provision :shell, :path => "build.sh" + +end diff --git a/ci/speedpack/build.sh b/ci/speedpack/build.sh new file mode 100755 index 0000000000000..93a7e83b97161 --- /dev/null +++ b/ci/speedpack/build.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +# This script is meant to run on a mint precise64 VM. +# The generated wheel files should be compatible +# with travis-ci as of 07/2013. +# +# Runtime can be up to an hour or more. + +echo "Running build.sh..." 
+set -x + +WHEEL_DIR=/wheelhouse +VERSIONS="2.6 2.7 3.2 3.3" +SCRIPT_FILE="/tmp/run.sh" +PARALLEL=false + +export PIP_ARGS=" --download-cache /tmp -w $WHEEL_DIR --use-wheel --find-links=$WHEEL_DIR" + +apt-get update +apt-get install python-software-properties git -y +apt-add-repository ppa:fkrull/deadsnakes -y +apt-get update + +apt-get install python-pip libfreetype6-dev libpng12-dev -y +pip install virtualenv +apt-get install libhdf5-serial-dev g++ -y + + +function generate_wheels { + VER=$1 + set -x + + if [ x"$VIRTUAL_ENV" != x"" ]; then + deactivate + fi + + cd ~/ + sudo rm -Rf venv-$VER + virtualenv -p python$VER venv-$VER + source venv-$VER/bin/activate + + pip install -I --download-cache /tmp git+https://github.com/pypa/pip@42102e9d#egg=pip + pip install -I --download-cache /tmp https://bitbucket.org/pypa/setuptools/downloads/setuptools-0.8b6.tar.gz + pip install -I --download-cache /tmp wheel + + export INCLUDE_PATH=/usr/include/python$VER/ + export C_INCLUDE_PATH=/usr/include/python$VER/ + pip wheel $PIP_ARGS cython==0.19.1 + pip install --use-wheel --find-links=$WHEEL_DIR cython==0.19.1 + + pip wheel $PIP_ARGS numpy==1.6.1 + pip wheel $PIP_ARGS numpy==1.7.1 + pip install --use-wheel --find-links=$WHEEL_DIR numpy==1.7.1 + pip wheel $PIP_ARGS bottleneck==0.6.0 + + pip wheel $PIP_ARGS numexpr==1.4.2 + pip install --use-wheel --find-links=$WHEEL_DIR numexpr==1.4.2 + pip wheel $PIP_ARGS tables==2.3.1 + pip wheel $PIP_ARGS tables==2.4.0 + + pip uninstall numexpr -y + pip wheel $PIP_ARGS numexpr==2.1 + pip install --use-wheel --find-links=$WHEEL_DIR numexpr==2.1 + pip wheel $PIP_ARGS tables==3.0.0 + pip uninstall numexpr -y + + pip wheel $PIP_ARGS matplotlib==1.2.1 +} + + +for VER in $VERSIONS ; do + apt-get install python$VER python$VER-dev -y +done + +if $PARALLEL; then + echo '#!/bin/bash' > $SCRIPT_FILE + echo "export WHEEL_DIR=$WHEEL_DIR" >> $SCRIPT_FILE + echo "export PIP_ARGS='$PIP_ARGS'">> $SCRIPT_FILE + + declare -f generate_wheels >> $SCRIPT_FILE + 
echo 'generate_wheels $1' >> $SCRIPT_FILE + chmod u+x $SCRIPT_FILE + + pip install -I --download-cache /tmp git+https://github.com/pypa/pip@42102e9d#egg=pip + pip install --download-cache /tmp --no-install wheel + pip install --download-cache /tmp --no-install https://bitbucket.org/pypa/setuptools/downloads/setuptools-0.8b6.tar.gz + + for VER in 2.6 2.7 3.2 3.3 ; do + $SCRIPT_FILE $VER & + done + + wait + +else + for VER in 2.6 2.7 3.2 3.3 ; do + generate_wheels $VER + done +fi diff --git a/ci/speedpack/nginx/nginx.conf.template b/ci/speedpack/nginx/nginx.conf.template new file mode 100644 index 0000000000000..e2cfeaf053d08 --- /dev/null +++ b/ci/speedpack/nginx/nginx.conf.template @@ -0,0 +1,48 @@ +#user nobody; +worker_processes 1; + +#error_log logs/error.log; +#error_log logs/error.log notice; +#error_log logs/error.log info; + +#pid logs/nginx.pid; + + +events { + worker_connections 1024; +} + + +http { + include mime.types; + default_type application/octet-stream; + + #log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + # '$status $body_bytes_sent "$http_referer" ' + # '"$http_user_agent" "$http_x_forwarded_for"'; + + #access_log logs/access.log on; + + sendfile on; + #tcp_nopush on; + + #keepalive_timeout 0; + keepalive_timeout 65; + + #gzip on; + + server { + listen $OPENSHIFT_IP:$OPENSHIFT_PORT; + + access_log access.log ; + sendfile on; + + location / { + root ../../app-root/data/store/; + autoindex on; + } + + + } + +} diff --git a/ci/speedpack/wheelhouse/placeholder b/ci/speedpack/wheelhouse/placeholder new file mode 100644 index 0000000000000..e69de29bb2d1d
Roughly from 40 minutes to 13 minutes. Before: https://travis-ci.org/pydata/pandas/builds/8738816 After: https://travis-ci.org/y-p/pandas/builds/8749214 The existing build cache system for 2to3 and cythonized caching can knock this down much more, but it's more fragile and part of the reason the previous attempt ultimately failed. Really need to do that in a jenkins server somewhere , if at all. There's no opt-in/whitelist anymore. faster for everyone. The building of the dependencies (a rare occurence) is automated in a provisioning script for vagrant under ci/speedpack. should just be 'vagrant up' and step away for an hour. The package files are hosted on a private openshift server but if a bus hits me it's all here for someone to plunk down a static file webserver somewhere and tweak the url. In case this goes bad and I'm not around to notice, there's one line to comment in ci/install.sh. cc @jreback @cpcloud enjoy.
https://api.github.com/repos/pandas-dev/pandas/pulls/4128
2013-07-04T23:24:22Z
2013-07-04T23:34:56Z
2013-07-04T23:34:56Z
2014-07-16T08:17:35Z
fix typo - change conn to con
diff --git a/pandas/io/sql.py b/pandas/io/sql.py index 68dff479a5015..1794bda20d2fc 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -177,7 +177,7 @@ def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs): ---------- frame: DataFrame name: name of SQL table - conn: an open SQL database connection object + con: an open SQL database connection object flavor: {'sqlite', 'mysql', 'oracle'}, default 'sqlite' if_exists: {'fail', 'replace', 'append'}, default 'fail' fail: If table exists, do nothing.
Noticed a typo in the documentation of `write_frame`. Method def has `con` while documentation had `conn`. `def write_frame(frame, name, con, flavor='sqlite', if_exists='fail', **kwargs):`
https://api.github.com/repos/pandas-dev/pandas/pulls/4127
2013-07-04T16:33:39Z
2013-07-05T13:01:31Z
2013-07-05T13:01:31Z
2014-07-16T08:17:34Z
CLN: Small fix to clean_pyc task
diff --git a/Makefile b/Makefile index 5349443ed477f..77342089365f0 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ clean: clean_pyc -find . -name '*.so' -exec rm {} \; clean_pyc: - -find . -name '*.pyc' -or -name '*.pyo' -exec rm {} \; + -find . -name '*.pyc' -exec rm {} \; -or -name '*.pyo' -exec rm {} \; tseries: pandas/lib.pyx pandas/tslib.pyx pandas/hashtable.pyx python setup.py build_ext --inplace
The previous `clean_pyc` wasn't actually removing `*.pyc` files - apparently each branch of find needs its own exec (I guess?). Anyways this works now.
https://api.github.com/repos/pandas-dev/pandas/pulls/4124
2013-07-04T14:08:37Z
2013-07-04T16:02:45Z
2013-07-04T16:02:45Z
2014-07-16T08:17:33Z
BUG/TST: test fixes for windows compat
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index cc25d7e066e30..401a7746953cb 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2206,7 +2206,10 @@ def _sanitize_column(self, key, value): 'length of index') if not isinstance(value, np.ndarray): - value = com._asarray_tuplesafe(value) + if isinstance(value, list) and len(value) > 0: + value = com._possibly_convert_platform(value) + else: + value = com._asarray_tuplesafe(value) elif isinstance(value, PeriodIndex): value = value.asobject elif value.ndim == 2: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 3e45b69fb740a..915509bac9059 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -4891,14 +4891,17 @@ def make_dtnat_arr(n,nnat=None): s[-i] = NaT s[i] = NaT return s + # N=35000 s1=make_dtnat_arr(chunksize+5) s2=make_dtnat_arr(chunksize+5,0) + # s3=make_dtnat_arr(chunksize+5,0) - df=DataFrame(dict(a=s1,b=s2)) - df.to_csv('/tmp/1.csv',chunksize=chunksize) - recons = DataFrame.from_csv('/tmp/1.csv').convert_objects('coerce') - assert_frame_equal(df, recons,check_names=False,check_less_precise=True) + with ensure_clean('1.csv') as path: + df=DataFrame(dict(a=s1,b=s2)) + df.to_csv(path,chunksize=chunksize) + recons = DataFrame.from_csv(path).convert_objects('coerce') + assert_frame_equal(df, recons,check_names=False,check_less_precise=True) for ncols in [4]: base = int((chunksize// ncols or 1) or 1) @@ -6864,7 +6867,7 @@ def test_replace_convert(self): df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']]) m = {'foo': 1, 'bar': 2, 'bah': 3} rep = df.replace(m) - expec = Series([np.int_, np.int_, np.int_]) + expec = Series([ np.int64] * 3) res = rep.dtypes assert_series_equal(expec, res) @@ -10079,7 +10082,6 @@ def test_insert_column_bug_4032(self): result = df.rename(columns={}) str(result) - expected = DataFrame([[1,1.1],[2, 2.2]],columns=['a','b']) assert_frame_equal(result,expected) df.insert(0, 'c', [1.3, 2.3])
closes #4122
https://api.github.com/repos/pandas-dev/pandas/pulls/4123
2013-07-04T13:25:26Z
2013-07-04T15:11:52Z
2013-07-04T15:11:52Z
2014-06-13T11:22:37Z
BUG: GH4119 Fixed bug in convert_objects(convert_numeric=True) where a mixed numeric and object not converting
diff --git a/doc/source/release.rst b/doc/source/release.rst index a7469ba2e707b..7a271688c318b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -316,6 +316,8 @@ pandas 0.12 - Better error messages on inserting incompatible columns to a frame (:issue:`4107`) - Fixed bug in ``DataFrame.replace`` where a nested dict wasn't being iterated over when regex=False (:issue:`4115`) + - Fixed bug in ``convert_objects(convert_numeric=True)`` where a mixed numeric and + object Series/Frame was not converting properly (:issue:`4119`) pandas 0.11.0 diff --git a/pandas/core/series.py b/pandas/core/series.py index 5ea029b414fef..06abd1d5b4127 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -928,7 +928,7 @@ def astype(self, dtype): return self._constructor(values, index=self.index, name=self.name, dtype=values.dtype) - def convert_objects(self, convert_dates=True, convert_numeric=True, copy=True): + def convert_objects(self, convert_dates=True, convert_numeric=False, copy=True): """ Attempt to infer better dtype diff --git a/pandas/src/inference.pyx b/pandas/src/inference.pyx index 270fb01a42033..f4474bfb5f853 100644 --- a/pandas/src/inference.pyx +++ b/pandas/src/inference.pyx @@ -379,11 +379,14 @@ def maybe_convert_numeric(ndarray[object] values, set na_values, elif util.is_float_object(val): floats[i] = complexes[i] = val seen_float = 1 + elif util.is_integer_object(val): + floats[i] = ints[i] = val + seen_int = 1 elif val is None: floats[i] = complexes[i] = nan seen_float = 1 - elif len(val) == 0: - if convert_empty: + elif hasattr(val,'__len__') and len(val) == 0: + if convert_empty or coerce_numeric: floats[i] = complexes[i] = nan seen_float = 1 else: diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index aae15fa6fd09f..3e45b69fb740a 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -5476,6 +5476,12 @@ def test_convert_objects(self): converted = self.mixed_frame.copy() 
self.assertRaises(Exception, converted['H'].astype, 'int32') + # mixed in a single column + df = DataFrame(dict(s = Series([1, 'na', 3 ,4]))) + result = df.convert_objects(convert_numeric=True) + expected = DataFrame(dict(s = Series([1, np.nan, 3 ,4]))) + assert_frame_equal(result, expected) + def test_convert_objects_no_conversion(self): mixed1 = DataFrame( {'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']}) diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py index 09f3cc7b61f33..b4ad172ddf340 100644 --- a/pandas/tests/test_series.py +++ b/pandas/tests/test_series.py @@ -3430,25 +3430,36 @@ def test_convert_objects(self): s = Series([1., 2, 3],index=['a','b','c']) result = s.convert_objects(convert_dates=False,convert_numeric=True) - assert_series_equal(s,result) + assert_series_equal(result, s) # force numeric conversion r = s.copy().astype('O') r['a'] = '1' result = r.convert_objects(convert_dates=False,convert_numeric=True) - assert_series_equal(s,result) + assert_series_equal(result, s) r = s.copy().astype('O') r['a'] = '1.' 
result = r.convert_objects(convert_dates=False,convert_numeric=True) - assert_series_equal(s,result) + assert_series_equal(result, s) r = s.copy().astype('O') r['a'] = 'garbled' expected = s.copy() expected['a'] = np.nan result = r.convert_objects(convert_dates=False,convert_numeric=True) - assert_series_equal(expected,result) + assert_series_equal(result, expected) + + # GH 4119, not converting a mixed type (e.g.floats and object) + s = Series([1, 'na', 3 ,4]) + result = s.convert_objects(convert_numeric=True) + expected = Series([1,np.nan,3,4]) + assert_series_equal(result, expected) + + s = Series([1, '', 3 ,4]) + result = s.convert_objects(convert_numeric=True) + expected = Series([1,np.nan,3,4]) + assert_series_equal(result, expected) # dates s = Series([datetime(2001,1,1,0,0), datetime(2001,1,2,0,0), datetime(2001,1,3,0,0) ]) @@ -3456,18 +3467,17 @@ def test_convert_objects(self): result = s.convert_objects(convert_dates=True,convert_numeric=False) expected = Series([Timestamp('20010101'),Timestamp('20010102'),Timestamp('20010103')],dtype='M8[ns]') - assert_series_equal(expected,result) + assert_series_equal(result, expected) result = s.convert_objects(convert_dates='coerce',convert_numeric=False) - assert_series_equal(expected,result) result = s.convert_objects(convert_dates='coerce',convert_numeric=True) - assert_series_equal(expected,result) + assert_series_equal(result, expected) expected = Series([Timestamp('20010101'),Timestamp('20010102'),Timestamp('20010103'),lib.NaT,lib.NaT,lib.NaT,Timestamp('20010104'),Timestamp('20010105')],dtype='M8[ns]') result = s2.convert_objects(convert_dates='coerce',convert_numeric=False) - assert_series_equal(expected,result) + assert_series_equal(result, expected) result = s2.convert_objects(convert_dates='coerce',convert_numeric=True) - assert_series_equal(expected,result) + assert_series_equal(result, expected) # preserver all-nans (if convert_dates='coerce') s = Series(['foo','bar',1,1.0],dtype='O')
closes #4119
https://api.github.com/repos/pandas-dev/pandas/pulls/4120
2013-07-03T21:37:43Z
2013-07-03T23:43:15Z
2013-07-03T23:43:15Z
2014-07-16T08:17:29Z
BUG: fix replace bug when a nested dict was passed
diff --git a/doc/source/release.rst b/doc/source/release.rst index d410f39da3ab9..a7469ba2e707b 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -314,6 +314,8 @@ pandas 0.12 - Fix bug where ``HDFStore`` will fail to append because of a different block ordering on-disk (:issue:`4096`) - Better error messages on inserting incompatible columns to a frame (:issue:`4107`) + - Fixed bug in ``DataFrame.replace`` where a nested dict wasn't being + iterated over when regex=False (:issue:`4115`) pandas 0.11.0 diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index c68551123cac2..b44d54d0eb31e 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -457,6 +457,8 @@ Bug Fixes rewritten in an incompatible way (:issue:`4062`, :issue:`4063`) - Fixed bug where sharex and sharey were not being passed to grouped_hist (:issue:`4089`) + - Fixed bug in ``DataFrame.replace`` where a nested dict wasn't being + iterated over when regex=False (:issue:`4115`) See the :ref:`full release notes <release>` or issue tracker diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 57be20a50f7bc..99af2d7becb39 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -756,7 +756,7 @@ def replace(self, to_replace, value, inplace=False, filter=None, blk = super(ObjectBlock, self).replace(to_replace, value, inplace=inplace, filter=filter, regex=regex) - elif both_lists and regex: + elif both_lists: for to_rep, v in itertools.izip(to_replace, value): blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace, filter=filter, regex=regex) diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index a8a435e3bb660..71892575002f2 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -6737,6 +6737,11 @@ def test_regex_replace_dict_nested(self): assert_frame_equal(res3, expec) assert_frame_equal(res4, expec) + def test_regex_replace_dict_nested_gh4115(self): + df = 
pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2}) + expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2}) + assert_frame_equal(df.replace({'Type': {'Q':0,'T':1}}), expected) + def test_regex_replace_list_to_scalar(self): mix = {'a': range(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']} df = DataFrame(mix)
closes #4115.
https://api.github.com/repos/pandas-dev/pandas/pulls/4117
2013-07-03T16:08:37Z
2013-07-03T18:59:43Z
2013-07-03T18:59:42Z
2014-07-16T08:17:27Z
CLN/TST: cleanup exception message testing by replacing with assertRaisesRegexp
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index a8a435e3bb660..eccdb53ff6764 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -25,10 +25,12 @@ from pandas import date_range import pandas as pd from pandas.io.parsers import read_csv +from pandas.parser import CParserError from pandas.util.testing import (assert_almost_equal, assert_series_equal, assert_frame_equal, + assertRaisesRegexp, makeCustomDataframe as mkdf, ensure_clean) from pandas.util import py3compat @@ -2208,47 +2210,34 @@ def test_constructor_dict(self): def test_constructor_error_msgs(self): # mix dict and array, wrong size - try: + def testit(): DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']}) - except (Exception), detail: - self.assert_(type(detail) == ValueError) - self.assert_("Mixing dicts with non-Series may lead to ambiguous ordering." in str(detail)) + assertRaisesRegexp(ValueError, "Mixing dicts with non-Series may lead to ambiguous ordering.", testit) # wrong size ndarray, GH 3105 - try: + def testit(): DataFrame(np.arange(12).reshape((4, 3)), columns=['foo', 'bar', 'baz'], index=date_range('2000-01-01', periods=3)) - except (Exception), detail: - self.assert_(type(detail) == ValueError) - self.assert_(str(detail).startswith("Shape of passed values is (3, 4), indices imply (3, 3)")) + assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 4\), indices imply \(3, 3\)", testit) # higher dim raise exception - try: + def testit(): DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1]) - except (Exception), detail: - self.assert_(type(detail) == ValueError) - self.assert_("Must pass 2-d input" in str(detail)) + assertRaisesRegexp(ValueError, "Must pass 2-d input", testit) # wrong size axis labels - try: + def testit(): DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1]) - except (Exception), detail: - self.assert_(type(detail) == ValueError) - self.assert_(str(detail).startswith("Shape of passed 
values is (3, 2), indices imply (3, 1)")) + assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(3, 1\)", testit) - try: + def testit(): DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2]) - except (Exception), detail: - self.assert_(type(detail) == ValueError) - self.assert_(str(detail).startswith("Shape of passed values is (3, 2), indices imply (2, 2)")) + assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 2\), indices imply \(2, 2\)", testit) - try: + def testit(): DataFrame({'a': False, 'b': True}) - except (Exception), detail: - msg = 'If using all scalar values, you must must pass an index' - self.assert_(type(detail) == ValueError) - self.assert_(msg in str(detail)) + assertRaisesRegexp(ValueError, 'If using all scalar values, you must must pass an index', testit) def test_insert_error_msmgs(self): @@ -2256,12 +2245,10 @@ def test_insert_error_msmgs(self): df = DataFrame(np.random.randint(0,2,(4,4)), columns=['a', 'b', 'c', 'd']) - try: + def testit(): df['gr'] = df.groupby(['b', 'c']).count() - except (Exception), detail: - msg = 'incompatible index of inserted column with frame index' - self.assert_(type(detail) == TypeError) - self.assert_(msg in str(detail)) + + assertRaisesRegexp(TypeError, 'incompatible index of inserted column with frame index', testit) def test_constructor_subclass_dict(self): # Test for passing dict subclass to constructor @@ -5133,17 +5120,13 @@ def _make_frame(names=None): df.to_csv(path,tupleize_cols=False) # catch invalid headers - try: + def testit(): read_csv(path,tupleize_cols=False,header=range(3),index_col=0) - except (Exception), detail: - if not str(detail).startswith('Passed header=[0,1,2] are too many rows for this multi_index of columns'): - raise AssertionError("failure in read_csv header=range(3)") + assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2\] are too many rows for this multi_index of columns', testit) - try: + def testit(): 
read_csv(path,tupleize_cols=False,header=range(7),index_col=0) - except (Exception), detail: - if not str(detail).startswith('Passed header=[0,1,2,3,4,5,6], len of 7, but only 6 lines in file'): - raise AssertionError("failure in read_csv header=range(7)") + assertRaisesRegexp(CParserError, 'Passed header=\[0,1,2,3,4,5,6\], len of 7, but only 6 lines in file', testit) for i in [3,4,5,6,7]: self.assertRaises(Exception, read_csv, path, tupleize_cols=False, header=range(i), index_col=0) diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 4e57977a787f2..5d1053289b49e 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -20,6 +20,7 @@ assert_series_equal, assert_almost_equal, ensure_clean, + assertRaisesRegexp, makeCustomDataframe as mkdf ) import pandas.core.panel as panelm @@ -959,23 +960,17 @@ def test_from_dict_mixed_orient(self): def test_constructor_error_msgs(self): - try: + def testit(): Panel(np.random.randn(3,4,5), range(4), range(5), range(5)) - except (Exception), detail: - self.assert_(type(detail) == ValueError) - self.assert_(str(detail).startswith("Shape of passed values is (3, 4, 5), indices imply (4, 5, 5)")) + assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 4, 5\), indices imply \(4, 5, 5\)", testit) - try: + def testit(): Panel(np.random.randn(3,4,5), range(5), range(4), range(5)) - except (Exception), detail: - self.assert_(type(detail) == ValueError) - self.assert_(str(detail).startswith("Shape of passed values is (3, 4, 5), indices imply (5, 4, 5)")) + assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 4, 5\), indices imply \(5, 4, 5\)", testit) - try: + def testit(): Panel(np.random.randn(3,4,5), range(5), range(5), range(4)) - except (Exception), detail: - self.assert_(type(detail) == ValueError) - self.assert_(str(detail).startswith("Shape of passed values is (3, 4, 5), indices imply (5, 5, 4)")) + assertRaisesRegexp(ValueError, "Shape of passed values is \(3, 4, 5\), 
indices imply \(5, 5, 4\)", testit) def test_conform(self): df = self.panel['ItemA'][:-5].filter(items=['A', 'B']) @@ -1385,7 +1380,7 @@ def test_to_excel(self): reader = ExcelFile(path) except ImportError: raise nose.SkipTest - + for item, df in self.panel.iterkv(): recdf = reader.parse(str(item), index_col=0) assert_frame_equal(df, recdf)
.
https://api.github.com/repos/pandas-dev/pandas/pulls/4114
2013-07-03T15:24:37Z
2013-07-03T16:27:43Z
2013-07-03T16:27:43Z
2014-07-16T08:17:25Z
BUG: make sure fig is not doubled when passing by to series.hist
diff --git a/doc/source/release.rst b/doc/source/release.rst index 322c4541b5760..b13a6daf3da5c 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -435,6 +435,8 @@ Bug Fixes - Bug in getitem with a duplicate index when using where (:issue:`4879`) - Fix Type inference code coerces float column into datetime (:issue:`4601`) - Fixed ``_ensure_numeric`` does not check for complex numbers (:issue:`4902`) + - Fixed a bug in ``Series.hist`` where two figures were being created when + the ``by`` argument was passed (:issue:`4112`, :issue:`4113`). pandas 0.12.0 diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 558bf17b0cd5c..49dc31514da7a 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -62,9 +62,10 @@ def test_plot(self): _check_plot_works(self.series[:10].plot, kind='barh') _check_plot_works(Series(randn(10)).plot, kind='bar', color='black') + @slow + def test_plot_figsize_and_title(self): # figsize and title import matplotlib.pyplot as plt - plt.close('all') ax = self.series.plot(title='Test', figsize=(16, 8)) self.assertEqual(ax.title.get_text(), 'Test') @@ -79,7 +80,6 @@ def test_bar_colors(self): default_colors = plt.rcParams.get('axes.color_cycle') custom_colors = 'rgcby' - plt.close('all') df = DataFrame(randn(5, 5)) ax = df.plot(kind='bar') @@ -91,7 +91,7 @@ def test_bar_colors(self): rs = rect.get_facecolor() self.assertEqual(xp, rs) - plt.close('all') + tm.close() ax = df.plot(kind='bar', color=custom_colors) @@ -103,8 +103,7 @@ def test_bar_colors(self): rs = rect.get_facecolor() self.assertEqual(xp, rs) - plt.close('all') - + tm.close() from matplotlib import cm # Test str -> colormap functionality @@ -118,7 +117,7 @@ def test_bar_colors(self): rs = rect.get_facecolor() self.assertEqual(xp, rs) - plt.close('all') + tm.close() # Test colormap functionality ax = df.plot(kind='bar', colormap=cm.jet) @@ -131,8 +130,7 @@ def test_bar_colors(self): rs = rect.get_facecolor() 
self.assertEqual(xp, rs) - plt.close('all') - + tm.close() df.ix[:, [0]].plot(kind='bar', color='DodgerBlue') @slow @@ -192,7 +190,7 @@ def test_hist(self): _check_plot_works(self.ts.hist, ax=ax) _check_plot_works(self.ts.hist, ax=ax, figure=fig) _check_plot_works(self.ts.hist, figure=fig) - plt.close('all') + tm.close() fig, (ax1, ax2) = plt.subplots(1, 2) _check_plot_works(self.ts.hist, figure=fig, ax=ax1) @@ -204,9 +202,8 @@ def test_hist(self): @slow def test_hist_layout(self): n = 10 - df = DataFrame({'gender': np.array(['Male', - 'Female'])[random.randint(2, - size=n)], + gender = tm.choice(['Male', 'Female'], size=n) + df = DataFrame({'gender': gender, 'height': random.normal(66, 4, size=n), 'weight': random.normal(161, 32, size=n)}) with tm.assertRaises(ValueError): @@ -219,23 +216,22 @@ def test_hist_layout(self): def test_hist_layout_with_by(self): import matplotlib.pyplot as plt n = 10 - df = DataFrame({'gender': np.array(['Male', - 'Female'])[random.randint(2, - size=n)], + gender = tm.choice(['Male', 'Female'], size=n) + df = DataFrame({'gender': gender, 'height': random.normal(66, 4, size=n), 'weight': random.normal(161, 32, size=n), 'category': random.randint(4, size=n)}) _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1)) - plt.close('all') + tm.close() _check_plot_works(df.height.hist, by=df.gender, layout=(1, 2)) - plt.close('all') + tm.close() _check_plot_works(df.weight.hist, by=df.category, layout=(1, 4)) - plt.close('all') + tm.close() _check_plot_works(df.weight.hist, by=df.category, layout=(4, 1)) - plt.close('all') + tm.close() @slow def test_hist_no_overlap(self): @@ -256,6 +252,15 @@ def test_plot_fails_with_dupe_color_and_style(self): with tm.assertRaises(ValueError): x.plot(style='k--', color='k') + @slow + def test_hist_by_no_extra_plots(self): + import matplotlib.pyplot as plt + n = 10 + df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n), + 'height': random.normal(66, 4, size=n)}) + axes = 
df.height.hist(by=df.gender) + self.assertEqual(len(plt.get_fignums()), 1) + def test_plot_fails_when_ax_differs_from_figure(self): from pylab import figure, close fig1 = figure() @@ -436,7 +441,6 @@ def test_plot_xy(self): self._check_data(df.plot(y=1), df[1].plot()) # figsize and title - plt.close('all') ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8)) self.assertEqual(ax.title.get_text(), 'Test') @@ -456,26 +460,26 @@ def test_xcompat(self): lines = ax.get_lines() self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex)) - plt.close('all') + tm.close() pd.plot_params['xaxis.compat'] = True ax = df.plot() lines = ax.get_lines() self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex)) - plt.close('all') + tm.close() pd.plot_params['x_compat'] = False ax = df.plot() lines = ax.get_lines() tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex) - plt.close('all') + tm.close() # useful if you're plotting a bunch together with pd.plot_params.use('x_compat', True): ax = df.plot() lines = ax.get_lines() self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex)) - plt.close('all') + tm.close() ax = df.plot() lines = ax.get_lines() tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex) @@ -499,6 +503,7 @@ def check_line(xpl, rsl): assert_array_equal(xpdata, rsdata) [check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)] + tm.close() @slow def test_subplots(self): @@ -537,19 +542,14 @@ def test_plot_bar(self): columns=['one', 'two', 'three', 'four']) _check_plot_works(df.plot, kind='bar') - close('all') _check_plot_works(df.plot, kind='bar', legend=False) - close('all') _check_plot_works(df.plot, kind='bar', subplots=True) - close('all') _check_plot_works(df.plot, kind='bar', stacked=True) - close('all') df = DataFrame(randn(10, 15), index=list(string.ascii_letters[:10]), columns=lrange(15)) _check_plot_works(df.plot, kind='bar') - close('all') df = DataFrame({'a': [0, 1], 'b': [1, 0]}) _check_plot_works(df.plot, kind='bar') @@ -678,18 
+678,18 @@ def test_hist(self): self.assertAlmostEqual(xtick.get_fontsize(), xf) self.assertAlmostEqual(xtick.get_rotation(), xrot) - plt.close('all') + tm.close() # make sure kwargs to hist are handled ax = ser.hist(normed=True, cumulative=True, bins=4) # height of last bin (index 5) must be 1.0 self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0) - plt.close('all') + tm.close() ax = ser.hist(log=True) # scale of y must be 'log' self.assertEqual(ax.get_yscale(), 'log') - plt.close('all') + tm.close() # propagate attr exception from matplotlib.Axes.hist with tm.assertRaises(AttributeError): @@ -698,7 +698,6 @@ def test_hist(self): @slow def test_hist_layout(self): import matplotlib.pyplot as plt - plt.close('all') df = DataFrame(randn(100, 4)) layout_to_expected_size = ( @@ -847,7 +846,7 @@ def test_line_colors(self): tmp = sys.stderr sys.stderr = StringIO() try: - plt.close('all') + tm.close() ax2 = df.plot(colors=custom_colors) lines2 = ax2.get_lines() for l1, l2 in zip(lines, lines2): @@ -855,7 +854,7 @@ def test_line_colors(self): finally: sys.stderr = tmp - plt.close('all') + tm.close() ax = df.plot(colormap='jet') @@ -867,7 +866,7 @@ def test_line_colors(self): rs = l.get_color() self.assertEqual(xp, rs) - plt.close('all') + tm.close() ax = df.plot(colormap=cm.jet) @@ -881,14 +880,13 @@ def test_line_colors(self): # make color a list if plotting one column frame # handles cases like df.plot(color='DodgerBlue') - plt.close('all') + tm.close() df.ix[:, [0]].plot(color='DodgerBlue') def test_default_color_cycle(self): import matplotlib.pyplot as plt plt.rcParams['axes.color_cycle'] = list('rgbk') - plt.close('all') df = DataFrame(randn(5, 3)) ax = df.plot() @@ -992,7 +990,7 @@ def test_grouped_hist(self): axes = plotting.grouped_hist(df.A, by=df.C) self.assertEqual(len(axes.ravel()), 4) - plt.close('all') + tm.close() axes = df.hist(by=df.C) self.assertEqual(axes.ndim, 2) self.assertEqual(len(axes.ravel()), 4) @@ -1000,7 +998,7 @@ def 
test_grouped_hist(self): for ax in axes.ravel(): self.assert_(len(ax.patches) > 0) - plt.close('all') + tm.close() # make sure kwargs to hist are handled axes = plotting.grouped_hist(df.A, by=df.C, normed=True, cumulative=True, bins=4) @@ -1010,14 +1008,13 @@ def test_grouped_hist(self): height = ax.get_children()[5].get_height() self.assertAlmostEqual(height, 1.0) - plt.close('all') + tm.close() axes = plotting.grouped_hist(df.A, by=df.C, log=True) # scale of y must be 'log' for ax in axes.ravel(): self.assertEqual(ax.get_yscale(), 'log') - plt.close('all') - + tm.close() # propagate attr exception from matplotlib.Axes.hist with tm.assertRaises(AttributeError): plotting.grouped_hist(df.A, by=df.C, foo='bar') @@ -1026,9 +1023,8 @@ def test_grouped_hist(self): def test_grouped_hist_layout(self): import matplotlib.pyplot as plt n = 100 - df = DataFrame({'gender': np.array(['Male', - 'Female'])[random.randint(2, - size=n)], + gender = tm.choice(['Male', 'Female'], size=n) + df = DataFrame({'gender': gender, 'height': random.normal(66, 4, size=n), 'weight': random.normal(161, 32, size=n), 'category': random.randint(4, size=n)}) @@ -1042,10 +1038,10 @@ def test_grouped_hist_layout(self): layout=(2, 1)) self.assertEqual(df.hist(column='height', by=df.gender, layout=(2, 1)).shape, (2,)) - plt.close('all') + tm.close() self.assertEqual(df.hist(column='height', by=df.category, layout=(4, 1)).shape, (4,)) - plt.close('all') + tm.close() self.assertEqual(df.hist(column='height', by=df.category, layout=(4, 2)).shape, (4, 2)) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index ce75e755a313f..18109e8c612b9 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -2042,15 +2042,16 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, """ import matplotlib.pyplot as plt - fig = kwds.get('figure', _gcf() - if plt.get_fignums() else plt.figure(figsize=figsize)) - if figsize is not None and tuple(figsize) != 
tuple(fig.get_size_inches()): - fig.set_size_inches(*figsize, forward=True) - if by is None: - if kwds.get('layout', None): + if kwds.get('layout', None) is not None: raise ValueError("The 'layout' keyword is not supported when " "'by' is None") + # hack until the plotting interface is a bit more unified + fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else + plt.figure(figsize=figsize)) + if (figsize is not None and tuple(figsize) != + tuple(fig.get_size_inches())): + fig.set_size_inches(*figsize, forward=True) if ax is None: ax = fig.gca() elif ax.get_figure() != fig:
closes #4112.
https://api.github.com/repos/pandas-dev/pandas/pulls/4113
2013-07-03T13:56:31Z
2013-09-22T13:32:57Z
2013-09-22T13:32:57Z
2014-06-15T15:35:14Z
CLN: use how="all" in data.py so that things that are not nan in other columns are kept
diff --git a/pandas/io/data.py b/pandas/io/data.py index 2d91bd4cd383c..e5985c703f93f 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -757,7 +757,7 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, get_range = slice(start_index - above_below, start_index + above_below + 1) - chop = df[get_range].dropna() + chop = df[get_range].dropna(how='all') chop.reset_index(inplace=True) data[nam] = chop return [data[nam] for nam in to_ret]
https://api.github.com/repos/pandas-dev/pandas/pulls/4111
2013-07-03T13:25:51Z
2013-07-03T14:02:47Z
2013-07-03T14:02:47Z
2014-07-16T08:17:21Z
ERROR: better error message reporting on inserting incompatible column to frame (GH4107)
diff --git a/doc/source/release.rst b/doc/source/release.rst index 5f2dd3df0c6a3..d410f39da3ab9 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -313,6 +313,7 @@ pandas 0.12 (:issue:`4089`) - Fix bug where ``HDFStore`` will fail to append because of a different block ordering on-disk (:issue:`4096`) + - Better error messages on inserting incompatible columns to a frame (:issue:`4107`) pandas 0.11.0 diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 5fe2d60993f2c..cc25d7e066e30 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2190,7 +2190,13 @@ def _sanitize_column(self, key, value): # copy the values value = value.values.copy() else: - value = value.reindex(self.index).values + + # GH 4107 + try: + value = value.reindex(self.index).values + except: + raise TypeError('incompatible index of inserted column ' + 'with frame index') if is_frame: value = value.T diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py index 4ed4c37b8afc7..a8a435e3bb660 100644 --- a/pandas/tests/test_frame.py +++ b/pandas/tests/test_frame.py @@ -2250,6 +2250,19 @@ def test_constructor_error_msgs(self): self.assert_(type(detail) == ValueError) self.assert_(msg in str(detail)) + def test_insert_error_msmgs(self): + + # GH 4107, more descriptive error message + df = DataFrame(np.random.randint(0,2,(4,4)), + columns=['a', 'b', 'c', 'd']) + + try: + df['gr'] = df.groupby(['b', 'c']).count() + except (Exception), detail: + msg = 'incompatible index of inserted column with frame index' + self.assert_(type(detail) == TypeError) + self.assert_(msg in str(detail)) + def test_constructor_subclass_dict(self): # Test for passing dict subclass to constructor data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in xrange(10)),
closes #4107
https://api.github.com/repos/pandas-dev/pandas/pulls/4108
2013-07-02T23:31:31Z
2013-07-03T00:04:38Z
2013-07-03T00:04:38Z
2014-07-16T08:17:18Z
ENH: add dropna argument to pivot_table
diff --git a/doc/source/release.rst b/doc/source/release.rst index facf753ced9a0..bf3b2ace62908 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -72,6 +72,7 @@ pandas 0.12 - support python3 (via ``PyTables 3.0.0``) (:issue:`3750`) - Add modulo operator to Series, DataFrame - Add ``date`` method to DatetimeIndex + - Add ``dropna`` argument to pivot_table (:issue: `3820`) - Simplified the API and added a describe method to Categorical - ``melt`` now accepts the optional parameters ``var_name`` and ``value_name`` to specify custom column names of the returned DataFrame (:issue:`3649`), diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py index 8d5ba7af0d92b..945f7fb4ab437 100644 --- a/pandas/tools/pivot.py +++ b/pandas/tools/pivot.py @@ -4,12 +4,13 @@ from pandas.core.index import MultiIndex from pandas.core.reshape import _unstack_multiple from pandas.tools.merge import concat +from pandas.tools.util import cartesian_product import pandas.core.common as com import numpy as np def pivot_table(data, values=None, rows=None, cols=None, aggfunc='mean', - fill_value=None, margins=False): + fill_value=None, margins=False, dropna=True): """ Create a spreadsheet-style pivot table as a DataFrame. The levels in the pivot table will be stored in MultiIndex objects (hierarchical indexes) on @@ -31,6 +32,8 @@ def pivot_table(data, values=None, rows=None, cols=None, aggfunc='mean', Value to replace missing values with margins : boolean, default False Add all row / columns (e.g. 
for subtotal / grand totals) + dropna : boolean, default True + Do not include columns whose entries are all NaN Examples -------- @@ -105,6 +108,19 @@ def pivot_table(data, values=None, rows=None, cols=None, aggfunc='mean', for i in range(len(rows), len(keys))] table = agged.unstack(to_unstack) + if not dropna: + try: + m = MultiIndex.from_arrays(cartesian_product(table.index.levels)) + table = table.reindex_axis(m, axis=0) + except AttributeError: + pass # it's a single level + + try: + m = MultiIndex.from_arrays(cartesian_product(table.columns.levels)) + table = table.reindex_axis(m, axis=1) + except AttributeError: + pass # it's a single level or a series + if isinstance(table, DataFrame): if isinstance(table.columns, MultiIndex): table = table.sortlevel(axis=1) @@ -216,7 +232,7 @@ def _convert_by(by): def crosstab(rows, cols, values=None, rownames=None, colnames=None, - aggfunc=None, margins=False): + aggfunc=None, margins=False, dropna=True): """ Compute a simple cross-tabulation of two (or more) factors. 
By default computes a frequency table of the factors unless an array of values and an @@ -238,6 +254,8 @@ def crosstab(rows, cols, values=None, rownames=None, colnames=None, If passed, must match number of column arrays passed margins : boolean, default False Add row/column margins (subtotals) + dropna : boolean, default True + Do not include columns whose entries are all NaN Notes ----- @@ -281,13 +299,13 @@ def crosstab(rows, cols, values=None, rownames=None, colnames=None, df = DataFrame(data) df['__dummy__'] = 0 table = df.pivot_table('__dummy__', rows=rownames, cols=colnames, - aggfunc=len, margins=margins) + aggfunc=len, margins=margins, dropna=dropna) return table.fillna(0).astype(np.int64) else: data['__dummy__'] = values df = DataFrame(data) table = df.pivot_table('__dummy__', rows=rownames, cols=colnames, - aggfunc=aggfunc, margins=margins) + aggfunc=aggfunc, margins=margins, dropna=dropna) return table diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py index e333691b1e6d2..a603118c2ad16 100644 --- a/pandas/tools/tests/test_pivot.py +++ b/pandas/tools/tests/test_pivot.py @@ -1,8 +1,9 @@ import unittest import numpy as np +from numpy.testing import assert_equal -from pandas import DataFrame, Series, Index +from pandas import DataFrame, Series, Index, MultiIndex from pandas.tools.merge import concat from pandas.tools.pivot import pivot_table, crosstab import pandas.util.testing as tm @@ -62,6 +63,22 @@ def test_pivot_table_nocols(self): xp = df.pivot_table(rows='cols', aggfunc={'values': 'mean'}).T tm.assert_frame_equal(rs, xp) + def test_pivot_table_dropna(self): + df = DataFrame({'amount': {0: 60000, 1: 100000, 2: 50000, 3: 30000}, + 'customer': {0: 'A', 1: 'A', 2: 'B', 3: 'C'}, + 'month': {0: 201307, 1: 201309, 2: 201308, 3: 201310}, + 'product': {0: 'a', 1: 'b', 2: 'c', 3: 'd'}, + 'quantity': {0: 2000000, 1: 500000, 2: 1000000, 3: 1000000}}) + pv_col = df.pivot_table('quantity', 'month', ['customer', 'product'], 
dropna=False) + pv_ind = df.pivot_table('quantity', ['customer', 'product'], 'month', dropna=False) + + m = MultiIndex.from_tuples([(u'A', u'a'), (u'A', u'b'), (u'A', u'c'), (u'A', u'd'), + (u'B', u'a'), (u'B', u'b'), (u'B', u'c'), (u'B', u'd'), + (u'C', u'a'), (u'C', u'b'), (u'C', u'c'), (u'C', u'd')]) + + assert_equal(pv_col.columns.values, m.values) + assert_equal(pv_ind.index.values, m.values) + def test_pass_array(self): result = self.data.pivot_table('D', rows=self.data.A, cols=self.data.C) @@ -374,6 +391,16 @@ def test_crosstab_pass_values(self): aggfunc=np.sum) tm.assert_frame_equal(table, expected) + def test_crosstab_dropna(self): + # GH 3820 + a = np.array(['foo', 'foo', 'foo', 'bar', 'bar', 'foo', 'foo'], dtype=object) + b = np.array(['one', 'one', 'two', 'one', 'two', 'two', 'two'], dtype=object) + c = np.array(['dull', 'dull', 'dull', 'dull', 'dull', 'shiny', 'shiny'], dtype=object) + res = crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'], dropna=False) + m = MultiIndex.from_tuples([('one', 'dull'), ('one', 'shiny'), + ('two', 'dull'), ('two', 'shiny')]) + assert_equal(res.columns.values, m.values) + if __name__ == '__main__': import nose nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], diff --git a/pandas/tools/tests/test_util.py b/pandas/tools/tests/test_util.py new file mode 100644 index 0000000000000..1888f2ede35e0 --- /dev/null +++ b/pandas/tools/tests/test_util.py @@ -0,0 +1,21 @@ +import os +import nose +import unittest + +import numpy as np +from numpy.testing import assert_equal + +from pandas.tools.util import cartesian_product + +class TestCartesianProduct(unittest.TestCase): + + def test_simple(self): + x, y = list('ABC'), [1, 22] + result = cartesian_product([x, y]) + expected = [np.array(['A', 'A', 'B', 'B', 'C', 'C']), + np.array([ 1, 22, 1, 22, 1, 22])] + assert_equal(result, expected) + +if __name__ == '__main__': + nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], + exit=False) diff 
--git a/pandas/tools/util.py b/pandas/tools/util.py index c08636050ca9e..1f2905b86f7d0 100644 --- a/pandas/tools/util.py +++ b/pandas/tools/util.py @@ -1,6 +1,32 @@ from pandas.core.index import Index +import numpy as np def match(needles, haystack): haystack = Index(haystack) needles = Index(needles) - return haystack.get_indexer(needles) \ No newline at end of file + return haystack.get_indexer(needles) + +def cartesian_product(X): + ''' + Numpy version of itertools.product or pandas.util.compat.product. + Sometimes faster (for large inputs)... + + Examples + -------- + >>> cartesian_product([list('ABC'), [1, 2]]) + [array(['A', 'A', 'B', 'B', 'C', 'C'], dtype='|S1'), + array([1, 2, 1, 2, 1, 2])] + + ''' + + lenX = np.fromiter((len(x) for x in X), dtype=int) + cumprodX = np.cumproduct(lenX) + + a = np.roll(cumprodX, 1) + a[0] = 1 + + b = cumprodX[-1] / cumprodX + + return [np.tile(np.repeat(x, b[i]), + np.product(a[i])) + for i, x in enumerate(X)] \ No newline at end of file
fixes #3820 ``` a = np.array(['foo', 'foo', 'foo', 'bar', 'bar', 'foo', 'foo'], dtype=object) b = np.array(['one', 'one', 'two', 'one', 'two', 'two', 'two'], dtype=object) c = np.array(['dull', 'dull', 'dull', 'dull', 'dull', 'shiny', 'shiny'], dtype=object) In [11]: pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'], drop_na=False) Out[11]: b one two c dull shiny dull shiny a bar 1 0 1 0 foo 2 0 1 2 ``` Also same argument for `pivot_table`.
https://api.github.com/repos/pandas-dev/pandas/pulls/4106
2013-07-02T18:16:57Z
2013-07-10T13:20:05Z
2013-07-10T13:20:05Z
2014-07-16T08:17:15Z
ENH: allow layout for grouped DataFrame histograms
diff --git a/doc/source/release.rst b/doc/source/release.rst index 54fa4d30bac0a..7c09c2a6f16ac 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -46,6 +46,8 @@ pandas 0.13 - ``HDFStore`` raising an invalid ``TypeError`` rather than ``ValueError`` when appending with a different block ordering (:issue:`4096`) + - The ``by`` argument now works correctly with the ``layout`` argument + (:issue:`4102`, :issue:`4014`) in ``*.hist`` plotting methods pandas 0.12 =========== diff --git a/doc/source/v0.13.0.txt b/doc/source/v0.13.0.txt index 52bd674cb7830..7f63c545c5664 100644 --- a/doc/source/v0.13.0.txt +++ b/doc/source/v0.13.0.txt @@ -21,6 +21,9 @@ Bug Fixes - ``HDFStore`` raising an invalid ``TypeError`` rather than ``ValueError`` when appending with a different block ordering (:issue:`4096`) + - The ``by`` argument now works correctly with the ``layout`` argument + (:issue:`4102`, :issue:`4014`) in ``*.hist`` plotting methods + See the :ref:`full release notes <release>` or issue tracker on GitHub for a complete list. 
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 1b7052bf62824..08b42d7cf8975 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -158,14 +158,12 @@ def test_bar_linewidth(self): for r in ax.patches: self.assert_(r.get_linewidth() == 2) - @slow def test_rotation(self): df = DataFrame(np.random.randn(5, 5)) ax = df.plot(rot=30) for l in ax.get_xticklabels(): self.assert_(l.get_rotation() == 30) - @slow def test_irregular_datetime(self): rng = date_range('1/1/2000', '3/1/2000') rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]] @@ -195,6 +193,36 @@ def test_hist(self): self.assertRaises(ValueError, self.ts.hist, by=self.ts.index, figure=fig) + @slow + def test_hist_layout(self): + n = 10 + df = DataFrame({'gender': np.array(['Male', + 'Female'])[random.randint(2, + size=n)], + 'height': random.normal(66, 4, size=n), 'weight': + random.normal(161, 32, size=n)}) + self.assertRaises(ValueError, df.height.hist, layout=(1, 1)) + self.assertRaises(ValueError, df.height.hist, layout=[1, 1]) + + @slow + def test_hist_layout_with_by(self): + import matplotlib.pyplot as plt + n = 10 + df = DataFrame({'gender': np.array(['Male', + 'Female'])[random.randint(2, + size=n)], + 'height': random.normal(66, 4, size=n), 'weight': + random.normal(161, 32, size=n), + 'category': random.randint(4, size=n)}) + _check_plot_works(df.height.hist, by=df.gender, layout=(2, 1)) + plt.close('all') + _check_plot_works(df.height.hist, by=df.gender, layout=(1, 2)) + plt.close('all') + _check_plot_works(df.weight.hist, by=df.category, layout=(1, 4)) + plt.close('all') + _check_plot_works(df.weight.hist, by=df.category, layout=(4, 1)) + plt.close('all') + def test_plot_fails_when_ax_differs_from_figure(self): from pylab import figure fig1 = figure() @@ -210,7 +238,6 @@ def test_kde(self): ax = self.ts.plot(kind='kde', logy=True) self.assert_(ax.get_yscale() == 'log') - @slow def test_kde_color(self): _skip_if_no_scipy() ax = self.ts.plot(kind='kde', 
logy=True, color='r') @@ -257,7 +284,6 @@ def test_partially_invalid_plot_data(self): for kind in kinds: self.assertRaises(TypeError, s.plot, kind=kind) - @slow def test_invalid_kind(self): s = Series([1, 2]) self.assertRaises(ValueError, s.plot, kind='aasdf') @@ -323,7 +349,6 @@ def test_plot(self): index=index) _check_plot_works(df.plot, title=u'\u03A3') - @slow def test_nonnumeric_exclude(self): import matplotlib.pyplot as plt plt.close('all') @@ -410,10 +435,9 @@ def test_xcompat(self): lines = ax.get_lines() self.assert_(isinstance(lines[0].get_xdata(), PeriodIndex)) - @slow def test_unsorted_index(self): - df = DataFrame({'y': range(100)}, - index=range(99, -1, -1)) + df = DataFrame({'y': np.arange(100)}, + index=np.arange(99, -1, -1)) ax = df.plot() l = ax.get_lines()[0] rs = l.get_xydata() @@ -479,7 +503,6 @@ def test_plot_bar(self): df = DataFrame({'a': [0, 1], 'b': [1, 0]}) _check_plot_works(df.plot, kind='bar') - @slow def test_bar_stacked_center(self): # GH2157 df = DataFrame({'A': [3] * 5, 'B': range(5)}, index=range(5)) @@ -487,7 +510,6 @@ def test_bar_stacked_center(self): self.assertEqual(ax.xaxis.get_ticklocs()[0], ax.patches[0].get_x() + ax.patches[0].get_width() / 2) - @slow def test_bar_center(self): df = DataFrame({'A': [3] * 5, 'B': range(5)}, index=range(5)) ax = df.plot(kind='bar', grid=True) @@ -710,7 +732,6 @@ def test_plot_int_columns(self): df = DataFrame(np.random.randn(100, 4)).cumsum() _check_plot_works(df.plot, legend=True) - @slow def test_legend_name(self): multi = DataFrame(np.random.randn(4, 4), columns=[np.array(['a', 'a', 'b', 'b']), @@ -800,7 +821,6 @@ def test_line_colors(self): plt.close('all') df.ix[:, [0]].plot(color='DodgerBlue') - @slow def test_default_color_cycle(self): import matplotlib.pyplot as plt plt.rcParams['axes.color_cycle'] = list('rgbk') @@ -815,7 +835,6 @@ def test_default_color_cycle(self): rs = l.get_color() self.assert_(xp == rs) - @slow def test_unordered_ts(self): df = DataFrame(np.array([3.0, 2.0, 
1.0]), index=[date(2012, 10, 1), @@ -828,7 +847,6 @@ def test_unordered_ts(self): ydata = ax.lines[0].get_ydata() self.assert_(np.all(ydata == np.array([1.0, 2.0, 3.0]))) - @slow def test_all_invalid_plot_data(self): kinds = 'line', 'bar', 'barh', 'kde', 'density' df = DataFrame(list('abcd')) @@ -843,7 +861,6 @@ def test_partially_invalid_plot_data(self): for kind in kinds: self.assertRaises(TypeError, df.plot, kind=kind) - @slow def test_invalid_kind(self): df = DataFrame(np.random.randn(10, 2)) self.assertRaises(ValueError, df.plot, kind='aasdf') @@ -880,7 +897,6 @@ def test_boxplot(self): _check_plot_works(grouped.boxplot) _check_plot_works(grouped.boxplot, subplots=False) - @slow def test_series_plot_color_kwargs(self): # #1890 import matplotlib.pyplot as plt @@ -890,7 +906,6 @@ def test_series_plot_color_kwargs(self): line = ax.get_lines()[0] self.assert_(line.get_color() == 'green') - @slow def test_time_series_plot_color_kwargs(self): # #1890 import matplotlib.pyplot as plt @@ -901,7 +916,6 @@ def test_time_series_plot_color_kwargs(self): line = ax.get_lines()[0] self.assert_(line.get_color() == 'green') - @slow def test_time_series_plot_color_with_empty_kwargs(self): import matplotlib.pyplot as plt @@ -950,6 +964,33 @@ def test_grouped_hist(self): self.assertRaises(AttributeError, plotting.grouped_hist, df.A, by=df.C, foo='bar') + @slow + def test_grouped_hist_layout(self): + import matplotlib.pyplot as plt + n = 100 + df = DataFrame({'gender': np.array(['Male', + 'Female'])[random.randint(2, + size=n)], + 'height': random.normal(66, 4, size=n), + 'weight': random.normal(161, 32, size=n), + 'category': random.randint(4, size=n)}) + self.assertRaises(ValueError, df.hist, column='weight', by=df.gender, + layout=(1, 1)) + self.assertRaises(ValueError, df.hist, column='weight', by=df.gender, + layout=(1,)) + self.assertRaises(ValueError, df.hist, column='height', by=df.category, + layout=(1, 3)) + self.assertRaises(ValueError, df.hist, column='height', 
by=df.category, + layout=(2, 1)) + self.assertEqual(df.hist(column='height', by=df.gender, + layout=(2, 1)).shape, (2,)) + plt.close('all') + self.assertEqual(df.hist(column='height', by=df.category, + layout=(4, 1)).shape, (4,)) + plt.close('all') + self.assertEqual(df.hist(column='height', by=df.category, + layout=(4, 2)).shape, (4, 2)) + @slow def test_axis_shared(self): # GH4089 diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index ad305382dd8cc..1ffdf83b02763 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1929,9 +1929,9 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, data = data[column] if by is not None: - axes = grouped_hist(data, by=by, ax=ax, grid=grid, figsize=figsize, - sharex=sharex, sharey=sharey, **kwds) + sharex=sharex, sharey=sharey, layout=layout, + **kwds) for ax in axes.ravel(): if xlabelsize is not None: @@ -2030,6 +2030,9 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None, fig.set_size_inches(*figsize, forward=True) if by is None: + if kwds.get('layout', None): + raise ValueError("The 'layout' keyword is not supported when " + "'by' is None") if ax is None: ax = fig.add_subplot(111) if ax.get_figure() != fig: @@ -2146,9 +2149,12 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True, grouped = grouped[column] ngroups = len(grouped) - nrows, ncols = layout or _get_layout(ngroups) + if nrows * ncols < ngroups: + raise ValueError("Number of plots in 'layout' must greater than or " + "equal to the number " "of groups in 'by'") + if figsize is None: # our favorite default beating matplotlib's idea of the # default size
Also remove the slow decorator for tests that take < 100 ms time closes #4102. closes #4194.
https://api.github.com/repos/pandas-dev/pandas/pulls/4104
2013-07-02T15:59:06Z
2013-07-25T13:33:03Z
2013-07-25T13:33:03Z
2014-06-27T11:41:44Z
BUG: (GH 4096) block ordering is somewhat non-deterministic in HDFStore; reorder to the existing store
diff --git a/doc/source/release.rst b/doc/source/release.rst index 36e86629c385a..691c7312dde72 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -299,6 +299,8 @@ pandas 0.12 rewritten in an incompatible way (:issue:`4062`, :issue:`4063`) - Fixed bug where sharex and sharey were not being passed to grouped_hist (:issue:`4089`) + - Fix bug where ``HDFStore`` will fail to append because of a different block + ordering on-disk (:issue:`4096`) pandas 0.11.0 diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index d22009be05429..5bf309edffa74 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -2651,7 +2651,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, obj = obj.reindex_axis(a[1], axis=a[0], copy=False) # figure out data_columns and get out blocks - block_obj = self.get_object(obj) + block_obj = self.get_object(obj).consolidate() blocks = block_obj._data.blocks if len(self.non_index_axes): axis, axis_labels = self.non_index_axes[0] @@ -2663,6 +2663,19 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, blocks.extend(block_obj.reindex_axis( [c], axis=axis, copy=False)._data.blocks) + # reorder the blocks in the same order as the existing_table if we can + if existing_table is not None: + by_items = dict([ (tuple(b.items.tolist()),b) for b in blocks ]) + new_blocks = [] + for ea in existing_table.values_axes: + items = tuple(ea.values) + try: + b = by_items.pop(items) + new_blocks.append(b) + except: + raise ValueError("cannot match existing table structure for [%s] on appending data" % items) + blocks = new_blocks + # add my values self.values_axes = [] for i, b in enumerate(blocks): diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index f062216986c98..00d8089ad2ee7 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -596,6 +596,31 @@ def test_append_frame_column_oriented(self): expected = 
df.reindex(columns=['A'], index=df.index[0:4]) tm.assert_frame_equal(expected, result) + def test_append_with_different_block_ordering(self): + + #GH 4096; using same frames, but different block orderings + with ensure_clean(self.path) as store: + + for i in range(10): + + df = DataFrame(np.random.randn(10,2),columns=list('AB')) + df['index'] = range(10) + df['index'] += i*10 + df['int64'] = Series([1]*len(df),dtype='int64') + df['int16'] = Series([1]*len(df),dtype='int16') + + if i % 2 == 0: + del df['int64'] + df['int64'] = Series([1]*len(df),dtype='int64') + if i % 3 == 0: + a = df.pop('A') + df['A'] = a + + df.set_index('index',inplace=True) + + store.append('df',df) + + def test_ndim_indexables(self): """ test using ndim tables in new ways"""
closes #4096
https://api.github.com/repos/pandas-dev/pandas/pulls/4100
2013-07-01T20:34:46Z
2013-07-01T21:27:26Z
2013-07-01T21:27:26Z
2014-07-16T08:17:10Z
BUG: GH4098, HDFStore not recreating a datetime index properly when has a timezone
diff --git a/doc/source/release.rst b/doc/source/release.rst index 2aa6e1a80085f..36e86629c385a 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -52,7 +52,7 @@ pandas 0.12 - A ``filter`` method on grouped Series or DataFrames returns a subset of the original (:issue:`3680`, :issue:`919`) - Access to historical Google Finance data in pandas.io.data (:issue:`3814`) - - DataFrame plotting methods can sample column colors from a Matplotlib + - DataFrame plotting methods can sample column colors from a Matplotlib colormap via the ``colormap`` keyword. (:issue:`3860`) **Improvements to existing features** @@ -63,7 +63,7 @@ pandas 0.12 - ``convert_objects`` now accepts a ``copy`` parameter (defaults to ``True``) - ``HDFStore`` - - will retain index attributes (freq,tz,name) on recreation (:issue:`3499`) + - will retain index attributes (freq,tz,name) on recreation (:issue:`3499`,:issue:`4098`) - will warn with a ``AttributeConflictWarning`` if you are attempting to append an index with a different frequency than the existing, or attempting to append an index with a different name than the existing @@ -158,7 +158,7 @@ pandas 0.12 - removed ``clipboard`` support to ``pandas.io.clipboard`` - replace top-level and instance methods ``save`` and ``load`` with top-level ``read_pickle`` and ``to_pickle`` instance method, ``save`` and - ``load`` will give deprecation warning. + ``load`` will give deprecation warning. 
- the ``method`` and ``axis`` arguments of ``DataFrame.replace()`` are deprecated - set FutureWarning to require data_source, and to replace year/month with @@ -215,7 +215,7 @@ pandas 0.12 - Extend ``reindex`` to correctly deal with non-unique indices (:issue:`3679`) - ``DataFrame.itertuples()`` now works with frames with duplicate column names (:issue:`3873`) - - Bug in non-unique indexing via ``iloc`` (:issue:`4017`); added ``takeable`` argument to + - Bug in non-unique indexing via ``iloc`` (:issue:`4017`); added ``takeable`` argument to ``reindex`` for location-based taking - Fixed bug in groupby with empty series referencing a variable before assignment. (:issue:`3510`) @@ -272,16 +272,16 @@ pandas 0.12 - Correctly parse when passed the ``dtype=str`` (or other variable-len string dtypes) in ``read_csv`` (:issue:`3795`) - Fix index name not propogating when using ``loc/ix`` (:issue:`3880`) - - Fix groupby when applying a custom function resulting in a returned DataFrame was + - Fix groupby when applying a custom function resulting in a returned DataFrame was not converting dtypes (:issue:`3911`) - Fixed a bug where ``DataFrame.replace`` with a compiled regular expression in the ``to_replace`` argument wasn't working (:issue:`3907`) - Fixed ``__truediv__`` in Python 2.7 with ``numexpr`` installed to actually do true division when dividing two integer arrays with at least 10000 cells total (:issue:`3764`) - Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`) - - csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was + - csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was specified (:issue:`3967`), python parser failing with ``chunksize=1`` - - Fix index name not propogating when using ``shift`` + - Fix index name not propogating when using ``shift`` - Fixed dropna=False being ignored with multi-index stack (:issue:`3997`) - Fixed flattening of columns when renaming 
MultiIndex columns DataFrame (:issue:`4004`) - Fix ``Series.clip`` for datetime series. NA/NaN threshold values will now throw ValueError (:issue:`3996`) diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 6cfbfd0f2d60a..d22009be05429 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -151,8 +151,8 @@ def _tables(): def h5_open(path, mode): tables = _tables() return tables.openFile(path, mode) - - + + @contextmanager def get_store(path, mode='a', complevel=None, complib=None, fletcher32=False): @@ -217,7 +217,7 @@ def read_hdf(path_or_buf, key, **kwargs): # a passed store; user controls open/close f(path_or_buf, False) - + class HDFStore(object): """ dict-like IO interface for storing pandas objects in PyTables @@ -757,7 +757,7 @@ def get_node(self, key): def get_storer(self, key): """ return the storer object for a key, raise if not in the file """ group = self.get_node(key) - if group is None: + if group is None: return None s = self._create_storer(group) s.infer_axes() @@ -810,9 +810,9 @@ def _create_storer(self, group, value = None, table = False, append = False, **k """ return a suitable Storer class to operate """ def error(t): - raise TypeError("cannot properly create the storer for: [%s] [group->%s,value->%s,table->%s,append->%s,kwargs->%s]" % + raise TypeError("cannot properly create the storer for: [%s] [group->%s,value->%s,table->%s,append->%s,kwargs->%s]" % (t,group,type(value),table,append,kwargs)) - + pt = _ensure_decoded(getattr(group._v_attrs,'pandas_type',None)) tt = _ensure_decoded(getattr(group._v_attrs,'table_type',None)) @@ -863,7 +863,7 @@ def error(t): tt = u'appendable_ndim' else: - + # distiguish between a frame/table tt = u'legacy_panel' try: @@ -930,7 +930,7 @@ def _read_group(self, group, **kwargs): class TableIterator(object): """ define the iteration interface on a table - + Parameters ---------- @@ -974,7 +974,7 @@ def __iter__(self): yield v self.close() - + def close(self): if self.auto_close: 
self.store.close() @@ -1003,7 +1003,7 @@ class IndexCol(object): _info_fields = ['freq','tz','index_name'] def __init__(self, values=None, kind=None, typ=None, cname=None, itemsize=None, - name=None, axis=None, kind_attr=None, pos=None, freq=None, tz=None, + name=None, axis=None, kind_attr=None, pos=None, freq=None, tz=None, index_name=None, **kwargs): self.values = values self.kind = kind @@ -1088,21 +1088,27 @@ def convert(self, values, nan_rep, encoding): except: pass + values =_maybe_convert(values, self.kind, encoding) + kwargs = dict() if self.freq is not None: kwargs['freq'] = _ensure_decoded(self.freq) - if self.tz is not None: - kwargs['tz'] = _ensure_decoded(self.tz) if self.index_name is not None: kwargs['name'] = _ensure_decoded(self.index_name) try: - self.values = Index(_maybe_convert(values, self.kind, self.encoding), **kwargs) + self.values = Index(values, **kwargs) except: # if the output freq is different that what we recorded, then infer it if 'freq' in kwargs: kwargs['freq'] = 'infer' self.values = Index(_maybe_convert(values, self.kind, encoding), **kwargs) + + # set the timezone if indicated + # we stored in utc, so reverse to local timezone + if self.tz is not None: + self.values = self.values.tz_localize('UTC').tz_convert(_ensure_decoded(self.tz)) + return self def take_data(self): @@ -1189,7 +1195,7 @@ def update_info(self, info): idx = info[self.name] except: idx = info[self.name] = dict() - + existing_value = idx.get(key) if key in idx and value is not None and existing_value != value: @@ -1235,7 +1241,7 @@ def is_indexed(self): def convert(self, values, nan_rep, encoding): """ set the values from this selection: take = take ownership """ - + self.values = Int64Index(np.arange(self.table.nrows)) return self @@ -1359,7 +1365,13 @@ def set_atom(self, block, existing_col, min_itemsize, nan_rep, info, encoding=No "invalid timezone specification") values = index.tz_convert('UTC').values.view('i8') - self.tz = tz + + # store a converted 
timezone + zone = tslib.get_timezone(index.tz) + if zone is None: + zone = tslib.tot_seconds(index.tz.utcoffset()) + self.tz = zone + self.update_info(info) self.set_atom_datetime64(block, values.reshape(block.values.shape)) @@ -1398,7 +1410,7 @@ def set_atom_string(self, block, existing_col, min_itemsize, nan_rep, encoding): inferred_type = lib.infer_dtype(col.ravel()) if inferred_type != 'string': raise TypeError("Cannot serialize the column [%s] because\n" - "its data contents are [%s] object dtype" % + "its data contents are [%s] object dtype" % (item,inferred_type)) @@ -1607,7 +1619,7 @@ def __repr__(self): s = "[%s]" % ','.join([ str(x) for x in s ]) return "%-12.12s (shape->%s)" % (self.pandas_type,s) return self.pandas_type - + def __str__(self): return self.__repr__() @@ -1929,7 +1941,7 @@ def write_array_empty(self, key, value): self._handle.createArray(self.group, key, arr) getattr(self.group, key)._v_attrs.value_type = str(value.dtype) getattr(self.group, key)._v_attrs.shape = value.shape - + def write_array(self, key, value, items=None): if key in self.group: self._handle.removeNode(self.group, key) @@ -2142,7 +2154,7 @@ def shape(self): try: ndim = self.ndim - # items + # items items = 0 for i in range(self.nblocks): node = getattr(self.group, 'block%d_items' % i) @@ -2212,7 +2224,7 @@ class PanelStorer(BlockManagerStorer): pandas_kind = u'wide' obj_type = Panel is_shape_reversed = True - + def write(self, obj, **kwargs): obj._consolidate_inplace() return super(PanelStorer, self).write(obj, **kwargs) @@ -2270,7 +2282,7 @@ def __repr__(self): self.ncols, ','.join([ a.name for a in self.index_axes ]), dc) - + def __getitem__(self, c): """ return the axis for c """ for a in self.axes: @@ -2568,7 +2580,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, try: axes = _AXES_MAP[type(obj)] except: - raise TypeError("cannot properly create the storer for: [group->%s,value->%s]" % + raise TypeError("cannot properly create the 
storer for: [group->%s,value->%s]" % (self.group._v_name,type(obj))) # map axes to numbers @@ -2597,7 +2609,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, # nan_representation if nan_rep is None: nan_rep = 'nan' - + self.nan_rep = nan_rep # create axes to index and non_index @@ -2665,7 +2677,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None, data_columns=None, name = b.items[0] self.data_columns.append(name) - # make sure that we match up the existing columns + # make sure that we match up the existing columns # if we have an existing table if existing_table is not None and validate: try: @@ -2740,7 +2752,7 @@ def process_filter(field, filt): return obj.ix._getitem_axis(takers,axis=axis_number) raise ValueError("cannot find the field [%s] for filtering!" % field) - + obj = process_filter(field, filt) return obj @@ -3053,7 +3065,7 @@ def write_data_chunk(self, indexes, mask, search, values): self.table.flush() except (Exception), detail: raise Exception("tables cannot write this data -> %s" % str(detail)) - + def delete(self, where=None, **kwargs): # delete all rows (and return the nrows) @@ -3113,7 +3125,7 @@ class AppendableFrameTable(AppendableTable): table_type = u'appendable_frame' ndim = 2 obj_type = DataFrame - + @property def is_transposed(self): return self.index_axes[0].axis == 1 @@ -3266,7 +3278,7 @@ def _convert_index(index, encoding=None): if isinstance(index, DatetimeIndex): converted = index.asi8 - return IndexCol(converted, 'datetime64', _tables().Int64Col(), + return IndexCol(converted, 'datetime64', _tables().Int64Col(), freq=getattr(index,'freq',None), tz=getattr(index,'tz',None), index_name=index_name) elif isinstance(index, (Int64Index, PeriodIndex)): @@ -3382,7 +3394,7 @@ def _unconvert_string_array(data, nan_rep=None, encoding=None): if nan_rep is None: nan_rep = 'nan' - + data = lib.string_array_replace_from_nan_rep(data, nan_rep) return data.reshape(shape) @@ -3421,7 +3433,7 @@ class 
Term(object): value : a value or list of values (required) queryables : a kinds map (dict of column name -> kind), or None i column is non-indexable encoding : an encoding that will encode the query terms - + Returns ------- a Term object @@ -3582,7 +3594,7 @@ def eval(self): if self.is_in_table: self.condition = self.generate(values[0]) - + else: raise TypeError("passing a filterable condition to a non-table indexer [%s]" % str(self)) diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py index f348e1ddce461..f062216986c98 100644 --- a/pandas/io/tests/test_pytables.py +++ b/pandas/io/tests/test_pytables.py @@ -10,7 +10,7 @@ import pandas from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range, date_range, Index) -from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf, +from pandas.io.pytables import (HDFStore, get_store, Term, read_hdf, IncompatibilityWarning, PerformanceWarning, AttributeConflictWarning) import pandas.util.testing as tm @@ -99,7 +99,7 @@ def test_factory_fun(self): try: with get_store(self.path) as tbl: tbl['a'] = tm.makeDataFrame() - + with get_store(self.path) as tbl: self.assertEquals(len(tbl), 1) self.assertEquals(type(tbl['a']), DataFrame) @@ -113,13 +113,13 @@ def test_conv_read_write(self): def roundtrip(key, obj,**kwargs): obj.to_hdf(self.path, key,**kwargs) return read_hdf(self.path, key) - + o = tm.makeTimeSeries() assert_series_equal(o, roundtrip('series',o)) o = tm.makeStringSeries() assert_series_equal(o, roundtrip('string_series',o)) - + o = tm.makeDataFrame() assert_frame_equal(o, roundtrip('frame',o)) @@ -193,7 +193,7 @@ def test_contains(self): self.assert_('/foo/bar' in store) self.assert_('/foo/b' not in store) self.assert_('bar' not in store) - + # GH 2694 warnings.filterwarnings('ignore', category=tables.NaturalNameWarning) store['node())'] = tm.makeDataFrame() @@ -212,11 +212,11 @@ def test_versioning(self): self.assert_(store.root.a._v_attrs.pandas_version == '0.10.1') 
self.assert_(store.root.b._v_attrs.pandas_version == '0.10.1') self.assert_(store.root.df1._v_attrs.pandas_version == '0.10.1') - + # write a file and wipe its versioning _maybe_remove(store, 'df2') store.append('df2', df) - + # this is an error because its table_type is appendable, but no version # info store.get_node('df2')._v_attrs.pandas_version = None @@ -229,7 +229,7 @@ def test_reopen_handle(self): store.open('w', warn=False) self.assert_(store._handle.isopen) self.assertEquals(len(store), 0) - + def test_flush(self): with ensure_clean(self.path) as store: @@ -243,11 +243,11 @@ def test_get(self): left = store.get('a') right = store['a'] tm.assert_series_equal(left, right) - + left = store.get('/a') right = store['/a'] tm.assert_series_equal(left, right) - + self.assertRaises(KeyError, store.get, 'b') def test_getattr(self): @@ -290,19 +290,19 @@ def test_put(self): store['foo'] = df[:10] store['/foo'] = df[:10] store.put('c', df[:10], table=True) - + # not OK, not a table self.assertRaises( ValueError, store.put, 'b', df[10:], append=True) - + # node does not currently exist, test _is_table_type returns False in # this case # _maybe_remove(store, 'f') # self.assertRaises(ValueError, store.put, 'f', df[10:], append=True) - + # can't put to a table (use append instead) self.assertRaises(ValueError, store.put, 'c', df[10:], append=True) - + # overwrite table store.put('c', df[:10], table=True, append=False) tm.assert_frame_equal(df[:10], store['c']) @@ -315,20 +315,20 @@ def test_put_string_index(self): ["I am a very long string index: %s" % i for i in range(20)]) s = Series(np.arange(20), index=index) df = DataFrame({'A': s, 'B': s}) - + store['a'] = s tm.assert_series_equal(store['a'], s) - + store['b'] = df tm.assert_frame_equal(store['b'], df) - + # mixed length index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + ["I am a very long string index: %s" % i for i in range(20)]) s = Series(np.arange(21), index=index) df = DataFrame({'A': s, 'B': s}) 
store['a'] = s tm.assert_series_equal(store['a'], s) - + store['b'] = df tm.assert_frame_equal(store['b'], df) @@ -339,7 +339,7 @@ def test_put_compression(self): store.put('c', df, table=True, complib='zlib') tm.assert_frame_equal(store['c'], df) - + # can't compress if table=False self.assertRaises(ValueError, store.put, 'b', df, table=False, complib='zlib') @@ -353,10 +353,10 @@ def test_put_compression_blosc(self): # can't compress if table=False self.assertRaises(ValueError, store.put, 'b', df, table=False, complib='blosc') - + store.put('c', df, table=True, complib='blosc') tm.assert_frame_equal(store['c'], df) - + def test_put_integer(self): # non-date, non-string index df = DataFrame(np.random.randn(50, 100)) @@ -385,7 +385,7 @@ def test_put_mixed_type(self): expected = store.get('df') tm.assert_frame_equal(expected,df) warnings.filterwarnings('always', category=PerformanceWarning) - + def test_append(self): with ensure_clean(self.path) as store: @@ -399,7 +399,7 @@ def test_append(self): store.put('df2', df[:10], table=True) store.append('df2', df[10:]) tm.assert_frame_equal(store['df2'], df) - + _maybe_remove(store, 'df3') store.append('/df3', df[:10]) store.append('/df3', df[10:]) @@ -412,7 +412,7 @@ def test_append(self): store.append('/df3 foo', df[10:]) tm.assert_frame_equal(store['df3 foo'], df) warnings.filterwarnings('always', category=tables.NaturalNameWarning) - + # panel wp = tm.makePanel() _maybe_remove(store, 'wp1') @@ -426,7 +426,7 @@ def test_append(self): store.append('p4d', p4d.ix[:, :, :10, :]) store.append('p4d', p4d.ix[:, :, 10:, :]) tm.assert_panel4d_equal(store['p4d'], p4d) - + # test using axis labels _maybe_remove(store, 'p4d') store.append('p4d', p4d.ix[:, :, :10, :], axes=[ @@ -434,7 +434,7 @@ def test_append(self): store.append('p4d', p4d.ix[:, :, 10:, :], axes=[ 'items', 'major_axis', 'minor_axis']) tm.assert_panel4d_equal(store['p4d'], p4d) - + # test using differnt number of items on each axis p4d2 = p4d.copy() p4d2['l4'] = 
p4d['l1'] @@ -476,7 +476,7 @@ def test_append(self): tm.assert_frame_equal(store['uints'], uint_data) def test_encoding(self): - + if sys.byteorder != 'little': raise nose.SkipTest('system byteorder is not little, skipping test_encoding!') @@ -581,11 +581,11 @@ def test_append_frame_column_oriented(self): store.append('df1', df.ix[:, :2], axes=['columns']) store.append('df1', df.ix[:, 2:]) tm.assert_frame_equal(store['df1'], df) - + result = store.select('df1', 'columns=A') expected = df.reindex(columns=['A']) tm.assert_frame_equal(expected, result) - + # this isn't supported self.assertRaises(TypeError, store.select, 'df1', ( 'columns=A', Term('index', '>', df.index[4]))) @@ -616,7 +616,7 @@ def check_indexers(key, indexers): store.append('p4d', p4d.ix[:, :, 10:, :]) tm.assert_panel4d_equal(store.select('p4d'), p4d) check_indexers('p4d', indexers) - + # same as above, but try to append with differnt axes _maybe_remove(store, 'p4d') store.append('p4d', p4d.ix[:, :, :10, :], axes=indexers) @@ -637,7 +637,7 @@ def check_indexers(key, indexers): store.append('p4d', p4d.ix[:, :, 10:, :]) tm.assert_panel4d_equal(store['p4d'], p4d) check_indexers('p4d', indexers) - + # different than default indexables #2 indexers = ['major_axis', 'labels', 'minor_axis'] _maybe_remove(store, 'p4d') @@ -645,7 +645,7 @@ def check_indexers(key, indexers): store.append('p4d', p4d.ix[:, :, 10:, :]) tm.assert_panel4d_equal(store['p4d'], p4d) check_indexers('p4d', indexers) - + # partial selection result = store.select('p4d', ['labels=l1']) expected = p4d.reindex(labels=['l1']) @@ -680,7 +680,7 @@ def check_col(key,name,size): expected = expected.reindex(minor_axis=sorted(expected.minor_axis)) tm.assert_panel_equal(store['s1'], expected) check_col('s1', 'minor_axis', 20) - + # test dict format store.append('s2', wp, min_itemsize={'minor_axis': 20}) store.append('s2', wp2) @@ -688,11 +688,11 @@ def check_col(key,name,size): expected = expected.reindex(minor_axis=sorted(expected.minor_axis)) 
tm.assert_panel_equal(store['s2'], expected) check_col('s2', 'minor_axis', 20) - + # apply the wrong field (similar to #1) store.append('s3', wp, min_itemsize={'major_axis': 20}) self.assertRaises(ValueError, store.append, 's3', wp2) - + # test truncation of bigger strings store.append('s4', wp) self.assertRaises(ValueError, store.append, 's4', wp2) @@ -785,7 +785,7 @@ def test_append_with_data_columns(self): # check that we have indicies created assert(store._handle.root.df.table.cols.index.is_indexed is True) assert(store._handle.root.df.table.cols.B.is_indexed is True) - + # data column searching result = store.select('df', [Term('B>0')]) expected = df[df.B > 0] @@ -808,11 +808,11 @@ def test_append_with_data_columns(self): result = store.select('df', [Term('string', '=', 'foo')]) expected = df_new[df_new.string == 'foo'] tm.assert_frame_equal(result, expected) - + # using min_itemsize and a data column def check_col(key,name,size): self.assert_(getattr(store.get_storer(key).table.description,name).itemsize == size) - + with ensure_clean(self.path) as store: _maybe_remove(store, 'df') store.append('df', df_new, data_columns=['string'], @@ -874,12 +874,12 @@ def check_col(key,name,size): df_dc['datetime'] = Timestamp('20010102') df_dc = df_dc.convert_objects() df_dc.ix[3:5, ['A', 'B', 'datetime']] = np.nan - + _maybe_remove(store, 'df_dc') store.append('df_dc', df_dc, data_columns=['B', 'C', 'string', 'string2', 'datetime']) result = store.select('df_dc', [Term('B>0')]) - + expected = df_dc[df_dc.B > 0] tm.assert_frame_equal(result, expected) @@ -888,9 +888,9 @@ def check_col(key,name,size): expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & ( df_dc.string == 'foo')] tm.assert_frame_equal(result, expected) - + def test_create_table_index(self): - + with ensure_clean(self.path) as store: def col(t,column): @@ -902,7 +902,7 @@ def col(t,column): store.create_table_index('p5', columns=['major_axis']) assert(col('p5', 'major_axis').is_indexed is True) assert(col('p5', 
'minor_axis').is_indexed is False) - + # index=True store.append('p5i', wp, index=True) assert(col('p5i', 'major_axis').is_indexed is True) @@ -926,7 +926,7 @@ def col(t,column): store.create_table_index('p5', optlevel=1, kind='light') assert(col('p5', 'major_axis').index.optlevel == 1) assert(col('p5', 'minor_axis').index.kind == 'light') - + # data columns df = tm.makeTimeDataFrame() df['string'] = 'foo' @@ -935,27 +935,27 @@ def col(t,column): assert(col('f', 'index').is_indexed is True) assert(col('f', 'string').is_indexed is True) assert(col('f', 'string2').is_indexed is True) - + # specify index=columns store.append( 'f2', df, index=['string'], data_columns=['string', 'string2']) assert(col('f2', 'index').is_indexed is False) assert(col('f2', 'string').is_indexed is True) assert(col('f2', 'string2').is_indexed is False) - + # try to index a non-table _maybe_remove(store, 'f2') store.put('f2', df) self.assertRaises(TypeError, store.create_table_index, 'f2') - + # try to change the version supports flag from pandas.io import pytables pytables._table_supports_index = False self.assertRaises(Exception, store.create_table_index, 'f') - + # test out some versions original = tables.__version__ - + for v in ['2.2', '2.2b']: pytables._table_mod = None pytables._table_supports_index = False @@ -1018,7 +1018,7 @@ def f(chunksize): start_time = time.time() print ("big_table2 frame [chunk->%s]" % c) rows = f(c) - print ("big_table2 frame [rows->%s,chunk->%s] -> %5.2f" + print ("big_table2 frame [rows->%s,chunk->%s] -> %5.2f" % (rows, c, time.time() - start_time)) def test_big_put_frame(self): @@ -1034,7 +1034,7 @@ def test_big_put_frame(self): for x in xrange(20): df['datetime%03d' % x] = datetime.datetime(2001, 1, 2, 0, 0) - print ("\nbig_put frame (creation of df) [rows->%s] -> %5.2f" + print ("\nbig_put frame (creation of df) [rows->%s] -> %5.2f" % (len(df.index), time.time() - start_time)) with ensure_clean(self.path, mode='w') as store: @@ -1043,7 +1043,7 @@ def 
test_big_put_frame(self): store.put('df', df) print (df.get_dtype_counts()) - print ("big_put frame [shape->%s] -> %5.2f" + print ("big_put frame [shape->%s] -> %5.2f" % (df.shape, time.time() - start_time)) def test_big_table_panel(self): @@ -1075,7 +1075,7 @@ def test_append_diff_item_order(self): wp = tm.makePanel() wp1 = wp.ix[:, :10, :] wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :] - + with ensure_clean(self.path) as store: store.put('panel', wp1, table=True) self.assertRaises(ValueError, store.put, 'panel', wp2, @@ -1158,7 +1158,7 @@ def test_append_raise(self): df['invalid2'] = [['a']] * len(df) df['invalid3'] = [['a']] * len(df) self.assertRaises(TypeError, store.append,'df',df) - + # datetime with embedded nans as object df = tm.makeDataFrame() s = Series(datetime.datetime(2001,1,2),index=df.index,dtype=object) @@ -1176,10 +1176,10 @@ def test_append_raise(self): # appending an incompatbile table df = tm.makeDataFrame() store.append('df',df) - + df['foo'] = 'foo' self.assertRaises(ValueError, store.append,'df',df) - + def test_table_index_incompatible_dtypes(self): df1 = DataFrame({'a': [1, 2, 3]}) df2 = DataFrame({'a': [4, 5, 6]}, @@ -1196,11 +1196,11 @@ def test_table_values_dtypes_roundtrip(self): df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8') store.append('df_f8', df1) assert df1.dtypes == store['df_f8'].dtypes - + df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8') store.append('df_i8', df2) assert df2.dtypes == store['df_i8'].dtypes - + # incompatible dtype self.assertRaises(ValueError, store.append, 'df_i8', df1) @@ -1209,9 +1209,9 @@ def test_table_values_dtypes_roundtrip(self): store.append('df_f4', df1) assert df1.dtypes == store['df_f4'].dtypes assert df1.dtypes[0] == 'float32' - + # check with mixed dtypes - df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in + df1 = DataFrame(dict([ (c,Series(np.random.randn(5),dtype=c)) for c in ['float32','float64','int32','int64','int16','int8'] ])) df1['string'] = 'foo' df1['float322'] = 1. 
@@ -1289,7 +1289,7 @@ def test_unimplemented_dtypes_table_columns(self): # py3 ok for unicode if not py3compat.PY3: l.append(('unicode', u'\u03c3')) - + ### currently not supported dtypes #### for n, f in l: df = tm.makeDataFrame() @@ -1310,25 +1310,28 @@ def test_unimplemented_dtypes_table_columns(self): def test_table_append_with_timezones(self): - with ensure_clean(self.path) as store: - - def compare(a,b): - tm.assert_frame_equal(a,b) - - # compare the zones on each element - for c in a.columns: - for i in a.index: - a_e = a[c][i] - b_e = b[c][i] - if not (a_e == b_e and a_e.tz == b_e.tz): - raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e,b_e)) + from datetime import timedelta + + def compare(a,b): + tm.assert_frame_equal(a,b) - from datetime import timedelta + # compare the zones on each element + for c in a.columns: + for i in a.index: + a_e = a[c][i] + b_e = b[c][i] + if not (a_e == b_e and a_e.tz == b_e.tz): + raise AssertionError("invalid tz comparsion [%s] [%s]" % (a_e,b_e)) + + # as columns + with ensure_clean(self.path) as store: _maybe_remove(store, 'df_tz') df = DataFrame(dict(A = [ Timestamp('20130102 2:00:00',tz='US/Eastern') + timedelta(hours=1)*i for i in range(5) ])) store.append('df_tz',df,data_columns=['A']) - compare(store['df_tz'],df) + result = store['df_tz'] + compare(result,df) + assert_frame_equal(result,df) # select with tz aware compare(store.select('df_tz',where=Term('A','>=',df.A[3])),df[df.A>=df.A[3]]) @@ -1336,7 +1339,9 @@ def compare(a,b): _maybe_remove(store, 'df_tz') df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130103',tz='US/Eastern')),index=range(5)) store.append('df_tz',df) - compare(store['df_tz'],df) + result = store['df_tz'] + compare(result,df) + assert_frame_equal(result,df) _maybe_remove(store, 'df_tz') df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='EET')),index=range(5)) @@ -1345,12 +1350,30 @@ def compare(a,b): # this is ok 
_maybe_remove(store, 'df_tz') store.append('df_tz',df,data_columns=['A','B']) - compare(store['df_tz'],df) + result = store['df_tz'] + compare(result,df) + assert_frame_equal(result,df) # can't append with diff timezone df = DataFrame(dict(A = Timestamp('20130102',tz='US/Eastern'), B = Timestamp('20130102',tz='CET')),index=range(5)) self.assertRaises(ValueError, store.append, 'df_tz', df) + # as index + with ensure_clean(self.path) as store: + + # GH 4098 example + df = DataFrame(dict(A = Series(xrange(3), index=date_range('2000-1-1',periods=3,freq='H', tz='US/Eastern')))) + + _maybe_remove(store, 'df') + store.put('df',df) + result = store.select('df') + assert_frame_equal(result,df) + + _maybe_remove(store, 'df') + store.append('df',df) + result = store.select('df') + assert_frame_equal(result,df) + def test_remove(self): with ensure_clean(self.path) as store: @@ -1362,7 +1385,7 @@ def test_remove(self): _maybe_remove(store, 'a') self.assertEquals(len(store), 1) tm.assert_frame_equal(df, store['b']) - + _maybe_remove(store, 'b') self.assertEquals(len(store), 0) @@ -1375,12 +1398,12 @@ def test_remove(self): _maybe_remove(store, 'foo') _maybe_remove(store, 'b/foo') self.assertEquals(len(store), 1) - + store['a'] = ts store['b/foo'] = df _maybe_remove(store, 'b') self.assertEquals(len(store), 1) - + # __delitem__ store['a'] = ts store['b'] = df @@ -1395,7 +1418,7 @@ def test_remove_where(self): # non-existance crit1 = Term('index', '>', 'foo') self.assertRaises(KeyError, store.remove, 'a', [crit1]) - + # try to remove non-table (with crit) # non-table ok (where = None) wp = tm.makePanel() @@ -1404,7 +1427,7 @@ def test_remove_where(self): rs = store.select('wp') expected = wp.reindex(minor_axis=['B', 'C']) tm.assert_panel_equal(rs, expected) - + # empty where _maybe_remove(store, 'wp') store.put('wp', wp, table=True) @@ -1439,7 +1462,7 @@ def test_remove_crit(self): result = store.select('wp3') expected = wp.reindex(major_axis=wp.major_axis - date4) 
tm.assert_panel_equal(result, expected) - + # upper half store.put('wp', wp, table=True) date = wp.major_axis[len(wp.major_axis) // 2] @@ -1447,16 +1470,16 @@ def test_remove_crit(self): crit1 = Term('major_axis', '>', date) crit2 = Term('minor_axis', ['A', 'D']) n = store.remove('wp', where=[crit1]) - + assert(n == 56) - + n = store.remove('wp', where=[crit2]) assert(n == 32) - + result = store['wp'] expected = wp.truncate(after=date).reindex(minor=['B', 'C']) tm.assert_panel_equal(result, expected) - + # individual row elements store.put('wp2', wp, table=True) @@ -1466,7 +1489,7 @@ def test_remove_crit(self): result = store.select('wp2') expected = wp.reindex(major_axis=wp.major_axis - date1) tm.assert_panel_equal(result, expected) - + date2 = wp.major_axis[5] crit2 = Term('major_axis', date2) store.remove('wp2', where=[crit2]) @@ -1474,7 +1497,7 @@ def test_remove_crit(self): expected = wp.reindex( major_axis=wp.major_axis - date1 - Index([date2])) tm.assert_panel_equal(result, expected) - + date3 = [wp.major_axis[7], wp.major_axis[9]] crit3 = Term('major_axis', date3) store.remove('wp2', where=[crit3]) @@ -1482,7 +1505,7 @@ def test_remove_crit(self): expected = wp.reindex( major_axis=wp.major_axis - date1 - Index([date2]) - Index(date3)) tm.assert_panel_equal(result, expected) - + # corners store.put('wp4', wp, table=True) n = store.remove( @@ -1498,7 +1521,7 @@ def test_terms(self): p4d = tm.makePanel4D() store.put('wp', wp, table=True) store.put('p4d', p4d, table=True) - + # some invalid terms terms = [ ['minor', ['A', 'B']], @@ -1513,13 +1536,13 @@ def test_terms(self): self.assertRaises(Exception, Term.__init__, 'index') self.assertRaises(Exception, Term.__init__, 'index', '==') self.assertRaises(Exception, Term.__init__, 'index', '>', 5) - + # panel result = store.select('wp', [Term( 'major_axis<20000108'), Term('minor_axis', '=', ['A', 'B'])]) expected = wp.truncate(after='20000108').reindex(minor=['A', 'B']) tm.assert_panel_equal(result, expected) - + # 
p4d result = store.select('p4d', [Term('major_axis<20000108'), Term('minor_axis', '=', ['A', 'B']), @@ -1527,7 +1550,7 @@ def test_terms(self): expected = p4d.truncate(after='20000108').reindex( minor=['A', 'B'], items=['ItemA', 'ItemB']) tm.assert_panel4d_equal(result, expected) - + # valid terms terms = [ dict(field='major_axis', op='>', value='20121114'), @@ -1548,13 +1571,13 @@ def test_terms(self): for t in terms: store.select('wp', t) store.select('p4d', t) - + # valid for p4d only terms = [ (('labels', '=', ['l1', 'l2']),), Term('labels', '=', ['l1', 'l2']), ] - + for t in terms: store.select('p4d', t) @@ -1562,17 +1585,17 @@ def test_series(self): s = tm.makeStringSeries() self._check_roundtrip(s, tm.assert_series_equal) - + ts = tm.makeTimeSeries() self._check_roundtrip(ts, tm.assert_series_equal) - + ts2 = Series(ts.index, Index(ts.index, dtype=object)) self._check_roundtrip(ts2, tm.assert_series_equal) - + ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object), dtype=object)) self._check_roundtrip(ts3, tm.assert_series_equal) - + def test_sparse_series(self): s = tm.makeStringSeries() @@ -1580,11 +1603,11 @@ def test_sparse_series(self): ss = s.to_sparse() self._check_roundtrip(ss, tm.assert_series_equal, check_series_type=True) - + ss2 = s.to_sparse(kind='integer') self._check_roundtrip(ss2, tm.assert_series_equal, check_series_type=True) - + ss3 = s.to_sparse(fill_value=0) self._check_roundtrip(ss3, tm.assert_series_equal, check_series_type=True) @@ -1598,28 +1621,28 @@ def test_sparse_frame(self): self._check_double_roundtrip(ss, tm.assert_frame_equal, check_frame_type=True) - + ss2 = s.to_sparse(kind='integer') self._check_double_roundtrip(ss2, tm.assert_frame_equal, check_frame_type=True) - + ss3 = s.to_sparse(fill_value=0) self._check_double_roundtrip(ss3, tm.assert_frame_equal, check_frame_type=True) - + def test_sparse_panel(self): items = ['x', 'y', 'z'] p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items)) sp = 
p.to_sparse() - + self._check_double_roundtrip(sp, tm.assert_panel_equal, check_panel_type=True) - + sp2 = p.to_sparse(kind='integer') self._check_double_roundtrip(sp2, tm.assert_panel_equal, check_panel_type=True) - + sp3 = p.to_sparse(fill_value=0) self._check_double_roundtrip(sp3, tm.assert_panel_equal, check_panel_type=True) @@ -1630,7 +1653,7 @@ def test_float_index(self): index = np.random.randn(10) s = Series(np.random.randn(10), index=index) self._check_roundtrip(s, tm.assert_series_equal) - + def test_tuple_index(self): # GH #492 @@ -1641,7 +1664,7 @@ def test_tuple_index(self): warnings.filterwarnings('ignore', category=PerformanceWarning) self._check_roundtrip(DF, tm.assert_frame_equal) warnings.filterwarnings('always', category=PerformanceWarning) - + def test_index_types(self): values = np.random.randn(2) @@ -1652,45 +1675,45 @@ def test_index_types(self): ser = Series(values, [0, 'y']) self._check_roundtrip(ser, func) warnings.filterwarnings('always', category=PerformanceWarning) - + ser = Series(values, [datetime.datetime.today(), 0]) self._check_roundtrip(ser, func) - + ser = Series(values, ['y', 0]) self._check_roundtrip(ser, func) - + warnings.filterwarnings('ignore', category=PerformanceWarning) ser = Series(values, [datetime.date.today(), 'a']) self._check_roundtrip(ser, func) warnings.filterwarnings('always', category=PerformanceWarning) - + warnings.filterwarnings('ignore', category=PerformanceWarning) ser = Series(values, [1.23, 'b']) self._check_roundtrip(ser, func) warnings.filterwarnings('always', category=PerformanceWarning) - + ser = Series(values, [1, 1.53]) self._check_roundtrip(ser, func) - + ser = Series(values, [1, 5]) self._check_roundtrip(ser, func) - + ser = Series(values, [datetime.datetime( 2012, 1, 1), datetime.datetime(2012, 1, 2)]) self._check_roundtrip(ser, func) - + def test_timeseries_preepoch(self): - + if sys.version_info[0] == 2 and sys.version_info[1] < 7: raise nose.SkipTest - + dr = bdate_range('1/1/1940', 
'1/1/1960') ts = Series(np.random.randn(len(dr)), index=dr) try: self._check_roundtrip(ts, tm.assert_series_equal) except OverflowError: raise nose.SkipTest('known failer on some windows platforms') - + def test_frame(self): df = tm.makeDataFrame() @@ -1701,24 +1724,24 @@ def test_frame(self): self._check_roundtrip_table(df, tm.assert_frame_equal) self._check_roundtrip(df, tm.assert_frame_equal) - + self._check_roundtrip_table(df, tm.assert_frame_equal, compression=True) self._check_roundtrip(df, tm.assert_frame_equal, compression=True) - + tdf = tm.makeTimeDataFrame() self._check_roundtrip(tdf, tm.assert_frame_equal) self._check_roundtrip(tdf, tm.assert_frame_equal, compression=True) - + with ensure_clean(self.path) as store: # not consolidated df['foo'] = np.random.randn(len(df)) store['df'] = df recons = store['df'] self.assert_(recons._data.is_consolidated()) - + # empty self._check_roundtrip(df[:0], tm.assert_frame_equal) @@ -1734,7 +1757,7 @@ def test_empty_series_frame(self): self._check_roundtrip(df0, tm.assert_frame_equal) self._check_roundtrip(df1, tm.assert_frame_equal) self._check_roundtrip(df2, tm.assert_frame_equal) - + def test_can_serialize_dates(self): rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')] @@ -1822,12 +1845,12 @@ def _make_one(): tm.assert_frame_equal(store['obj'], df1) store['obj'] = df2 tm.assert_frame_equal(store['obj'], df2) - + # check that can store Series of all of these types self._check_roundtrip(df1['obj1'], tm.assert_series_equal) self._check_roundtrip(df1['bool1'], tm.assert_series_equal) self._check_roundtrip(df1['int1'], tm.assert_series_equal) - + # try with compression self._check_roundtrip(df1['obj1'], tm.assert_series_equal, compression=True) @@ -1918,7 +1941,7 @@ def test_select(self): _maybe_remove(store, 'wp') store.put('wp', wp, table=True) store.select('wp') - + # non-table ok (where = None) _maybe_remove(store, 'wp') store.put('wp2', wp, table=False) @@ -1928,18 +1951,18 @@ def test_select(self): wp 
= Panel( np.random.randn(100, 100, 100), items=['Item%03d' % i for i in xrange(100)], major_axis=date_range('1/1/2000', periods=100), minor_axis=['E%03d' % i for i in xrange(100)]) - + _maybe_remove(store, 'wp') store.append('wp', wp) items = ['Item%03d' % i for i in xrange(80)] result = store.select('wp', Term('items', items)) expected = wp.reindex(items=items) tm.assert_panel_equal(expected, result) - + # selectin non-table with a where # self.assertRaises(ValueError, store.select, # 'wp2', ('column', ['A', 'D'])) - + # select with columns= df = tm.makeTimeDataFrame() _maybe_remove(store, 'df') @@ -1947,7 +1970,7 @@ def test_select(self): result = store.select('df', columns=['A', 'B']) expected = df.reindex(columns=['A', 'B']) tm.assert_frame_equal(expected, result) - + # equivalentsly result = store.select('df', [('columns', ['A', 'B'])]) expected = df.reindex(columns=['A', 'B']) @@ -1966,14 +1989,14 @@ def test_select(self): result = store.select('df', ['A > 0'], columns=['A', 'B']) expected = df[df.A > 0].reindex(columns=['A', 'B']) tm.assert_frame_equal(expected, result) - + # with a data column, but different columns _maybe_remove(store, 'df') store.append('df', df, data_columns=['A']) result = store.select('df', ['A > 0'], columns=['C', 'D']) expected = df[df.A > 0].reindex(columns=['C', 'D']) tm.assert_frame_equal(expected, result) - + def test_select_dtypes(self): with ensure_clean(self.path) as store: @@ -2027,7 +2050,7 @@ def test_select_with_many_inputs(self): with ensure_clean(self.path) as store: - df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), + df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300), A=np.random.randn(300), B=range(300), users = ['a']*50 + ['b']*50 + ['c']*100 + ['a%03d' % i for i in range(100)])) @@ -2064,7 +2087,7 @@ def test_select_with_many_inputs(self): self.assert_(len(result) == 100) def test_select_iterator(self): - + # single table with ensure_clean(self.path) as store: @@ -2134,7 +2157,7 @@ def 
test_select_iterator(self): results.append(s) result = concat(results) tm.assert_frame_equal(expected, result) - + # where selection #expected = store.select_as_multiple( # ['df1', 'df2'], where= Term('A>0'), selector='df1') @@ -2148,7 +2171,7 @@ def test_select_iterator(self): def test_retain_index_attributes(self): # GH 3499, losing frequency info on index recreation - df = DataFrame(dict(A = Series(xrange(3), + df = DataFrame(dict(A = Series(xrange(3), index=date_range('2000-1-1',periods=3,freq='H')))) with ensure_clean(self.path) as store: @@ -2165,7 +2188,7 @@ def test_retain_index_attributes(self): # try to append a table with a different frequency warnings.filterwarnings('ignore', category=AttributeConflictWarning) - df2 = DataFrame(dict(A = Series(xrange(3), + df2 = DataFrame(dict(A = Series(xrange(3), index=date_range('2002-1-1',periods=3,freq='D')))) store.append('data',df2) warnings.filterwarnings('always', category=AttributeConflictWarning) @@ -2174,10 +2197,10 @@ def test_retain_index_attributes(self): # this is ok _maybe_remove(store,'df2') - df2 = DataFrame(dict(A = Series(xrange(3), + df2 = DataFrame(dict(A = Series(xrange(3), index=[Timestamp('20010101'),Timestamp('20010102'),Timestamp('20020101')]))) store.append('df2',df2) - df3 = DataFrame(dict(A = Series(xrange(3),index=date_range('2002-1-1',periods=3,freq='D')))) + df3 = DataFrame(dict(A = Series(xrange(3),index=date_range('2002-1-1',periods=3,freq='D')))) store.append('df2',df3) def test_retain_index_attributes2(self): @@ -2212,19 +2235,19 @@ def test_panel_select(self): with ensure_clean(self.path) as store: store.put('wp', wp, table=True) date = wp.major_axis[len(wp.major_axis) // 2] - + crit1 = ('major_axis', '>=', date) crit2 = ('minor_axis', '=', ['A', 'D']) result = store.select('wp', [crit1, crit2]) expected = wp.truncate(before=date).reindex(minor=['A', 'D']) tm.assert_panel_equal(result, expected) - + result = store.select( 'wp', ['major_axis>=20000124', ('minor_axis', '=', ['A', 
'B'])]) expected = wp.truncate(before='20000124').reindex(minor=['A', 'B']) tm.assert_panel_equal(result, expected) - + def test_frame_select(self): df = tm.makeTimeDataFrame() @@ -2236,26 +2259,26 @@ def test_frame_select(self): crit1 = ('index', '>=', date) crit2 = ('columns', ['A', 'D']) crit3 = ('columns', 'A') - + result = store.select('frame', [crit1, crit2]) expected = df.ix[date:, ['A', 'D']] tm.assert_frame_equal(result, expected) - + result = store.select('frame', [crit3]) expected = df.ix[:, ['A']] tm.assert_frame_equal(result, expected) - + # invalid terms df = tm.makeTimeDataFrame() store.append('df_time', df) self.assertRaises( ValueError, store.select, 'df_time', [Term("index>0")]) - + # can't select if not written as table # store['frame'] = df # self.assertRaises(ValueError, store.select, # 'frame', [crit1, crit2]) - + def test_string_select(self): # GH 2973 @@ -2312,7 +2335,7 @@ def test_read_column(self): with ensure_clean(self.path) as store: _maybe_remove(store, 'df') store.append('df', df) - + # error self.assertRaises(KeyError, store.select_column, 'df', 'foo') @@ -2335,7 +2358,7 @@ def f(): store.append('df2', df2, data_columns=['string']) result = store.select_column('df2', 'string') tm.assert_almost_equal(result.values, df2['string'].values) - + # a data column with NaNs, result excludes the NaNs df3 = df.copy() df3['string'] = 'foo' @@ -2351,14 +2374,14 @@ def test_coordinates(self): _maybe_remove(store, 'df') store.append('df', df) - + # all c = store.select_as_coordinates('df') assert((c.values == np.arange(len(df.index))).all() == True) - + # get coordinates back & test vs frame _maybe_remove(store, 'df') - + df = DataFrame(dict(A=range(5), B=range(5))) store.append('df', df) c = store.select_as_coordinates('df', ['index<3']) @@ -2366,13 +2389,13 @@ def test_coordinates(self): result = store.select('df', where=c) expected = df.ix[0:2, :] tm.assert_frame_equal(result, expected) - + c = store.select_as_coordinates('df', ['index>=3', 
'index<=4']) assert((c.values == np.arange(2) + 3).all() == True) result = store.select('df', where=c) expected = df.ix[3:4, :] tm.assert_frame_equal(result, expected) - + # multiple tables _maybe_remove(store, 'df1') _maybe_remove(store, 'df2') @@ -2380,12 +2403,12 @@ def test_coordinates(self): df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x) store.append('df1', df1, data_columns=['A', 'B']) store.append('df2', df2) - + c = store.select_as_coordinates('df1', ['A>0', 'B>0']) df1_result = store.select('df1', c) df2_result = store.select('df2', c) result = concat([df1_result, df2_result], axis=1) - + expected = concat([df1, df2], axis=1) expected = expected[(expected.A > 0) & (expected.B > 0)] tm.assert_frame_equal(result, expected) @@ -2399,13 +2422,13 @@ def test_append_to_multiple(self): with ensure_clean(self.path) as store: # exceptions - self.assertRaises(ValueError, store.append_to_multiple, + self.assertRaises(ValueError, store.append_to_multiple, {'df1': ['A', 'B'], 'df2': None}, df, selector='df3') self.assertRaises(ValueError, store.append_to_multiple, {'df1': None, 'df2': None}, df, selector='df3') self.assertRaises( ValueError, store.append_to_multiple, 'df1', df, 'df1') - + # regular operation store.append_to_multiple( {'df1': ['A', 'B'], 'df2': None}, df, selector='df1') @@ -2413,7 +2436,7 @@ def test_append_to_multiple(self): ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1') expected = df[(df.A > 0) & (df.B > 0)] tm.assert_frame_equal(result, expected) - + def test_select_as_multiple(self): df1 = tm.makeTimeDataFrame() @@ -2440,7 +2463,7 @@ def test_select_as_multiple(self): ['df3'], where=['A>0', 'B>0'], selector='df1') self.assertRaises(ValueError, store.select_as_multiple, ['df1','df2'], where=['A>0', 'B>0'], selector='df4') - + # default select result = store.select('df1', ['A>0', 'B>0']) expected = store.select_as_multiple( @@ -2449,14 +2472,14 @@ def test_select_as_multiple(self): expected = store.select_as_multiple( 'df1', 
where=['A>0', 'B>0'], selector='df1') tm.assert_frame_equal(result, expected) - + # multiple result = store.select_as_multiple( ['df1', 'df2'], where=['A>0', 'B>0'], selector='df1') expected = concat([df1, df2], axis=1) expected = expected[(expected.A > 0) & (expected.B > 0)] tm.assert_frame_equal(result, expected) - + # multiple (diff selector) try: result = store.select_as_multiple(['df1', 'df2'], where=[Term( @@ -2473,7 +2496,7 @@ def test_select_as_multiple(self): # test excpection for diff rows store.append('df3', tm.makeTimeDataFrame(nper=50)) - self.assertRaises(ValueError, store.select_as_multiple, + self.assertRaises(ValueError, store.select_as_multiple, ['df1','df3'], where=['A>0', 'B>0'], selector='df1') def test_start_stop(self): @@ -2482,12 +2505,12 @@ def test_start_stop(self): df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20))) store.append('df', df) - + result = store.select( 'df', [Term("columns", "=", ["A"])], start=0, stop=5) expected = df.ix[0:4, ['A']] tm.assert_frame_equal(result, expected) - + # out of range result = store.select( 'df', [Term("columns", "=", ["A"])], start=30, stop=40) @@ -2506,7 +2529,7 @@ def test_select_filter_corner(self): crit = Term('columns', df.columns[:75]) result = store.select('frame', [crit]) tm.assert_frame_equal(result, df.ix[:, df.columns[:75]]) - + def _check_roundtrip(self, obj, comparator, compression=False, **kwargs): options = {} @@ -2576,7 +2599,7 @@ def test_legacy_table_read(self): store.select('df1') store.select('df2') store.select('wp1') - + # force the frame store.select('df2', typ='legacy_frame') @@ -2588,7 +2611,7 @@ def test_legacy_table_read(self): df2 = store.select('df2') store.select('df2', Term('index', '>', df2.index[2])) warnings.filterwarnings('always', category=IncompatibilityWarning) - + finally: safe_close(store) @@ -2622,7 +2645,7 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs): if f is None: f = 
tm.get_data_path('legacy_hdf/legacy_0.10.h5') - + store = HDFStore(f, 'r') if new_f is None: @@ -2661,7 +2684,7 @@ def do_copy(f = None, new_f = None, keys = None, propindexes = True, **kwargs): # new table df = tm.makeDataFrame() - + try: st = HDFStore(self.path) st.append('df', df, data_columns = ['A']) @@ -2708,7 +2731,7 @@ def test_tseries_indices_series(self): ser = Series(np.random.randn(len(idx)), idx) store['a'] = ser result = store['a'] - + assert_series_equal(result, ser) self.assertEquals(type(result.index), type(ser.index)) self.assertEquals(result.index.freq, ser.index.freq) @@ -2717,7 +2740,7 @@ def test_tseries_indices_series(self): ser = Series(np.random.randn(len(idx)), idx) store['a'] = ser result = store['a'] - + assert_series_equal(result, ser) self.assertEquals(type(result.index), type(ser.index)) self.assertEquals(result.index.freq, ser.index.freq) @@ -2729,16 +2752,16 @@ def test_tseries_indices_frame(self): df = DataFrame(np.random.randn(len(idx), 3), index=idx) store['a'] = df result = store['a'] - + assert_frame_equal(result, df) self.assertEquals(type(result.index), type(df.index)) self.assertEquals(result.index.freq, df.index.freq) - + idx = tm.makePeriodIndex(10) df = DataFrame(np.random.randn(len(idx), 3), idx) store['a'] = df result = store['a'] - + assert_frame_equal(result, df) self.assertEquals(type(result.index), type(df.index)) self.assertEquals(result.index.freq, df.index.freq) @@ -2758,7 +2781,7 @@ def test_store_datetime_mixed(self): ts = tm.makeTimeSeries() df['d'] = ts.index[:3] self._check_roundtrip(df, tm.assert_frame_equal) - + # def test_cant_write_multiindex_table(self): # # for now, #1848 # df = DataFrame(np.random.randn(10, 4),
closes #4098
https://api.github.com/repos/pandas-dev/pandas/pulls/4099
2013-07-01T20:09:36Z
2013-07-01T20:39:08Z
2013-07-01T20:39:08Z
2014-06-25T15:42:21Z
CLN: Refactor string special methods
diff --git a/doc/source/release.rst b/doc/source/release.rst index 691c7312dde72..facf753ced9a0 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -175,8 +175,18 @@ pandas 0.12 ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try until success is also valid - more consistency in the to_datetime return types (give string/array of string inputs) (:issue:`3888`) + - The internal ``pandas`` class hierarchy has changed (slightly). The + previous ``PandasObject`` now is called ``PandasContainer`` and a new + ``PandasObject`` has become the baseclass for ``PandasContainer`` as well + as ``Index``, ``Categorical``, ``GroupBy``, ``SparseList``, and + ``SparseArray`` (+ their base classes). Currently, ``PandasObject`` + provides string methods (from ``StringMixin``). (:issue:`4090`, :issue:`4092`) + - New ``StringMixin`` that, given a ``__unicode__`` method, gets python 2 and + python 3 compatible string methods (``__str__``, ``__bytes__``, and + ``__repr__``). Plus string safety throughout. Now employed in many places + throughout the pandas library. (:issue:`4090`, :issue:`4092`) -**Experimental Feautres** +**Experimental Features** - Added experimental ``CustomBusinessDay`` class to support ``DateOffsets`` with custom holiday calendars and custom weekmasks. (:issue:`2301`) diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index 60086e1c49ae7..f8836b4532493 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -8,13 +8,13 @@ enhancements along with a large number of bug fixes. Highlites include a consistent I/O API naming scheme, routines to read html, write multi-indexes to csv files, read & write STATA data files, read & write JSON format -files, Python 3 support for ``HDFStore``, filtering of groupby expressions via ``filter``, and a +files, Python 3 support for ``HDFStore``, filtering of groupby expressions via ``filter``, and a revamped ``replace`` routine that accepts regular expressions. 
API changes ~~~~~~~~~~~ - - The I/O API is now much more consistent with a set of top level ``reader`` functions + - The I/O API is now much more consistent with a set of top level ``reader`` functions accessed like ``pd.read_csv()`` that generally return a ``pandas`` object. * ``read_csv`` @@ -38,7 +38,7 @@ API changes * ``to_clipboard`` - - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return + - Fix modulo and integer division on Series,DataFrames to act similary to ``float`` dtypes to return ``np.nan`` or ``np.inf`` as appropriate (:issue:`3590`). This correct a numpy bug that treats ``integer`` and ``float`` dtypes differently. @@ -50,15 +50,15 @@ API changes p / p p / 0 - - Add ``squeeze`` keyword to ``groupby`` to allow reduction from + - Add ``squeeze`` keyword to ``groupby`` to allow reduction from DataFrame -> Series if groups are unique. This is a Regression from 0.10.1. - We are reverting back to the prior behavior. This means groupby will return the - same shaped objects whether the groups are unique or not. Revert this issue (:issue:`2893`) + We are reverting back to the prior behavior. This means groupby will return the + same shaped objects whether the groups are unique or not. Revert this issue (:issue:`2893`) with (:issue:`3596`). .. ipython:: python - df2 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19}, + df2 = DataFrame([{"val1": 1, "val2" : 20}, {"val1":1, "val2": 19}, {"val1":1, "val2": 27}, {"val1":1, "val2": 12}]) def func(dataf): return dataf["val2"] - dataf["val2"].mean() @@ -96,9 +96,9 @@ API changes and thus you should cast to an appropriate numeric dtype if you need to plot something. - - Add ``colormap`` keyword to DataFrame plotting methods. Accepts either a - matplotlib colormap object (ie, matplotlib.cm.jet) or a string name of such - an object (ie, 'jet'). The colormap is sampled to select the color for each + - Add ``colormap`` keyword to DataFrame plotting methods. 
Accepts either a + matplotlib colormap object (ie, matplotlib.cm.jet) or a string name of such + an object (ie, 'jet'). The colormap is sampled to select the color for each column. Please see :ref:`visualization.colormaps` for more information. (:issue:`3860`) @@ -159,6 +159,18 @@ API changes ``bs4`` + ``html5lib`` when lxml fails to parse. a list of parsers to try until success is also valid + - The internal ``pandas`` class hierarchy has changed (slightly). The + previous ``PandasObject`` now is called ``PandasContainer`` and a new + ``PandasObject`` has become the baseclass for ``PandasContainer`` as well + as ``Index``, ``Categorical``, ``GroupBy``, ``SparseList``, and + ``SparseArray`` (+ their base classes). Currently, ``PandasObject`` + provides string methods (from ``StringMixin``). (:issue:`4090`, :issue:`4092`) + + - New ``StringMixin`` that, given a ``__unicode__`` method, gets python 2 and + python 3 compatible string methods (``__str__``, ``__bytes__``, and + ``__repr__``). Plus string safety throughout. Now employed in many places + throughout the pandas library. (:issue:`4090`, :issue:`4092`) + I/O Enhancements ~~~~~~~~~~~~~~~~ @@ -184,7 +196,7 @@ I/O Enhancements .. warning:: - You may have to install an older version of BeautifulSoup4, + You may have to install an older version of BeautifulSoup4, :ref:`See the installation docs<install.optional_dependencies>` - Added module for reading and writing Stata files: ``pandas.io.stata`` (:issue:`1512`) @@ -203,15 +215,15 @@ I/O Enhancements - The option, ``tupleize_cols`` can now be specified in both ``to_csv`` and ``read_csv``, to provide compatiblity for the pre 0.12 behavior of writing and reading multi-index columns via a list of tuples. The default in - 0.12 is to write lists of tuples and *not* interpret list of tuples as a - multi-index column. + 0.12 is to write lists of tuples and *not* interpret list of tuples as a + multi-index column. 
Note: The default behavior in 0.12 remains unchanged, but starting with 0.13, - the default *to* write and read multi-index columns will be in the new + the default *to* write and read multi-index columns will be in the new format. (:issue:`3571`, :issue:`1651`, :issue:`3141`) - If an ``index_col`` is not specified (e.g. you don't have an index, or wrote it - with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will + with ``df.to_csv(..., index=False``), then any ``names`` on the columns index will be *lost*. .. ipython:: python @@ -296,8 +308,8 @@ Other Enhancements pd.get_option('a.b') pd.get_option('b.c') - - The ``filter`` method for group objects returns a subset of the original - object. Suppose we want to take only elements that belong to groups with a + - The ``filter`` method for group objects returns a subset of the original + object. Suppose we want to take only elements that belong to groups with a group sum greater than 2. .. ipython:: python @@ -317,7 +329,7 @@ Other Enhancements dff.groupby('B').filter(lambda x: len(x) > 2) Alternatively, instead of dropping the offending groups, we can return a - like-indexed objects where the groups that do not pass the filter are + like-indexed objects where the groups that do not pass the filter are filled with NaNs. .. ipython:: python @@ -333,9 +345,9 @@ Experimental Features - Added experimental ``CustomBusinessDay`` class to support ``DateOffsets`` with custom holiday calendars and custom weekmasks. (:issue:`2301`) - + .. note:: - + This uses the ``numpy.busdaycalendar`` API introduced in Numpy 1.7 and therefore requires Numpy 1.7.0 or newer. 
@@ -416,7 +428,7 @@ Bug Fixes - Extend ``reindex`` to correctly deal with non-unique indices (:issue:`3679`) - ``DataFrame.itertuples()`` now works with frames with duplicate column names (:issue:`3873`) - - Bug in non-unique indexing via ``iloc`` (:issue:`4017`); added ``takeable`` argument to + - Bug in non-unique indexing via ``iloc`` (:issue:`4017`); added ``takeable`` argument to ``reindex`` for location-based taking - ``DataFrame.from_records`` did not accept empty recarrays (:issue:`3682`) diff --git a/pandas/core/base.py b/pandas/core/base.py new file mode 100644 index 0000000000000..6122e78fa8bce --- /dev/null +++ b/pandas/core/base.py @@ -0,0 +1,58 @@ +""" +Base class(es) for all pandas objects. +""" +from pandas.util import py3compat + +class StringMixin(object): + """implements string methods so long as object defines a `__unicode__` method. + Handles Python2/3 compatibility transparently.""" + # side note - this could be made into a metaclass if more than one object nees + def __str__(self): + """ + Return a string representation for a particular object. + + Invoked by str(obj) in both py2/py3. + Yields Bytestring in Py2, Unicode String in py3. + """ + + if py3compat.PY3: + return self.__unicode__() + return self.__bytes__() + + def __bytes__(self): + """ + Return a string representation for a particular object. + + Invoked by bytes(obj) in py3 only. + Yields a bytestring in both py2/py3. + """ + from pandas.core.config import get_option + + encoding = get_option("display.encoding") + return self.__unicode__().encode(encoding, 'replace') + + def __repr__(self): + """ + Return a string representation for a particular object. + + Yields Bytestring in Py2, Unicode String in py3. 
+ """ + return str(self) + +class PandasObject(StringMixin): + """baseclass for various pandas objects""" + + @property + def _constructor(self): + """class constructor (for this class it's just `__class__`""" + return self.__class__ + + def __unicode__(self): + """ + Return a string representation for a particular object. + + Invoked by unicode(obj) in py2 only. Yields a Unicode String in both + py2/py3. + """ + # Should be overwritten by base classes + return object.__repr__(self) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 916bb2deb417e..b25a027adedd9 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -3,6 +3,7 @@ import numpy as np from pandas.core.algorithms import factorize +from pandas.core.base import PandasObject from pandas.core.index import Index import pandas.core.common as com from pandas.core.frame import DataFrame @@ -25,8 +26,7 @@ def f(self, other): return f - -class Categorical(object): +class Categorical(PandasObject): """ Represents a categorical variable in classic R / S-plus fashion @@ -134,9 +134,9 @@ def __array__(self, dtype=None): def __len__(self): return len(self.labels) - def __repr__(self): + def __unicode__(self): temp = 'Categorical: %s\n%s\n%s' - values = np.asarray(self) + values = com.pprint_thing(np.asarray(self)) levheader = 'Levels (%d): ' % len(self.levels) levstring = np.array_repr(self.levels, max_line_width=60) @@ -145,9 +145,9 @@ def __repr__(self): lines = levstring.split('\n') levstring = '\n'.join([lines[0]] + [indent + x.lstrip() for x in lines[1:]]) + name = '' if self.name is None else self.name + return temp % (name, values, levheader + levstring) - return temp % ('' if self.name is None else self.name, - repr(values), levheader + levstring) def __getitem__(self, key): if isinstance(key, (int, np.integer)): diff --git a/pandas/core/common.py b/pandas/core/common.py index 96c567cbb6348..ddacb98a2ddf3 100644 --- a/pandas/core/common.py +++ b/pandas/core/common.py 
@@ -64,10 +64,10 @@ def _isnull_new(obj): if lib.isscalar(obj): return lib.checknull(obj) - from pandas.core.generic import PandasObject + from pandas.core.generic import PandasContainer if isinstance(obj, np.ndarray): return _isnull_ndarraylike(obj) - elif isinstance(obj, PandasObject): + elif isinstance(obj, PandasContainer): # TODO: optimize for DataFrame, etc. return obj.apply(isnull) elif isinstance(obj, list) or hasattr(obj, '__array__'): @@ -91,10 +91,10 @@ def _isnull_old(obj): if lib.isscalar(obj): return lib.checknull_old(obj) - from pandas.core.generic import PandasObject + from pandas.core.generic import PandasContainer if isinstance(obj, np.ndarray): return _isnull_ndarraylike_old(obj) - elif isinstance(obj, PandasObject): + elif isinstance(obj, PandasContainer): # TODO: optimize for DataFrame, etc. return obj.apply(_isnull_old) elif isinstance(obj, list) or hasattr(obj, '__array__'): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 9c5108f747e44..5fe2d60993f2c 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -584,10 +584,6 @@ def _verbose_info(self, value): def axes(self): return [self.index, self.columns] - @property - def _constructor(self): - return self.__class__ - @property def shape(self): return (len(self.index), len(self.columns)) @@ -653,28 +649,6 @@ def _repr_fits_horizontal_(self,ignore_width=False): return repr_width < width - def __str__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. 
- """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular DataFrame @@ -714,14 +688,6 @@ def __unicode__(self): return value - def __repr__(self): - """ - Return a string representation for a particular DataFrame - - Yields Bytestring in Py2, Unicode String in py3. - """ - return str(self) - def _repr_html_(self): """ Return a html representation for a particular DataFrame. diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c41b02fdd7b22..6be5f456b50e6 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1,20 +1,21 @@ # pylint: disable=W0231,E1101 import numpy as np +import pandas.lib as lib +from pandas.core.base import PandasObject from pandas.core.index import MultiIndex import pandas.core.indexing as indexing from pandas.core.indexing import _maybe_convert_indices from pandas.tseries.index import DatetimeIndex import pandas.core.common as com -import pandas.lib as lib class PandasError(Exception): pass -class PandasObject(object): +class PandasContainer(PandasObject): _AXIS_NUMBERS = { 'index': 0, @@ -52,6 +53,12 @@ def __hash__(self): raise TypeError('{0!r} objects are mutable, thus they cannot be' ' hashed'.format(self.__class__.__name__)) + def __unicode__(self): + # unicode representation based upon iterating over self + # (since, by definition, `PandasContainers` are iterable) + prepr = '[%s]' % ','.join(map(com.pprint_thing, self)) + return '%s(%s)' % (self.__class__.__name__, prepr) + #---------------------------------------------------------------------- # Axis name business @@ -578,9 +585,10 @@ def to_json(self, path_or_buf=None, orient=None, date_format='epoch', # install the indexerse for _name, _indexer in indexing.get_indexers_list(): - PandasObject._create_indexer(_name,_indexer) + PandasContainer._create_indexer(_name,_indexer) + -class NDFrame(PandasObject): +class 
NDFrame(PandasContainer): """ N-dimensional analogue of DataFrame. Store multi-dimensional in a size-mutable, labeled data structure @@ -625,17 +633,10 @@ def astype(self, dtype, copy = True, raise_on_error = True): mgr = self._data.astype(dtype, copy = copy, raise_on_error = raise_on_error) return self._constructor(mgr) - @property - def _constructor(self): - return NDFrame - @property def axes(self): return self._data.axes - def __repr__(self): - return 'NDFrame' - @property def values(self): return self._data.as_matrix() diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 9bd7923f6ec14..cc0a2b7589bb6 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -2,6 +2,7 @@ import types import numpy as np +from pandas.core.base import PandasObject from pandas.core.categorical import Categorical from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame @@ -100,7 +101,7 @@ def _last(x): return _last(x) -class GroupBy(object): +class GroupBy(PandasObject): """ Class for grouping and aggregating relational data. See aggregate, transform, and apply functions on this object. 
@@ -201,6 +202,10 @@ def __init__(self, obj, keys=None, axis=0, level=None, def __len__(self): return len(self.indices) + def __unicode__(self): + # TODO: Better unicode/repr for GroupBy object + return object.__repr__(self) + @property def groups(self): return self.grouper.groups diff --git a/pandas/core/index.py b/pandas/core/index.py index 43b172c6ecde9..a3aa0804bcfe2 100644 --- a/pandas/core/index.py +++ b/pandas/core/index.py @@ -9,6 +9,7 @@ import pandas.algos as _algos import pandas.index as _index from pandas.lib import Timestamp +from pandas.core.base import PandasObject from pandas.util.decorators import cache_readonly from pandas.core.common import isnull @@ -47,7 +48,7 @@ def _shouldbe_timestamp(obj): or tslib.is_timestamp_array(obj)) -class Index(np.ndarray): +class Index(PandasObject, np.ndarray): """ Immutable ndarray implementing an ordered, sliceable set. The basic object storing axis labels for all pandas objects @@ -142,28 +143,6 @@ def __array_finalize__(self, obj): def _shallow_copy(self): return self.view() - def __str__(self): - """ - Return a string representation for a particular Index - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular Index - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular Index @@ -173,14 +152,6 @@ def __unicode__(self): prepr = com.pprint_thing(self, escape_chars=('\t', '\r', '\n'),quote_strings=True) return '%s(%s, dtype=%s)' % (type(self).__name__, prepr, self.dtype) - def __repr__(self): - """ - Return a string representation for a particular Index - - Yields Bytestring in Py2, Unicode String in py3. 
- """ - return str(self) - def to_series(self): """ return a series with both index and values equal to the index keys @@ -237,10 +208,6 @@ def _set_names(self, values): names = property(fset=_set_names, fget=_get_names) - @property - def _constructor(self): - return Index - @property def _has_complex_internals(self): # to disable groupby tricks in MultiIndex @@ -1408,10 +1375,6 @@ def __new__(cls, data, dtype=None, copy=False, name=None): def inferred_type(self): return 'integer' - @property - def _constructor(self): - return Int64Index - @property def asi8(self): # do not cache or you'll create a memory leak @@ -1531,28 +1494,6 @@ def _array_values(self): def dtype(self): return np.dtype('O') - def __str__(self): - """ - Return a string representation for a particular Index - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular Index - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular Index @@ -1566,14 +1507,6 @@ def __unicode__(self): return output % summary - def __repr__(self): - """ - Return a string representation for a particular Index - - Yields Bytestring in Py2, Unicode String in py3. 
- """ - return str(self) - def __len__(self): return len(self.labels[0]) diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 7a6a13da302d1..57be20a50f7bc 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -4,6 +4,7 @@ from numpy import nan import numpy as np +from pandas.core.base import PandasObject from pandas.core.common import (_possibly_downcast_to_dtype, isnull, _NS_DTYPE, _TD_DTYPE) @@ -19,7 +20,7 @@ from pandas.util import py3compat -class Block(object): +class Block(PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure @@ -91,14 +92,12 @@ def set_ref_items(self, ref_items, maybe_rename=True): self.items = ref_items.take(self.ref_locs) self.ref_items = ref_items - def __repr__(self): + def __unicode__(self): shape = ' x '.join([com.pprint_thing(s) for s in self.shape]) name = type(self).__name__ result = '%s: %s, %s, dtype %s' % ( name, com.pprint_thing(self.items), shape, self.dtype) - if py3compat.PY3: - return unicode(result) - return com.console_encode(result) + return result def __contains__(self, item): return item in self.items @@ -969,7 +968,7 @@ def make_block(values, items, ref_items, klass=None, fastpath=False, placement=N # TODO: flexible with index=None and/or items=None -class BlockManager(object): +class BlockManager(PandasObject): """ Core internal data structure to implement DataFrame @@ -1213,7 +1212,7 @@ def __setstate__(self, state): def __len__(self): return len(self.items) - def __repr__(self): + def __unicode__(self): output = 'BlockManager' for i, ax in enumerate(self.axes): if i == 0: @@ -1222,7 +1221,7 @@ def __repr__(self): output += '\nAxis %d: %s' % (i, ax) for block in self.blocks: - output += '\n%s' % repr(block) + output += '\n%s' % com.pprint_thing(block) return output @property diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 92f69a7444aab..d33f7144c27b0 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ 
-186,10 +186,6 @@ class Panel(NDFrame): major_axis = lib.AxisProperty(1) minor_axis = lib.AxisProperty(2) - @property - def _constructor(self): - return type(self) - # return the type of the slice constructor _constructor_sliced = DataFrame @@ -466,28 +462,6 @@ def __invert__(self): #---------------------------------------------------------------------- # Magic methods - def __str__(self): - """ - Return a string representation for a particular Panel - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular Panel - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular Panel @@ -515,14 +489,6 @@ def axis_pretty(a): [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS]) return output - def __repr__(self): - """ - Return a string representation for a particular Panel - - Yields Bytestring in Py2, Unicode String in py3. - """ - return str(self) - def __iter__(self): return iter(getattr(self, self._info_axis)) diff --git a/pandas/core/series.py b/pandas/core/series.py index 7684acfe85470..5ea029b414fef 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -394,8 +394,7 @@ def f(self, axis=0, dtype=None, out=None, skipna=True, level=None): #---------------------------------------------------------------------- # Series class - -class Series(pa.Array, generic.PandasObject): +class Series(generic.PandasContainer, pa.Array): """ One-dimensional ndarray with axis labels (including time series). Labels need not be unique but must be any hashable type. 
The object @@ -520,10 +519,6 @@ def __init__(self, data=None, index=None, dtype=None, name=None, copy=False): pass - @property - def _constructor(self): - return Series - @property def _can_hold_na(self): return not is_integer_dtype(self.dtype) @@ -1096,28 +1091,6 @@ def reset_index(self, level=None, drop=False, name=None, inplace=False): return df.reset_index(level=level, drop=drop) - def __str__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular DataFrame @@ -1142,14 +1115,6 @@ def __unicode__(self): raise AssertionError() return result - def __repr__(self): - """ - Return a string representation for a particular Series - - Yields Bytestring in Py2, Unicode String in py3. 
- """ - return str(self) - def _tidy_repr(self, max_vals=20): """ diff --git a/pandas/io/excel.py b/pandas/io/excel.py index 95702847d9c7f..f61db447f2cfc 100644 --- a/pandas/io/excel.py +++ b/pandas/io/excel.py @@ -73,9 +73,6 @@ def __init__(self, path_or_buf, kind=None, **kwds): data = path_or_buf.read() self.book = xlrd.open_workbook(file_contents=data) - def __repr__(self): - return object.__repr__(self) - def parse(self, sheetname, header=0, skiprows=None, skip_footer=0, index_col=None, parse_cols=None, parse_dates=False, date_parser=None, na_values=None, thousands=None, chunksize=None, diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index 5bf309edffa74..fdb86c43b7160 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -17,7 +17,8 @@ from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel from pandas.sparse.array import BlockIndex, IntIndex from pandas.tseries.api import PeriodIndex, DatetimeIndex -from pandas.core.common import adjoin, is_list_like +from pandas.core.base import StringMixin +from pandas.core.common import adjoin, is_list_like, pprint_thing from pandas.core.algorithms import match, unique from pandas.core.categorical import Categorical from pandas.core.common import _asarray_tuplesafe @@ -218,7 +219,7 @@ def read_hdf(path_or_buf, key, **kwargs): # a passed store; user controls open/close f(path_or_buf, False) -class HDFStore(object): +class HDFStore(StringMixin): """ dict-like IO interface for storing pandas objects in PyTables format. 
@@ -315,8 +316,8 @@ def __contains__(self, key): def __len__(self): return len(self.groups()) - def __repr__(self): - output = '%s\nFile path: %s\n' % (type(self), self._path) + def __unicode__(self): + output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path)) if len(self.keys()): keys = [] @@ -326,11 +327,11 @@ def __repr__(self): try: s = self.get_storer(k) if s is not None: - keys.append(str(s.pathname or k)) - values.append(str(s or 'invalid_HDFStore node')) - except (Exception), detail: + keys.append(pprint_thing(s.pathname or k)) + values.append(pprint_thing(s or 'invalid_HDFStore node')) + except Exception as detail: keys.append(k) - values.append("[invalid_HDFStore node: %s]" % str(detail)) + values.append("[invalid_HDFStore node: %s]" % pprint_thing(detail)) output += adjoin(12, keys, values) else: @@ -984,7 +985,7 @@ def get_values(self): self.close() return results -class IndexCol(object): +class IndexCol(StringMixin): """ an index column description class Parameters @@ -1050,10 +1051,9 @@ def set_table(self, table): self.table = table return self - def __repr__(self): - return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % (self.name, self.cname, self.axis, self.pos, self.kind) - - __str__ = __repr__ + def __unicode__(self): + temp = tuple(map(pprint_thing, (self.name, self.cname, self.axis, self.pos, self.kind))) + return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp def __eq__(self, other): """ compare 2 col items """ @@ -1570,7 +1570,7 @@ class GenericDataIndexableCol(DataIndexableCol): def get_attr(self): pass -class Storer(object): +class Storer(StringMixin): """ represent an object in my store facilitate read/write of various types of objects this is an abstract base class @@ -1610,19 +1610,16 @@ def set_version(self): def pandas_type(self): return _ensure_decoded(getattr(self.group._v_attrs, 'pandas_type', None)) - def __repr__(self): - """ return a pretty representatgion of myself """ + def __unicode__(self): + """ return 
a pretty representation of myself """ self.infer_axes() s = self.shape if s is not None: if isinstance(s, (list,tuple)): - s = "[%s]" % ','.join([ str(x) for x in s ]) + s = "[%s]" % ','.join([pprint_thing(x) for x in s]) return "%-12.12s (shape->%s)" % (self.pandas_type,s) return self.pandas_type - def __str__(self): - return self.__repr__() - def set_object_info(self): """ set my pandas type & version """ self.attrs.pandas_type = self.pandas_kind @@ -3435,7 +3432,7 @@ def _need_convert(kind): return True return False -class Term(object): +class Term(StringMixin): """create a term object that holds a field, op, and value Parameters @@ -3540,10 +3537,9 @@ def __init__(self, field, op=None, value=None, queryables=None, encoding=None): if len(self.q): self.eval() - def __str__(self): - return "field->%s,op->%s,value->%s" % (self.field, self.op, self.value) - - __repr__ = __str__ + def __unicode__(self): + attrs = map(pprint_thing, (self.field, self.op, self.value)) + return "field->%s,op->%s,value->%s" % tuple(attrs) @property def is_valid(self): diff --git a/pandas/io/stata.py b/pandas/io/stata.py index 632e97c24721f..603924ac6a292 100644 --- a/pandas/io/stata.py +++ b/pandas/io/stata.py @@ -15,6 +15,7 @@ import sys import struct +from pandas.core.base import StringMixin from pandas.core.frame import DataFrame from pandas.core.series import Series from pandas.core.categorical import Categorical @@ -163,7 +164,7 @@ def _datetime_to_stata_elapsed(date, fmt): raise ValueError("fmt %s not understood" % fmt) -class StataMissingValue(object): +class StataMissingValue(StringMixin): """ An observation's missing value. 
@@ -192,10 +193,12 @@ def __init__(self, offset, value): string = property(lambda self: self._str, doc="The Stata representation of the missing value: '.', '.a'..'.z'") value = property(lambda self: self._value, doc='The binary representation of the missing value.') - def __str__(self): - return self._str + def __unicode__(self): + return self.string - __str__.__doc__ = string.__doc__ + def __repr__(self): + # not perfect :-/ + return "%s(%s)" % (self.__class__, self) class StataParser(object): diff --git a/pandas/io/tests/test_json/test_pandas.py b/pandas/io/tests/test_json/test_pandas.py index 997229487e1b9..e57eacc80647f 100644 --- a/pandas/io/tests/test_json/test_pandas.py +++ b/pandas/io/tests/test_json/test_pandas.py @@ -33,7 +33,7 @@ _mixed_frame = _frame.copy() -class TestPandasObjects(unittest.TestCase): +class TestPandasContainer(unittest.TestCase): def setUp(self): self.ts = tm.makeTimeSeries() @@ -68,7 +68,7 @@ def _check_orient(df, orient, dtype=None, numpy=False, convert_axes=True, check_ if type(detail) == raise_ok: return raise - + unser = unser.sort() if dtype is False: @@ -104,7 +104,7 @@ def _check_all_orients(df, dtype=None, convert_axes=True, raise_ok=None): _check_orient(df, "split", dtype=dtype) _check_orient(df, "index", dtype=dtype) _check_orient(df, "values", dtype=dtype) - + _check_orient(df, "columns", dtype=dtype, convert_axes=False) _check_orient(df, "records", dtype=dtype, convert_axes=False) _check_orient(df, "split", dtype=dtype, convert_axes=False) @@ -347,7 +347,7 @@ def test_convert_dates(self): assert_series_equal(result,ts) def test_date_format(self): - + df = self.tsframe.copy() df['date'] = Timestamp('20130101') df_orig = df.copy() @@ -412,7 +412,7 @@ def test_misc_example(self): @network @slow def test_round_trip_exception_(self): - # GH 3867 + # GH 3867 df = pd.read_csv('https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv') s = df.to_json() @@ -429,9 +429,9 @@ def test_url(self): result = 
read_json(url,convert_dates=True) for c in ['created_at','closed_at','updated_at']: self.assert_(result[c].dtype == 'datetime64[ns]') - + url = 'http://search.twitter.com/search.json?q=pandas%20python' result = read_json(url) - + except urllib2.URLError: raise nose.SkipTest diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py index de510aa155412..48fa9caa0a05c 100644 --- a/pandas/sparse/array.py +++ b/pandas/sparse/array.py @@ -8,6 +8,7 @@ import numpy as np import operator +from pandas.core.base import PandasObject import pandas.core.common as com from pandas.util import py3compat @@ -86,8 +87,7 @@ def _sparse_fillop(this, other, name): return result, result_index - -class SparseArray(np.ndarray): +class SparseArray(PandasObject, np.ndarray): """Data structure for labeled, sparse floating point data Parameters @@ -184,9 +184,9 @@ def __setstate__(self, state): def __len__(self): return self.sp_index.length - def __repr__(self): - return '%s\n%s' % (np.ndarray.__repr__(self), - repr(self.sp_index)) + def __unicode__(self): + return '%s\n%s' % (com.pprint_thing(self), + com.pprint_thing(self.sp_index)) # Arithmetic operators diff --git a/pandas/sparse/list.py b/pandas/sparse/list.py index 9f59b9108a6b0..ceb03eae5d282 100644 --- a/pandas/sparse/list.py +++ b/pandas/sparse/list.py @@ -1,10 +1,12 @@ import numpy as np +from pandas.core.base import PandasObject +from pandas.core.common import pprint_thing from pandas.sparse.array import SparseArray import pandas._sparse as splib -class SparseList(object): +class SparseList(PandasObject): """ Data structure for accumulating data to be converted into a SparseArray. 
Has similar API to the standard Python list @@ -21,9 +23,9 @@ def __init__(self, data=None, fill_value=np.nan): if data is not None: self.append(data) - def __repr__(self): + def __unicode__(self): contents = '\n'.join(repr(c) for c in self._chunks) - return '%s\n%s' % (object.__repr__(self), contents) + return '%s\n%s' % (object.__repr__(self), pprint_thing(contents)) def __len__(self): return sum(len(c) for c in self._chunks) diff --git a/pandas/sparse/series.py b/pandas/sparse/series.py index 1b8d3541da289..802808954c8f4 100644 --- a/pandas/sparse/series.py +++ b/pandas/sparse/series.py @@ -241,8 +241,9 @@ def __setstate__(self, state): def __len__(self): return self.sp_index.length - def __repr__(self): - series_rep = Series.__repr__(self) + def __unicode__(self): + # currently, unicode is same as repr...fixes infinite loop + series_rep = Series.__unicode__(self) rep = '%s\n%s' % (series_rep, repr(self.sp_index)) return rep diff --git a/pandas/stats/fama_macbeth.py b/pandas/stats/fama_macbeth.py index b75029c615735..967199c0bcf69 100644 --- a/pandas/stats/fama_macbeth.py +++ b/pandas/stats/fama_macbeth.py @@ -1,3 +1,4 @@ +from pandas.core.base import StringMixin from pandas.util.py3compat import StringIO import numpy as np @@ -26,7 +27,7 @@ def fama_macbeth(**kwargs): return klass(**kwargs) -class FamaMacBeth(object): +class FamaMacBeth(StringMixin): def __init__(self, y, x, intercept=True, nw_lags=None, nw_lags_beta=None, entity_effects=False, time_effects=False, x_effects=None, @@ -114,7 +115,7 @@ def _coef_table(self): return buffer.getvalue() - def __repr__(self): + def __unicode__(self): return self.summary @cache_readonly diff --git a/pandas/stats/ols.py b/pandas/stats/ols.py index cdcf1ab2ab036..742d832a923d8 100644 --- a/pandas/stats/ols.py +++ b/pandas/stats/ols.py @@ -10,6 +10,7 @@ import numpy as np from pandas.core.api import DataFrame, Series, isnull +from pandas.core.base import StringMixin from pandas.core.common import _ensure_float64 from 
pandas.core.index import MultiIndex from pandas.core.panel import Panel @@ -22,7 +23,7 @@ _FP_ERR = 1e-8 -class OLS(object): +class OLS(StringMixin): """ Runs a full sample ordinary least squares regression. @@ -581,7 +582,7 @@ def summary(self): return template % params - def __repr__(self): + def __unicode__(self): return self.summary @cache_readonly diff --git a/pandas/stats/var.py b/pandas/stats/var.py index e993b60e18a39..8953f7badfefb 100644 --- a/pandas/stats/var.py +++ b/pandas/stats/var.py @@ -1,7 +1,7 @@ from __future__ import division import numpy as np - +from pandas.core.base import StringMixin from pandas.util.decorators import cache_readonly from pandas.core.frame import DataFrame from pandas.core.panel import Panel @@ -11,7 +11,7 @@ from pandas.stats.ols import _combine_rhs -class VAR(object): +class VAR(StringMixin): """ Estimates VAR(p) regression on multivariate time series data presented in pandas data structures. @@ -477,7 +477,7 @@ def _sigma(self): return np.dot(resid, resid.T) / (n - k) - def __repr__(self): + def __unicode__(self): return self.summary diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py index 56df301b5b027..7fdb6d9d2603d 100644 --- a/pandas/tseries/index.py +++ b/pandas/tseries/index.py @@ -488,7 +488,7 @@ def _mpl_repr(self): # how to represent ourselves to matplotlib return tslib.ints_to_pydatetime(self.asi8, self.tz) - def __repr__(self): + def __unicode__(self): from pandas.core.format import _format_datetime64 values = self.values @@ -514,8 +514,6 @@ def __repr__(self): return summary - __str__ = __repr__ - def __reduce__(self): """Necessary for making this object picklable""" object_state = list(np.ndarray.__reduce__(self)) diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py index 34c640392bda9..ac79fbd6bfb37 100644 --- a/pandas/tseries/period.py +++ b/pandas/tseries/period.py @@ -3,6 +3,7 @@ from datetime import datetime, date import numpy as np +from pandas.core.base import PandasObject import 
pandas.tseries.offsets as offsets from pandas.tseries.frequencies import (get_freq_code as _gfc, @@ -40,7 +41,7 @@ def f(self): return property(f) -class Period(object): +class Period(PandasObject): """ Represents an period of time @@ -272,28 +273,6 @@ def __repr__(self): return "Period('%s', '%s')" % (formatted, freqstr) - def __str__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by str(df) in both py2/py3. - Yields Bytestring in Py2, Unicode String in py3. - """ - - if py3compat.PY3: - return self.__unicode__() - return self.__bytes__() - - def __bytes__(self): - """ - Return a string representation for a particular DataFrame - - Invoked by bytes(df) in py3 only. - Yields a bytestring in both py2/py3. - """ - encoding = com.get_option("display.encoding") - return self.__unicode__().encode(encoding, 'replace') - def __unicode__(self): """ Return a string representation for a particular DataFrame @@ -303,9 +282,7 @@ def __unicode__(self): """ base, mult = _gfc(self.freq) formatted = tslib.period_format(self.ordinal, base) - value = (u"%s" % formatted) - assert type(value) == unicode - + value = ("%s" % formatted) return value
closes #4090, #3231 - The internal `pandas` class hierarchy has changed (slightly). The previous `PandasObject` now is called `PandasContainer` and a new `PandasObject` has become the baseclass for `PandasContainer` as well as `Index`, `Categorical`, `GroupBy`, `SparseList`, and `SparseArray` (+ their base classes). Currently, `PandasObject` provides string methods (from `StringMixin`). - New `StringMixin` that, given a `__unicode__` method, gets python 2 and python 3 compatible string methods (`__str__`, `__bytes__`, and `__repr__`). Now employed in many places throughout the pandas library. - Tried to improve safe unicode handling (e.g., where user/external input is used in the string or repr methods of a library). I'm hoping to use this for the new objects in the MultiIndex naming PR, so hopefully it's okay to merge for v0.12.
https://api.github.com/repos/pandas-dev/pandas/pulls/4092
2013-07-01T02:47:14Z
2013-07-02T01:42:28Z
2013-07-02T01:42:28Z
2014-06-14T19:01:58Z
BUG: pass sharex and sharey in grouped_hist
diff --git a/doc/source/release.rst b/doc/source/release.rst index 513540698023f..2aa6e1a80085f 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -297,6 +297,8 @@ pandas 0.12 - Fixed bug where a 1xN DataFrame would barf on a 1xN mask (:issue:`4071`) - Fixed running of ``tox`` under python3 where the pickle import was getting rewritten in an incompatible way (:issue:`4062`, :issue:`4063`) + - Fixed bug where sharex and sharey were not being passed to grouped_hist + (:issue:`4089`) pandas 0.11.0 diff --git a/doc/source/v0.12.0.txt b/doc/source/v0.12.0.txt index b9fbc4d9cf806..60086e1c49ae7 100644 --- a/doc/source/v0.12.0.txt +++ b/doc/source/v0.12.0.txt @@ -439,6 +439,8 @@ Bug Fixes - Fixed bug where a 1xN DataFrame would barf on a 1xN mask (:issue:`4071`) - Fixed running of ``tox`` under python3 where the pickle import was getting rewritten in an incompatible way (:issue:`4062`, :issue:`4063`) + - Fixed bug where sharex and sharey were not being passed to grouped_hist + (:issue:`4089`) See the :ref:`full release notes <release>` or issue tracker diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py index 15389ef687951..f1e4ef1106080 100644 --- a/pandas/tests/test_graphics.py +++ b/pandas/tests/test_graphics.py @@ -12,6 +12,7 @@ import numpy as np +from numpy import random from numpy.testing import assert_array_equal from numpy.testing.decorators import slow @@ -947,6 +948,38 @@ def test_grouped_hist(self): self.assertRaises(AttributeError, plotting.grouped_hist, df.A, by=df.C, foo='bar') + @slow + def test_axis_shared(self): + # GH4089 + import matplotlib.pyplot as plt + def tick_text(tl): + return [x.get_text() for x in tl] + + n = 100 + df = DataFrame({'gender': np.array(['Male', 'Female'])[random.randint(2, size=n)], + 'height': random.normal(66, 4, size=n), + 'weight': random.normal(161, 32, size=n)}) + ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True) + self.assert_(ax1._shared_x_axes.joined(ax1, ax2)) + 
self.assertFalse(ax1._shared_y_axes.joined(ax1, ax2)) + self.assert_(ax2._shared_x_axes.joined(ax1, ax2)) + self.assertFalse(ax2._shared_y_axes.joined(ax1, ax2)) + plt.close('all') + + ax1, ax2 = df.hist(column='height', by=df.gender, sharey=True) + self.assertFalse(ax1._shared_x_axes.joined(ax1, ax2)) + self.assert_(ax1._shared_y_axes.joined(ax1, ax2)) + self.assertFalse(ax2._shared_x_axes.joined(ax1, ax2)) + self.assert_(ax2._shared_y_axes.joined(ax1, ax2)) + plt.close('all') + + ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True, + sharey=True) + self.assert_(ax1._shared_x_axes.joined(ax1, ax2)) + self.assert_(ax1._shared_y_axes.joined(ax1, ax2)) + self.assert_(ax2._shared_x_axes.joined(ax1, ax2)) + self.assert_(ax2._shared_y_axes.joined(ax1, ax2)) + def test_option_mpl_style(self): set_option('display.mpl_style', 'default') set_option('display.mpl_style', None) diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py index ef55319da185c..0052ed1cecbe4 100644 --- a/pandas/tools/plotting.py +++ b/pandas/tools/plotting.py @@ -1928,7 +1928,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None, if by is not None: axes = grouped_hist(data, by=by, ax=ax, grid=grid, figsize=figsize, - **kwds) + sharex=sharex, sharey=sharey, **kwds) for ax in axes.ravel(): if xlabelsize is not None:
closes #4089
https://api.github.com/repos/pandas-dev/pandas/pulls/4091
2013-07-01T01:26:14Z
2013-07-01T03:28:07Z
2013-07-01T03:28:07Z
2014-07-16T08:16:48Z
ENH: really really really fix the failing data.py tests
diff --git a/pandas/io/common.py b/pandas/io/common.py index 1fc572dbf1a5e..33958ade2bcd6 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -1,9 +1,14 @@ -""" Common api utilities """ +"""Common IO api utilities""" +import sys import urlparse -from pandas.util import py3compat +import urllib2 +import zipfile +from contextlib import contextmanager, closing from StringIO import StringIO +from pandas.util import py3compat + _VALID_URLS = set(urlparse.uses_relative + urlparse.uses_netloc + urlparse.uses_params) _VALID_URLS.discard('') @@ -84,3 +89,24 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None): return filepath_or_buffer, None return filepath_or_buffer, None + + +# ---------------------- +# Prevent double closing +if py3compat.PY3: + urlopen = urllib2.urlopen +else: + @contextmanager + def urlopen(*args, **kwargs): + with closing(urllib2.urlopen(*args, **kwargs)) as f: + yield f + +# ZipFile is not a context manager for <= 2.6 +# must be tuple index here since 2.6 doesn't use namedtuple for version_info +if sys.version_info[1] <= 6: + @contextmanager + def ZipFile(*args, **kwargs): + with closing(zipfile.ZipFile(*args, **kwargs)) as zf: + yield zf +else: + ZipFile = zipfile.ZipFile diff --git a/pandas/io/data.py b/pandas/io/data.py index 278fc2fc6dd4d..2d91bd4cd383c 100644 --- a/pandas/io/data.py +++ b/pandas/io/data.py @@ -5,19 +5,29 @@ """ import warnings import tempfile - -import numpy as np +import itertools import datetime as dt import urllib import time + from collections import defaultdict -from contextlib import closing -from urllib2 import urlopen -from zipfile import ZipFile +import numpy as np + from pandas.util.py3compat import StringIO, bytes_to_str from pandas import Panel, DataFrame, Series, read_csv, concat +from pandas.core.common import PandasError from pandas.io.parsers import TextParser +from pandas.io.common import urlopen, ZipFile +from pandas.util.testing import _network_error_classes + + +class 
SymbolWarning(UserWarning): + pass + + +class RemoteDataError(PandasError, IOError): + pass def DataReader(name, data_source=None, start=None, end=None, @@ -58,16 +68,16 @@ def DataReader(name, data_source=None, start=None, end=None, if data_source == "yahoo": return get_data_yahoo(symbols=name, start=start, end=end, - adjust_price=False, chunk=25, + adjust_price=False, chunksize=25, retry_count=retry_count, pause=pause) elif data_source == "google": return get_data_google(symbols=name, start=start, end=end, - adjust_price=False, chunk=25, + adjust_price=False, chunksize=25, retry_count=retry_count, pause=pause) elif data_source == "fred": - return get_data_fred(name=name, start=start, end=end) + return get_data_fred(name, start, end) elif data_source == "famafrench": - return get_data_famafrench(name=name) + return get_data_famafrench(name) def _sanitize_dates(start, end): @@ -88,6 +98,9 @@ def _in_chunks(seq, size): return (seq[pos:pos + size] for pos in xrange(0, len(seq), size)) +_yahoo_codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r', + 'time': 't1', 'short_ratio': 's7'} + def get_quote_yahoo(symbols): """ Get current yahoo quote @@ -96,24 +109,19 @@ def get_quote_yahoo(symbols): """ if isinstance(symbols, basestring): sym_list = symbols - elif not isinstance(symbols, Series): - symbols = Series(symbols) - sym_list = '+'.join(symbols) else: sym_list = '+'.join(symbols) # for codes see: http://www.gummy-stuff.org/Yahoo-data.htm - codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r', - 'time': 't1', 'short_ratio': 's7'} - request = ''.join(codes.itervalues()) # code request string - header = codes.keys() + request = ''.join(_yahoo_codes.itervalues()) # code request string + header = _yahoo_codes.keys() data = defaultdict(list) url_str = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (sym_list, request) - with closing(urlopen(url_str)) as url: + with urlopen(url_str) as url: lines = url.readlines() for line in lines: @@ 
-131,7 +139,6 @@ def get_quote_yahoo(symbols): data[header[i]].append(v) idx = data.pop('symbol') - return DataFrame(data, index=idx) @@ -139,8 +146,30 @@ def get_quote_google(symbols): raise NotImplementedError("Google Finance doesn't have this functionality") -def _get_hist_yahoo(sym, start=None, end=None, retry_count=3, pause=0.001, - **kwargs): +def _retry_read_url(url, retry_count, pause, name): + for _ in xrange(retry_count): + time.sleep(pause) + + # kludge to close the socket ASAP + try: + with urlopen(url) as resp: + lines = resp.read() + except _network_error_classes: + pass + else: + rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0, + parse_dates=True)[::-1] + # Yahoo! Finance sometimes does this awesome thing where they + # return 2 rows for the most recent business day + if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover + rs = rs[:-1] + return rs + + raise IOError("after %d tries, %s did not " + "return a 200 for url %r" % (retry_count, name, url)) + + +def _get_hist_yahoo(sym, start, end, retry_count, pause): """ Get historical data for the given name from yahoo. Date format is datetime @@ -148,10 +177,8 @@ def _get_hist_yahoo(sym, start=None, end=None, retry_count=3, pause=0.001, Returns a DataFrame. """ start, end = _sanitize_dates(start, end) - - yahoo_URL = 'http://ichart.yahoo.com/table.csv?' - - url = (yahoo_URL + 's=%s' % sym + + yahoo_url = 'http://ichart.yahoo.com/table.csv?' + url = (yahoo_url + 's=%s' % sym + '&a=%s' % (start.month - 1) + '&b=%s' % start.day + '&c=%s' % start.year + @@ -160,29 +187,10 @@ def _get_hist_yahoo(sym, start=None, end=None, retry_count=3, pause=0.001, '&f=%s' % end.year + '&g=d' + '&ignore=.csv') - - for _ in xrange(retry_count): - with closing(urlopen(url)) as resp: - if resp.code == 200: - lines = resp.read() - rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0, - parse_dates=True)[::-1] - - # Yahoo! 
Finance sometimes does this awesome thing where they - # return 2 rows for the most recent business day - if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover - rs = rs[:-1] - - return rs - - time.sleep(pause) - - raise IOError("after %d tries, Yahoo did not " - "return a 200 for url %r" % (retry_count, url)) + return _retry_read_url(url, retry_count, pause, 'Yahoo!') -def _get_hist_google(sym, start=None, end=None, retry_count=3, pause=0.001, - **kwargs): +def _get_hist_google(sym, start, end, retry_count, pause): """ Get historical data for the given name from google. Date format is datetime @@ -190,7 +198,6 @@ def _get_hist_google(sym, start=None, end=None, retry_count=3, pause=0.001, Returns a DataFrame. """ start, end = _sanitize_dates(start, end) - google_URL = 'http://www.google.com/finance/historical?' # www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv @@ -199,25 +206,16 @@ def _get_hist_google(sym, start=None, end=None, retry_count=3, pause=0.001, '%Y'), "enddate": end.strftime('%b %d, %Y'), "output": "csv"}) - for _ in xrange(retry_count): - with closing(urlopen(url)) as resp: - if resp.code == 200: - rs = read_csv(StringIO(bytes_to_str(resp.read())), index_col=0, - parse_dates=True)[::-1] - - return rs - - time.sleep(pause) - - raise IOError("after %d tries, Google did not " - "return a 200 for url %s" % (retry_count, url)) + return _retry_read_url(url, retry_count, pause, 'Google') -def _adjust_prices(hist_data, price_list=['Open', 'High', 'Low', 'Close']): +def _adjust_prices(hist_data, price_list=None): """ Return modifed DataFrame or Panel with adjusted prices based on 'Adj Close' price. Adds 'Adj_Ratio' column. """ + if price_list is None: + price_list = 'Open', 'High', 'Low', 'Close' adj_ratio = hist_data['Adj Close'] / hist_data['Close'] data = hist_data.copy() @@ -234,7 +232,7 @@ def _calc_return_index(price_df): (typically NaN) is set to 1. 
""" df = price_df.pct_change().add(1).cumprod() - mask = ~df.ix[1].isnull() & df.ix[0].isnull() + mask = df.ix[1].notnull() & df.ix[0].isnull() df.ix[0][mask] = 1 # Check for first stock listings after starting date of index in ret_index @@ -245,8 +243,7 @@ def _calc_return_index(price_df): t_idx = df.index.get_loc(tstamp) - 1 df[sym].ix[t_idx] = 1 - ret_index = df - return ret_index + return df def get_components_yahoo(idx_sym): @@ -287,7 +284,7 @@ def get_components_yahoo(idx_sym): # break when no new components are found while True in mask: url_str = url.format(idx_mod, stats, comp_idx) - with closing(urlopen(url_str)) as resp: + with urlopen(url_str) as resp: raw = resp.read() lines = raw.decode('utf-8').strip().strip('"').split('"\r\n"') lines = [line.strip().split('","') for line in lines] @@ -303,22 +300,54 @@ def get_components_yahoo(idx_sym): return idx_df -def _dl_mult_symbols(symbols, start, end, chunksize, pause, method, **kwargs): +def _dl_mult_symbols(symbols, start, end, chunksize, retry_count, pause, + method): stocks = {} for sym_group in _in_chunks(symbols, chunksize): for sym in sym_group: try: - stocks[sym] = method(sym, start=start, end=end, pause=pause, - **kwargs) + stocks[sym] = method(sym, start, end, retry_count, pause) except IOError: - warnings.warn('ERROR with symbol: {0}, skipping.'.format(sym)) + warnings.warn('Failed to read symbol: {0!r}, replacing with ' + 'NaN.'.format(sym), SymbolWarning) + stocks[sym] = np.nan return Panel(stocks).swapaxes('items', 'minor') +_source_functions = {'google': _get_hist_google, 'yahoo': _get_hist_yahoo} + +def _get_data_from(symbols, start, end, retry_count, pause, adjust_price, + ret_index, chunksize, source, name): + if name is not None: + warnings.warn("Arg 'name' is deprecated, please use 'symbols' " + "instead.", FutureWarning) + symbols = name + + src_fn = _source_functions[source] + + # If a single symbol, (e.g., 'GOOG') + if isinstance(symbols, (basestring, int)): + hist_data = 
src_fn(symbols, start, end, retry_count, pause) + # Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT']) + elif isinstance(symbols, DataFrame): + hist_data = _dl_mult_symbols(symbols.index, start, end, chunksize, + retry_count, pause, src_fn) + else: + hist_data = _dl_mult_symbols(symbols, start, end, chunksize, + retry_count, pause, src_fn) + if source.lower() == 'yahoo': + if ret_index: + hist_data['Ret_Index'] = _calc_return_index(hist_data['Adj Close']) + if adjust_price: + hist_data = _adjust_prices(hist_data) + + return hist_data + + def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, pause=0.001, adjust_price=False, ret_index=False, - chunksize=25, **kwargs): + chunksize=25, name=None): """ Returns DataFrame/Panel of historical stock prices from symbols, over date range, start to end. To avoid being penalized by Yahoo! Finance servers, @@ -352,32 +381,13 @@ def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3, ------- hist_data : DataFrame (str) or Panel (array-like object, DataFrame) """ - if 'name' in kwargs: - warnings.warn("Arg 'name' is deprecated, please use 'symbols' " - "instead.", FutureWarning) - symbols = kwargs['name'] - - # If a single symbol, (e.g., 'GOOG') - if isinstance(symbols, (basestring, int)): - hist_data = _get_hist_yahoo(symbols, start=start, end=end) - # Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT']) - elif isinstance(symbols, DataFrame): - hist_data = _dl_mult_symbols(symbols.index, start, end, chunksize, - pause, _get_hist_yahoo, **kwargs) - else: - hist_data = _dl_mult_symbols(symbols, start, end, chunksize, pause, - _get_hist_yahoo, **kwargs) - - if ret_index: - hist_data['Ret_Index'] = _calc_return_index(hist_data['Adj Close']) - if adjust_price: - hist_data = _adjust_prices(hist_data) - - return hist_data + return _get_data_from(symbols, start, end, retry_count, pause, + adjust_price, ret_index, chunksize, 'yahoo', name) def get_data_google(symbols=None, start=None, end=None, 
retry_count=3, - pause=0.001, chunksize=25, **kwargs): + pause=0.001, adjust_price=False, ret_index=False, + chunksize=25, name=None): """ Returns DataFrame/Panel of historical stock prices from symbols, over date range, start to end. To avoid being penalized by Google Finance servers, @@ -405,21 +415,8 @@ def get_data_google(symbols=None, start=None, end=None, retry_count=3, ------- hist_data : DataFrame (str) or Panel (array-like object, DataFrame) """ - if 'name' in kwargs: - warnings.warn("Arg 'name' is deprecated, please use 'symbols' " - "instead.", FutureWarning) - symbols = kwargs['name'] - - # If a single symbol, (e.g., 'GOOG') - if isinstance(symbols, (basestring, int)): - return _get_hist_google(symbols, start=start, end=end) - # Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT']) - elif isinstance(symbols, DataFrame): - symbs = symbols.index - else: # Guess a Series - symbs = symbols - return _dl_mult_symbols(symbs, start, end, chunksize, pause, - _get_hist_google, **kwargs) + return _get_data_from(symbols, start, end, retry_count, pause, + adjust_price, ret_index, chunksize, 'google', name) def get_data_fred(name, start=dt.datetime(2010, 1, 1), @@ -435,7 +432,7 @@ def get_data_fred(name, start=dt.datetime(2010, 1, 1), fred_URL = "http://research.stlouisfed.org/fred2/series/" url = fred_URL + '%s' % name + '/downloaddata/%s' % name + '.csv' - with closing(urlopen(url)) as resp: + with urlopen(url) as resp: data = read_csv(resp, index_col=0, parse_dates=True, header=None, skiprows=1, names=["DATE", name], na_values='.') @@ -448,39 +445,39 @@ def get_data_fred(name, start=dt.datetime(2010, 1, 1), raise -def get_data_famafrench(name, start=None, end=None): - start, end = _sanitize_dates(start, end) - +def get_data_famafrench(name): # path of zip files - zipFileURL = "http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/" + zip_file_url = ('http://mba.tuck.dartmouth.edu/pages/faculty/' + 'ken.french/ftp/') + zip_file_path = 
'{0}{1}.zip'.format(zip_file_url, name) - with closing(urlopen(zipFileURL + name + ".zip")) as url: + with urlopen(zip_file_path) as url: raw = url.read() with tempfile.TemporaryFile() as tmpf: tmpf.write(raw) - with closing(ZipFile(tmpf, 'r')) as zf: + with ZipFile(tmpf, 'r') as zf: data = zf.read(name + '.txt').splitlines() - file_edges = np.where(np.array([len(d) for d in data]) == 2)[0] + line_lengths = np.array(map(len, data)) + file_edges = np.where(line_lengths)[0] datasets = {} - for i in xrange(len(file_edges) - 1): - dataset = [d.split() for d in data[(file_edges[i] + 1): - file_edges[i + 1]]] + edges = itertools.izip(file_edges[:-1], file_edges[1:]) + for i, (left_edge, right_edge) in enumerate(edges): + dataset = [d.split() for d in data[left_edge:right_edge]] if len(dataset) > 10: - ncol = np.median(np.array([len(d) for d in dataset])) - header_index = np.where( - np.array([len(d) for d in dataset]) == (ncol - 1))[0][-1] + ncol_raw = np.array(map(len, dataset)) + ncol = np.median(ncol_raw) + header_index = np.where(ncol_raw == ncol - 1)[0][-1] header = dataset[header_index] + ds_header = dataset[header_index + 1:] # to ensure the header is unique - header = ['{0} {1}'.format(j + 1, header_j) for j, header_j in - enumerate(header)] - index = np.array( - [d[0] for d in dataset[(header_index + 1):]], dtype=int) - dataset = np.array( - [d[1:] for d in dataset[(header_index + 1):]], dtype=float) + header = ['{0} {1}'.format(*items) for items in enumerate(header, + start=1)] + index = np.fromiter((d[0] for d in ds_header), dtype=int) + dataset = np.fromiter((d[1:] for d in ds_header), dtype=float) datasets[i] = DataFrame(dataset, index, columns=header) return datasets @@ -490,7 +487,7 @@ def get_data_famafrench(name, start=None, end=None): CUR_YEAR = dt.datetime.now().year -def _unpack(row, kind='td'): +def _unpack(row, kind): els = row.xpath('.//%s' % kind) return [val.text_content() for val in els] @@ -498,7 +495,7 @@ def _unpack(row, kind='td'): def 
_parse_options_data(table): rows = table.xpath('.//tr') header = _unpack(rows[0], kind='th') - data = map(_unpack, rows[1:]) + data = [_unpack(row, kind='td') for row in rows[1:]] # Use ',' as a thousands separator as we're pulling from the US site. return TextParser(data, names=header, na_values=['N/A'], thousands=',').get_chunk() @@ -615,13 +612,18 @@ def _get_option_data(self, month, year, expiry, table_loc, name): from lxml.html import parse except ImportError: raise ImportError("Please install lxml if you want to use the " - "{0} class".format(self.__class__.__name__)) + "{0!r} class".format(self.__class__.__name__)) try: - tables = parse(url).xpath('.//table') - except (AttributeError, IOError): - raise IndexError("Table location {0} invalid, unable to parse " - "tables".format(table_loc)) + doc = parse(url) + except _network_error_classes: + raise RemoteDataError("Unable to parse tables from URL " + "{0!r}".format(url)) else: + root = doc.getroot() + if root is None: + raise RemoteDataError("Parsed URL {0!r} has no root" + "element".format(url)) + tables = root.xpath('.//table') ntables = len(tables) if table_loc - 1 > ntables: raise IndexError("Table location {0} invalid, {1} tables" @@ -758,7 +760,7 @@ def get_near_stock_price(self, above_below=2, call=True, put=False, chop = df[get_range].dropna() chop.reset_index(inplace=True) data[nam] = chop - return [data[nam] for nam in sorted(to_ret)] + return [data[nam] for nam in to_ret] def _try_parse_dates(self, year, month, expiry): if year is not None or month is not None: @@ -852,7 +854,7 @@ def get_forward_data(self, months, call=True, put=False, near=False, else: all_data = concat([all_data, frame]) data[name] = all_data - ret = [data[k] for k in sorted(data.keys())] + ret = [data[k] for k in to_ret] if len(ret) == 1: return ret.pop() if len(ret) != 2: diff --git a/pandas/io/tests/test_cparser.py b/pandas/io/tests/test_cparser.py index 71c0367cf5da3..7fa8d06f48ea3 100644 --- 
a/pandas/io/tests/test_cparser.py +++ b/pandas/io/tests/test_cparser.py @@ -34,7 +34,7 @@ class TestCParser(unittest.TestCase): def setUp(self): - self.dirpath = tm.get_data_path('/') + self.dirpath = tm.get_data_path() self.csv1 = os.path.join(self.dirpath, 'test1.csv') self.csv2 = os.path.join(self.dirpath, 'test2.csv') self.xls1 = os.path.join(self.dirpath, 'test.xls') diff --git a/pandas/io/tests/test_yahoo.py b/pandas/io/tests/test_data.py similarity index 61% rename from pandas/io/tests/test_yahoo.py rename to pandas/io/tests/test_data.py index d6b65e7379d0a..2f4185154b8e6 100644 --- a/pandas/io/tests/test_yahoo.py +++ b/pandas/io/tests/test_data.py @@ -1,16 +1,88 @@ import unittest import warnings import nose +from nose.tools import assert_equal from datetime import datetime -import pandas as pd import numpy as np -import pandas.io.data as web -from pandas.util.testing import (network, assert_series_equal, - assert_produces_warning, assert_frame_equal) +import pandas as pd +from pandas import DataFrame +from pandas.io import data as web +from pandas.io.data import DataReader, SymbolWarning +from pandas.util.testing import (assert_series_equal, assert_produces_warning, + assert_frame_equal, network) from numpy.testing import assert_array_equal +def assert_n_failed_equals_n_null_columns(wngs, obj, cls=SymbolWarning): + all_nan_cols = pd.Series(dict((k, pd.isnull(v).all()) for k, v in + obj.iteritems())) + n_all_nan_cols = all_nan_cols.sum() + valid_warnings = pd.Series([wng for wng in wngs if isinstance(wng, cls)]) + assert_equal(len(valid_warnings), n_all_nan_cols) + failed_symbols = all_nan_cols[all_nan_cols].index + msgs = valid_warnings.map(lambda x: x.message) + assert msgs.str.contains('|'.join(failed_symbols)).all() + + +class TestGoogle(unittest.TestCase): + + @network + def test_google(self): + # asserts that google is minimally working and that it throws + # an exception when DataReader can't get a 200 response from + # google + start = 
datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + + self.assertEquals( + web.DataReader("F", 'google', start, end)['Close'][-1], + 13.68) + + self.assertRaises(Exception, web.DataReader, "NON EXISTENT TICKER", + 'google', start, end) + + @network + def test_get_quote_fails(self): + self.assertRaises(NotImplementedError, web.get_quote_google, + pd.Series(['GOOG', 'AAPL', 'GOOG'])) + + @network + def test_get_goog_volume(self): + df = web.get_data_google('GOOG') + self.assertEqual(df.Volume.ix['OCT-08-2010'], 2863473) + + @network + def test_get_multi1(self): + sl = ['AAPL', 'AMZN', 'GOOG'] + pan = web.get_data_google(sl, '2012') + + def testit(): + ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG] + self.assertEquals(ts[0].dayofyear, 96) + + if (hasattr(pan, 'Close') and hasattr(pan.Close, 'GOOG') and + hasattr(pan.Close, 'AAPL')): + testit() + else: + self.assertRaises(AttributeError, testit) + + @network + def test_get_multi2(self): + with warnings.catch_warnings(record=True) as w: + pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', + 'JAN-31-12') + result = pan.Close.ix['01-18-12'] + assert_n_failed_equals_n_null_columns(w, result) + + # sanity checking + + assert np.issubdtype(result.dtype, np.floating) + result = pan.Open.ix['Jan-15-12':'Jan-20-12'] + self.assertEqual((4, 3), result.shape) + assert_n_failed_equals_n_null_columns(w, result) + + class TestYahoo(unittest.TestCase): @classmethod def setUpClass(cls): @@ -111,7 +183,7 @@ def test_get_data_multiple_symbols_two_dates(self): [ 19.03, 28.16, 25.52], [ 18.81, 28.82, 25.87]]) result = pan.Open.ix['Jan-15-12':'Jan-20-12'] - assert_array_equal(np.array(expected).shape, result.shape) + self.assertEqual(expected.shape, result.shape) @network def test_get_date_ret_index(self): @@ -251,6 +323,82 @@ def test_get_put_data_warning(self): warnings.warn("IndexError thrown no tables found") +class TestDataReader(unittest.TestCase): + @network + def test_read_yahoo(self): + gs = 
DataReader("GS", "yahoo") + assert isinstance(gs, DataFrame) + + @network + def test_read_google(self): + gs = DataReader("GS", "google") + assert isinstance(gs, DataFrame) + + @network + def test_read_fred(self): + vix = DataReader("VIXCLS", "fred") + assert isinstance(vix, DataFrame) + + @network + def test_read_famafrench(self): + for name in ("F-F_Research_Data_Factors", + "F-F_Research_Data_Factors_weekly", "6_Portfolios_2x3", + "F-F_ST_Reversal_Factor"): + ff = DataReader(name, "famafrench") + assert isinstance(ff, dict) + + +class TestFred(unittest.TestCase): + @network + def test_fred(self): + """ + Throws an exception when DataReader can't get a 200 response from + FRED. + """ + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + + self.assertEquals( + web.DataReader("GDP", "fred", start, end)['GDP'].tail(1), + 15984.1) + + self.assertRaises(Exception, web.DataReader, "NON EXISTENT SERIES", + 'fred', start, end) + + @network + def test_fred_nan(self): + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + df = web.DataReader("DFII5", "fred", start, end) + assert pd.isnull(df.ix['2010-01-01']) + + @network + def test_fred_parts(self): + start = datetime(2010, 1, 1) + end = datetime(2013, 01, 27) + df = web.get_data_fred("CPIAUCSL", start, end) + self.assertEqual(df.ix['2010-05-01'], 217.23) + + t = df.CPIAUCSL.values + assert np.issubdtype(t.dtype, np.floating) + self.assertEqual(t.shape, (37,)) + + @network + def test_fred_part2(self): + expected = [[576.7], + [962.9], + [684.7], + [848.3], + [933.3]] + result = web.get_data_fred("A09024USA144NNBR", start="1915").ix[:5] + assert_array_equal(result.values, np.array(expected)) + + @network + def test_invalid_series(self): + name = "NOT A REAL SERIES" + self.assertRaises(Exception, web.get_data_fred, name) + + if __name__ == '__main__': nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False) diff --git a/pandas/io/tests/test_data_reader.py 
b/pandas/io/tests/test_data_reader.py deleted file mode 100644 index 129e35921335c..0000000000000 --- a/pandas/io/tests/test_data_reader.py +++ /dev/null @@ -1,30 +0,0 @@ -import unittest - -from pandas.core.generic import PandasObject -from pandas.io.data import DataReader -from pandas.util.testing import network - - -class TestDataReader(unittest.TestCase): - @network - def test_read_yahoo(self): - gs = DataReader("GS", "yahoo") - assert isinstance(gs, PandasObject) - - @network - def test_read_google(self): - gs = DataReader("GS", "google") - assert isinstance(gs, PandasObject) - - @network - def test_read_fred(self): - vix = DataReader("VIXCLS", "fred") - assert isinstance(vix, PandasObject) - - @network - def test_read_famafrench(self): - for name in ("F-F_Research_Data_Factors", - "F-F_Research_Data_Factors_weekly", "6_Portfolios_2x3", - "F-F_ST_Reversal_Factor"): - ff = DataReader(name, "famafrench") - assert isinstance(ff, dict) diff --git a/pandas/io/tests/test_fred.py b/pandas/io/tests/test_fred.py deleted file mode 100644 index e06f8f91e82a7..0000000000000 --- a/pandas/io/tests/test_fred.py +++ /dev/null @@ -1,65 +0,0 @@ -import unittest -import nose -from datetime import datetime - -import pandas as pd -import numpy as np -import pandas.io.data as web -from pandas.util.testing import network -from numpy.testing import assert_array_equal - - -class TestFred(unittest.TestCase): - @network - def test_fred(self): - """ - Throws an exception when DataReader can't get a 200 response from - FRED. 
- """ - start = datetime(2010, 1, 1) - end = datetime(2013, 01, 27) - - self.assertEquals( - web.DataReader("GDP", "fred", start, end)['GDP'].tail(1), - 15984.1) - - self.assertRaises(Exception, web.DataReader, "NON EXISTENT SERIES", - 'fred', start, end) - - @network - def test_fred_nan(self): - start = datetime(2010, 1, 1) - end = datetime(2013, 01, 27) - df = web.DataReader("DFII5", "fred", start, end) - assert pd.isnull(df.ix['2010-01-01']) - - @network - def test_fred_parts(self): - start = datetime(2010, 1, 1) - end = datetime(2013, 01, 27) - df = web.get_data_fred("CPIAUCSL", start, end) - self.assertEqual(df.ix['2010-05-01'], 217.23) - - t = df.CPIAUCSL.values - assert np.issubdtype(t.dtype, np.floating) - self.assertEqual(t.shape, (37,)) - - @network - def test_fred_part2(self): - expected = [[576.7], - [962.9], - [684.7], - [848.3], - [933.3]] - result = web.get_data_fred("A09024USA144NNBR", start="1915").ix[:5] - assert_array_equal(result.values, np.array(expected)) - - @network - def test_invalid_series(self): - name = "NOT A REAL SERIES" - self.assertRaises(Exception, web.get_data_fred, name) - - -if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) diff --git a/pandas/io/tests/test_google.py b/pandas/io/tests/test_google.py deleted file mode 100644 index 65ae20fb5b505..0000000000000 --- a/pandas/io/tests/test_google.py +++ /dev/null @@ -1,73 +0,0 @@ -import unittest -import nose -from datetime import datetime - -import numpy as np -import pandas as pd -import pandas.io.data as web -from pandas.util.testing import network, with_connectivity_check - - -class TestGoogle(unittest.TestCase): - - @network - def test_google(self): - # asserts that google is minimally working and that it throws - # an exception when DataReader can't get a 200 response from - # google - start = datetime(2010, 1, 1) - end = datetime(2013, 01, 27) - - self.assertEquals( - web.DataReader("F", 'google', start, 
end)['Close'][-1], - 13.68) - - self.assertRaises(Exception, web.DataReader, "NON EXISTENT TICKER", - 'google', start, end) - - @network - def test_get_quote_fails(self): - self.assertRaises(NotImplementedError, web.get_quote_google, - pd.Series(['GOOG', 'AAPL', 'GOOG'])) - - @network - def test_get_goog_volume(self): - df = web.get_data_google('GOOG') - self.assertEqual(df.Volume.ix['OCT-08-2010'], 2863473) - - @network - def test_get_multi1(self): - sl = ['AAPL', 'AMZN', 'GOOG'] - pan = web.get_data_google(sl, '2012') - - def testit(): - ts = pan.Close.GOOG.index[pan.Close.AAPL > pan.Close.GOOG] - self.assertEquals(ts[0].dayofyear, 96) - - if (hasattr(pan, 'Close') and hasattr(pan.Close, 'GOOG') and - hasattr(pan.Close, 'AAPL')): - testit() - else: - self.assertRaises(AttributeError, testit) - - @network - def test_get_multi2(self): - pan = web.get_data_google(['GE', 'MSFT', 'INTC'], 'JAN-01-12', - 'JAN-31-12') - result = pan.Close.ix['01-18-12'] - self.assertEqual(len(result), 3) - - # sanity checking - assert np.issubdtype(result.dtype, np.floating) - - expected = np.array([[ 18.99, 28.4 , 25.18], - [ 18.58, 28.31, 25.13], - [ 19.03, 28.16, 25.52], - [ 18.81, 28.82, 25.87]]) - result = pan.Open.ix['Jan-15-12':'Jan-20-12'] - self.assertEqual(np.array(expected).shape, result.shape) - - -if __name__ == '__main__': - nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], - exit=False) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index c871e573719b9..47bde4ecb32a7 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2,18 +2,20 @@ # pylint: disable-msg=W0402 -from datetime import datetime -from functools import wraps import random import string import sys import tempfile import warnings +import inspect +import os -from contextlib import contextmanager # contextlib is available since 2.5 - +from datetime import datetime +from functools import wraps +from contextlib import contextmanager, closing +from httplib import 
HTTPException +from urllib2 import urlopen from distutils.version import LooseVersion -import urllib2 from numpy.random import randn import numpy as np @@ -29,6 +31,8 @@ from pandas.tseries.index import DatetimeIndex from pandas.tseries.period import PeriodIndex +from pandas.io.common import urlopen + Index = index.Index MultiIndex = index.MultiIndex Series = series.Series @@ -81,7 +85,6 @@ def set_trace(): #------------------------------------------------------------------------------ # contextmanager to ensure the file cleanup -from contextlib import contextmanager @contextmanager def ensure_clean(filename = None): # if we are not passed a filename, generate a temporary @@ -91,30 +94,23 @@ def ensure_clean(filename = None): try: yield filename finally: - import os try: os.remove(filename) except: pass -def get_data_path(f = None): - """ return the path of a data file, these are relative to the current test dir """ - - if f is None: - f = '' - import inspect, os +def get_data_path(f=''): + """Return the path of a data file, these are relative to the current test + directory. + """ # get our callers file - frame,filename,line_number,function_name,lines,index = \ - inspect.getouterframes(inspect.currentframe())[1] - + _, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1] base_dir = os.path.abspath(os.path.dirname(filename)) - return os.path.join(base_dir, 'data/%s' % f) + return os.path.join(base_dir, 'data', f) #------------------------------------------------------------------------------ # Comparators - - def equalContents(arr1, arr2): """Checks if the set of unique elements of arr1 and arr2 are equivalent. 
""" @@ -692,9 +688,11 @@ def dec(f): return wrapper +_network_error_classes = IOError, HTTPException + @optional_args def network(t, raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, - error_classes=(IOError,)): + error_classes=_network_error_classes): """ Label a test as requiring network connection and skip test if it encounters a ``URLError``. @@ -727,6 +725,7 @@ def network(t, raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, >>> from pandas.util.testing import network >>> import urllib2 + >>> import nose >>> @network ... def test_network(): ... urllib2.urlopen("rabbit://bonanza.com") @@ -770,12 +769,25 @@ def network_wrapper(*args, **kwargs): return network_wrapper -def can_connect(url): - """tries to connect to the given url. True if succeeds, False if IOError raised""" +def can_connect(url, error_classes=_network_error_classes): + """Try to connect to the given url. True if succeeds, False if IOError + raised + + Parameters + ---------- + url : basestring + The URL to try to connect to + + Returns + ------- + connectable : bool + Return True if no IOError (unable to connect) or URLError (bad url) was + raised + """ try: - with closing(urllib2.urlopen(url)) as resp: + with urlopen(url): pass - except IOError: + except error_classes: return False else: return True @@ -783,8 +795,9 @@ def can_connect(url): @optional_args def with_connectivity_check(t, url="http://www.google.com", - raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, check_before_test=False, - error_classes=IOError): + raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT, + check_before_test=False, + error_classes=_network_error_classes): """ Label a test as requiring network connection and, if an error is encountered, only raise if it does not find a network connection. @@ -811,7 +824,10 @@ def with_connectivity_check(t, url="http://www.google.com", error classes to ignore. If not in ``error_classes``, raises the error. defaults to IOError. Be careful about changing the error classes here. 
- NOTE: ``raise_on_error`` supercedes ``check_before_test`` + Notes + ----- + * ``raise_on_error`` supercedes ``check_before_test`` + Returns ------- t : callable @@ -846,12 +862,12 @@ def with_connectivity_check(t, url="http://www.google.com", @wraps(t) def wrapper(*args, **kwargs): if check_before_test and not raise_on_error: - if not can_connect(url): + if not can_connect(url, error_classes): raise SkipTest try: return t(*args, **kwargs) except error_classes as e: - if raise_on_error or can_connect(url): + if raise_on_error or can_connect(url, error_classes): raise else: raise SkipTest("Skipping test due to lack of connectivity"
I feel like a broken record
https://api.github.com/repos/pandas-dev/pandas/pulls/4085
2013-06-29T17:52:20Z
2013-07-01T01:29:02Z
2013-07-01T01:29:02Z
2014-07-16T08:16:40Z