title
stringlengths
1
185
diff
stringlengths
0
32.2M
body
stringlengths
0
123k
url
stringlengths
57
58
created_at
stringlengths
20
20
closed_at
stringlengths
20
20
merged_at
stringlengths
20
20
updated_at
stringlengths
20
20
PERF: sort_values speedup for multiple columns with random numeric values
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 4cecf12a27042..4bdf033d6d5c6 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -2,7 +2,7 @@ import numpy as np import pandas.util.testing as tm from pandas import (DataFrame, Series, MultiIndex, date_range, period_range, - isnull, NaT) + isnull, NaT, timedelta_range) from .pandas_vb_common import setup # noqa @@ -440,6 +440,55 @@ def time_frame_xs(self, axis): self.df.xs(self.N / 2, axis=axis) +class SortValuesMultipleColumns(object): + goal_time = 0.1 + + param_names = ['columns'] + # params = generate_column_combinations( + # column_names=['less_repeated_strings', + # 'repeated_category', 'less_repeated_category', + # 'float', 'int_sorted', 'int_random', + # 'date', 'timedelta'], r=[2, 5], num=20) + params = ['repeated_strings|int_same_cardinality_as_repeated_strings', + 'int_same_cardinality_as_repeated_strings|' + 'int_same_cardinality_as_repeated_strings_copy'] + + def setup(self, columns): + N = 1000000 + + self.df = DataFrame( + {'repeated_strings': Series(tm.makeStringIndex(100).take( + np.random.randint(0, 100, size=N))), + 'less_repeated_strings': Series( + tm.makeStringIndex(10000).take( + np.random.randint(0, 10000, size=N))), + 'float': np.random.randn(N), + 'int_sorted': np.arange(N), + 'int_random': np.random.randint(0, 10000000, N), + 'date': date_range('20110101', freq='s', periods=N), + 'timedelta': timedelta_range('1 day', freq='s', periods=N), + }) + self.df['repeated_category'] = \ + self.df['repeated_strings'].astype('category') + self.df['less_repeated_category'] = \ + self.df['less_repeated_strings'].astype('category') + self.df['int_same_cardinality_as_repeated_strings'] = \ + self.df['repeated_strings'].rank(method='dense') + self.df['int_same_cardinality_as_repeated_strings_copy'] = \ + self.df['repeated_strings'].rank(method='dense') + self.df['int_same_cardinality_as_less_repeated_strings'] = \ 
+ self.df['less_repeated_strings'].rank(method='dense') + assert self.df['repeated_strings'].nunique() == \ + self.df['int_same_cardinality_as_repeated_strings'].nunique() + assert self.df['less_repeated_strings'].nunique() == \ + self.df['int_same_cardinality_as_less_repeated_strings']\ + .nunique() + + def time_frame_sort_values_by_multiple_columns(self, columns): + columns_list = columns.split('|') + DataFrame(self.df[columns_list]).sort_values(by=columns_list) + + class SortValues(object): goal_time = 0.2 @@ -447,11 +496,14 @@ class SortValues(object): param_names = ['ascending'] def setup(self, ascending): - self.df = DataFrame(np.random.randn(1000000, 2), columns=list('AB')) + self.df = DataFrame(np.random.randn(1000000, 5), columns=list('ABCDE')) def time_frame_sort_values(self, ascending): self.df.sort_values(by='A', ascending=ascending) + def time_frame_sort_values_two_columns(self, ascending): + self.df.sort_values(by=['A', 'B', 'C', 'D', 'E'], ascending=ascending) + class SortIndexByColumns(object): diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 4d907180da00a..e699552aa9674 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -3637,16 +3637,45 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False, raise ValueError('Length of ascending (%d) != length of by (%d)' % (len(ascending), len(by))) if len(by) > 1: - from pandas.core.sorting import lexsort_indexer + if any([is_object_dtype(self._get_label_or_level_values( + x, axis=axis, stacklevel=stacklevel)) for x in by]): + from pandas.core.sorting import lexsort_indexer + + keys = [] + for x in by: + k = self._get_label_or_level_values(x, axis=axis, + stacklevel=stacklevel) + keys.append(k) + indexer = lexsort_indexer(keys, orders=ascending, + na_position=na_position) + indexer = _ensure_platform_int(indexer) + + new_data = self._data.take(indexer, + axis=self._get_block_manager_axis( + axis), + verify=False) + else: + if not is_list_like(ascending): + ascending = 
[ascending] * len(by) + ascending = ascending[::-1] + new_data = self + from pandas.core.sorting import nargsort + kind = 'mergesort' + + for i, by_step in enumerate(by[::-1]): + k = self._get_label_or_level_values( + by_step, axis=axis, stacklevel=stacklevel) + + indexer = nargsort(k, kind=kind, ascending=ascending[i], + na_position=na_position) + + new_data = new_data._data.take( + indexer, + axis=self._get_block_manager_axis(axis), + convert=False, verify=False) + + new_data = self._constructor(new_data).__finalize__(self) - keys = [] - for x in by: - k = self._get_label_or_level_values(x, axis=axis, - stacklevel=stacklevel) - keys.append(k) - indexer = lexsort_indexer(keys, orders=ascending, - na_position=na_position) - indexer = _ensure_platform_int(indexer) else: from pandas.core.sorting import nargsort @@ -3660,9 +3689,9 @@ def sort_values(self, by, axis=0, ascending=True, inplace=False, indexer = nargsort(k, kind=kind, ascending=ascending, na_position=na_position) - new_data = self._data.take(indexer, - axis=self._get_block_manager_axis(axis), - verify=False) + new_data = self._data.take(indexer, + axis=self._get_block_manager_axis(axis), + verify=False) if inplace: return self._update_inplace(new_data)
Continue work on #17141. Speedup achieved for sorting by multiple columns: lexsort over multiple columns replaced with mergesort of columns in reverse order than specified in "by" argument of DataFrame.sort_values(). Issues: - only works for numerical data - only random enough data achieve sorting speedup (no factorization) - ASV of one-column sort is worse, why? - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19237
2018-01-14T08:03:40Z
2018-03-25T11:38:27Z
null
2018-03-25T11:38:27Z
CI: Check ASV for failed benchmarks
diff --git a/.travis.yml b/.travis.yml index bd5cac8955c8d..4cbe7f86bd2fa 100644 --- a/.travis.yml +++ b/.travis.yml @@ -73,6 +73,10 @@ matrix: env: - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" # In allow_failures + - dist: trusty + env: + - JOB="3.6_ASV" ASV=true + # In allow_failures - dist: trusty env: - JOB="3.6_DOC" DOC=true @@ -93,6 +97,9 @@ matrix: - dist: trusty env: - JOB="3.6_NUMPY_DEV" TEST_ARGS="--skip-slow --skip-network" PANDAS_TESTING_MODE="deprecate" + - dist: trusty + env: + - JOB="3.6_ASV" ASV=true - dist: trusty env: - JOB="3.6_DOC" DOC=true @@ -128,6 +135,7 @@ script: - ci/script_single.sh - ci/script_multi.sh - ci/lint.sh + - ci/asv.sh - echo "checking imports" - source activate pandas && python ci/check_imports.py - echo "script done" diff --git a/asv_bench/benchmarks/algorithms.py b/asv_bench/benchmarks/algorithms.py index 45d62163ae80b..cccd38ef11251 100644 --- a/asv_bench/benchmarks/algorithms.py +++ b/asv_bench/benchmarks/algorithms.py @@ -1,3 +1,4 @@ +import warnings from importlib import import_module import numpy as np @@ -83,7 +84,8 @@ def setup(self): self.all = self.uniques.repeat(10) def time_match_string(self): - pd.match(self.all, self.uniques) + with warnings.catch_warnings(record=True): + pd.match(self.all, self.uniques) class Hashing(object): diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py index 1613ca1b97f4b..7743921003353 100644 --- a/asv_bench/benchmarks/categoricals.py +++ b/asv_bench/benchmarks/categoricals.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np import pandas as pd import pandas.util.testing as tm @@ -119,11 +121,15 @@ def setup(self): self.s_str = pd.Series(tm.makeCategoricalIndex(N, ncats)).astype(str) self.s_str_cat = self.s_str.astype('category') - self.s_str_cat_ordered = self.s_str.astype('category', ordered=True) + with warnings.catch_warnings(record=True): + self.s_str_cat_ordered = 
self.s_str.astype('category', + ordered=True) self.s_int = pd.Series(np.random.randint(0, ncats, size=N)) self.s_int_cat = self.s_int.astype('category') - self.s_int_cat_ordered = self.s_int.astype('category', ordered=True) + with warnings.catch_warnings(record=True): + self.s_int_cat_ordered = self.s_int.astype('category', + ordered=True) def time_rank_string(self): self.s_str.rank() diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 4cecf12a27042..4ff71c706cd34 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -1,4 +1,6 @@ import string +import warnings + import numpy as np import pandas.util.testing as tm from pandas import (DataFrame, Series, MultiIndex, date_range, period_range, @@ -15,7 +17,8 @@ def setup(self): self.df = DataFrame(np.random.randn(10000, 25)) self.df['foo'] = 'bar' self.df['bar'] = 'baz' - self.df = self.df.consolidate() + with warnings.catch_warnings(record=True): + self.df = self.df.consolidate() def time_frame_get_numeric_data(self): self.df._get_numeric_data() @@ -141,8 +144,8 @@ class Repr(object): def setup(self): nrows = 10000 data = np.random.randn(nrows, 10) - idx = MultiIndex.from_arrays(np.tile(np.random.randn(3, nrows / 100), - 100)) + arrays = np.tile(np.random.randn(3, int(nrows / 100)), 100) + idx = MultiIndex.from_arrays(arrays) self.df3 = DataFrame(data, index=idx) self.df4 = DataFrame(data, index=np.random.randn(nrows)) self.df_tall = DataFrame(np.random.randn(nrows, 10)) diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py index 7d63d78084270..21c1ccf46e1c4 100644 --- a/asv_bench/benchmarks/gil.py +++ b/asv_bench/benchmarks/gil.py @@ -1,9 +1,13 @@ import numpy as np import pandas.util.testing as tm -from pandas import (DataFrame, Series, rolling_median, rolling_mean, - rolling_min, rolling_max, rolling_var, rolling_skew, - rolling_kurt, rolling_std, read_csv, factorize, date_range) +from pandas import DataFrame, 
Series, read_csv, factorize, date_range from pandas.core.algorithms import take_1d +try: + from pandas import (rolling_median, rolling_mean, rolling_min, rolling_max, + rolling_var, rolling_skew, rolling_kurt, rolling_std) + have_rolling_methods = True +except ImportError: + have_rolling_methods = False try: from pandas._libs import algos except ImportError: @@ -171,8 +175,7 @@ def run(period): class ParallelRolling(object): goal_time = 0.2 - params = ['rolling_median', 'rolling_mean', 'rolling_min', 'rolling_max', - 'rolling_var', 'rolling_skew', 'rolling_kurt', 'rolling_std'] + params = ['median', 'mean', 'min', 'max', 'var', 'skew', 'kurt', 'std'] param_names = ['method'] def setup(self, method): @@ -181,34 +184,28 @@ def setup(self, method): win = 100 arr = np.random.rand(100000) if hasattr(DataFrame, 'rolling'): - rolling = {'rolling_median': 'median', - 'rolling_mean': 'mean', - 'rolling_min': 'min', - 'rolling_max': 'max', - 'rolling_var': 'var', - 'rolling_skew': 'skew', - 'rolling_kurt': 'kurt', - 'rolling_std': 'std'} df = DataFrame(arr).rolling(win) @test_parallel(num_threads=2) def parallel_rolling(): - getattr(df, rolling[method])() + getattr(df, method)() self.parallel_rolling = parallel_rolling - else: - rolling = {'rolling_median': rolling_median, - 'rolling_mean': rolling_mean, - 'rolling_min': rolling_min, - 'rolling_max': rolling_max, - 'rolling_var': rolling_var, - 'rolling_skew': rolling_skew, - 'rolling_kurt': rolling_kurt, - 'rolling_std': rolling_std} + elif have_rolling_methods: + rolling = {'median': rolling_median, + 'mean': rolling_mean, + 'min': rolling_min, + 'max': rolling_max, + 'var': rolling_var, + 'skew': rolling_skew, + 'kurt': rolling_kurt, + 'std': rolling_std} @test_parallel(num_threads=2) def parallel_rolling(): rolling[method](arr, win) self.parallel_rolling = parallel_rolling + else: + raise NotImplementedError def time_rolling(self, method): self.parallel_rolling() diff --git a/asv_bench/benchmarks/groupby.py 
b/asv_bench/benchmarks/groupby.py index 4dfd215e6dc3a..8aa67d8bc6a6a 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -1,3 +1,4 @@ +import warnings from string import ascii_letters from itertools import product from functools import partial @@ -340,7 +341,8 @@ def time_dt_size(self): self.df.groupby(['dates']).size() def time_dt_timegrouper_size(self): - self.df.groupby(TimeGrouper(key='dates', freq='M')).size() + with warnings.catch_warnings(record=True): + self.df.groupby(TimeGrouper(key='dates', freq='M')).size() def time_category_size(self): self.draws.groupby(self.cats).size() @@ -467,7 +469,7 @@ class SumMultiLevel(object): def setup(self): N = 50 - self.df = DataFrame({'A': range(N) * 2, + self.df = DataFrame({'A': list(range(N)) * 2, 'B': range(N * 2), 'C': 1}).set_index(['A', 'B']) diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index b35f00db2b054..77e013e1e4fb0 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np import pandas.util.testing as tm from pandas import (Series, DataFrame, MultiIndex, Int64Index, Float64Index, @@ -91,7 +93,8 @@ def time_getitem_pos_slice(self, index): self.s[:80000] def time_get_value(self, index): - self.s.get_value(self.lbl) + with warnings.catch_warnings(record=True): + self.s.get_value(self.lbl) def time_getitem_scalar(self, index): self.s[self.lbl] @@ -112,7 +115,8 @@ def setup(self): self.bool_obj_indexer = self.bool_indexer.astype(object) def time_get_value(self): - self.df.get_value(self.idx_scalar, self.col_scalar) + with warnings.catch_warnings(record=True): + self.df.get_value(self.idx_scalar, self.col_scalar) def time_ix(self): self.df.ix[self.idx_scalar, self.col_scalar] @@ -231,11 +235,13 @@ class PanelIndexing(object): goal_time = 0.2 def setup(self): - self.p = Panel(np.random.randn(100, 100, 100)) - self.inds = range(0, 100, 10) + with 
warnings.catch_warnings(record=True): + self.p = Panel(np.random.randn(100, 100, 100)) + self.inds = range(0, 100, 10) def time_subset(self): - self.p.ix[(self.inds, self.inds, self.inds)] + with warnings.catch_warnings(record=True): + self.p.ix[(self.inds, self.inds, self.inds)] class MethodLookup(object): @@ -295,7 +301,8 @@ def setup(self): def time_insert(self): np.random.seed(1234) for i in range(100): - self.df.insert(0, i, np.random.randn(self.N)) + self.df.insert(0, i, np.random.randn(self.N), + allow_duplicates=True) def time_assign_with_setitem(self): np.random.seed(1234) diff --git a/asv_bench/benchmarks/io/hdf.py b/asv_bench/benchmarks/io/hdf.py index 5c0e9586c1cb5..4b6e1d69af92d 100644 --- a/asv_bench/benchmarks/io/hdf.py +++ b/asv_bench/benchmarks/io/hdf.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np from pandas import DataFrame, Panel, date_range, HDFStore, read_hdf import pandas.util.testing as tm @@ -105,22 +107,25 @@ class HDFStorePanel(BaseIO): def setup(self): self.fname = '__test__.h5' - self.p = Panel(np.random.randn(20, 1000, 25), - items=['Item%03d' % i for i in range(20)], - major_axis=date_range('1/1/2000', periods=1000), - minor_axis=['E%03d' % i for i in range(25)]) - self.store = HDFStore(self.fname) - self.store.append('p1', self.p) + with warnings.catch_warnings(record=True): + self.p = Panel(np.random.randn(20, 1000, 25), + items=['Item%03d' % i for i in range(20)], + major_axis=date_range('1/1/2000', periods=1000), + minor_axis=['E%03d' % i for i in range(25)]) + self.store = HDFStore(self.fname) + self.store.append('p1', self.p) def teardown(self): self.store.close() self.remove(self.fname) def time_read_store_table_panel(self): - self.store.select('p1') + with warnings.catch_warnings(record=True): + self.store.select('p1') def time_write_store_table_panel(self): - self.store.append('p2', self.p) + with warnings.catch_warnings(record=True): + self.store.append('p2', self.p) class HDF(BaseIO): diff --git 
a/asv_bench/benchmarks/join_merge.py b/asv_bench/benchmarks/join_merge.py index 5b40a29d54683..de0a3b33da147 100644 --- a/asv_bench/benchmarks/join_merge.py +++ b/asv_bench/benchmarks/join_merge.py @@ -1,3 +1,4 @@ +import warnings import string import numpy as np @@ -26,7 +27,8 @@ def setup(self): self.mdf1['obj2'] = 'bar' self.mdf1['int1'] = 5 try: - self.mdf1.consolidate(inplace=True) + with warnings.catch_warnings(record=True): + self.mdf1.consolidate(inplace=True) except: pass self.mdf2 = self.mdf1.copy() @@ -75,16 +77,23 @@ class ConcatPanels(object): param_names = ['axis', 'ignore_index'] def setup(self, axis, ignore_index): - panel_c = Panel(np.zeros((10000, 200, 2), dtype=np.float32, order='C')) - self.panels_c = [panel_c] * 20 - panel_f = Panel(np.zeros((10000, 200, 2), dtype=np.float32, order='F')) - self.panels_f = [panel_f] * 20 + with warnings.catch_warnings(record=True): + panel_c = Panel(np.zeros((10000, 200, 2), + dtype=np.float32, + order='C')) + self.panels_c = [panel_c] * 20 + panel_f = Panel(np.zeros((10000, 200, 2), + dtype=np.float32, + order='F')) + self.panels_f = [panel_f] * 20 def time_c_ordered(self, axis, ignore_index): - concat(self.panels_c, axis=axis, ignore_index=ignore_index) + with warnings.catch_warnings(record=True): + concat(self.panels_c, axis=axis, ignore_index=ignore_index) def time_f_ordered(self, axis, ignore_index): - concat(self.panels_f, axis=axis, ignore_index=ignore_index) + with warnings.catch_warnings(record=True): + concat(self.panels_f, axis=axis, ignore_index=ignore_index) class ConcatDataFrames(object): diff --git a/asv_bench/benchmarks/offset.py b/asv_bench/benchmarks/offset.py index 034e861e7fc01..e161b887ee86f 100644 --- a/asv_bench/benchmarks/offset.py +++ b/asv_bench/benchmarks/offset.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +import warnings from datetime import datetime import numpy as np @@ -76,7 +77,8 @@ def setup(self, offset): self.data = pd.Series(rng) def time_add_offset(self, offset): - self.data + 
offset + with warnings.catch_warnings(record=True): + self.data + offset class OffsetDatetimeIndexArithmetic(object): @@ -90,7 +92,8 @@ def setup(self, offset): self.data = pd.date_range(start='1/1/2000', periods=N, freq='T') def time_add_offset(self, offset): - self.data + offset + with warnings.catch_warnings(record=True): + self.data + offset class OffestDatetimeArithmetic(object): diff --git a/asv_bench/benchmarks/panel_ctor.py b/asv_bench/benchmarks/panel_ctor.py index 456fe959c5aa3..ce946c76ed199 100644 --- a/asv_bench/benchmarks/panel_ctor.py +++ b/asv_bench/benchmarks/panel_ctor.py @@ -1,3 +1,4 @@ +import warnings from datetime import datetime, timedelta from pandas import DataFrame, DatetimeIndex, date_range @@ -19,7 +20,8 @@ def setup(self): self.data_frames[x] = df def time_from_dict(self): - Panel.from_dict(self.data_frames) + with warnings.catch_warnings(record=True): + Panel.from_dict(self.data_frames) class SameIndexes(object): @@ -34,7 +36,8 @@ def setup(self): self.data_frames = dict(enumerate([df] * 100)) def time_from_dict(self): - Panel.from_dict(self.data_frames) + with warnings.catch_warnings(record=True): + Panel.from_dict(self.data_frames) class TwoIndexes(object): @@ -53,4 +56,5 @@ def setup(self): self.data_frames = dict(enumerate(dfs)) def time_from_dict(self): - Panel.from_dict(self.data_frames) + with warnings.catch_warnings(record=True): + Panel.from_dict(self.data_frames) diff --git a/asv_bench/benchmarks/panel_methods.py b/asv_bench/benchmarks/panel_methods.py index 9ee1949b311db..a5b1a92e9cf67 100644 --- a/asv_bench/benchmarks/panel_methods.py +++ b/asv_bench/benchmarks/panel_methods.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np from .pandas_vb_common import Panel, setup # noqa @@ -10,10 +12,13 @@ class PanelMethods(object): param_names = ['axis'] def setup(self, axis): - self.panel = Panel(np.random.randn(100, 1000, 100)) + with warnings.catch_warnings(record=True): + self.panel = Panel(np.random.randn(100, 1000, 100)) 
def time_pct_change(self, axis): - self.panel.pct_change(1, axis=axis) + with warnings.catch_warnings(record=True): + self.panel.pct_change(1, axis=axis) def time_shift(self, axis): - self.panel.shift(1, axis=axis) + with warnings.catch_warnings(record=True): + self.panel.shift(1, axis=axis) diff --git a/asv_bench/benchmarks/reindex.py b/asv_bench/benchmarks/reindex.py index 69a1a604b1ccc..413427a16f40b 100644 --- a/asv_bench/benchmarks/reindex.py +++ b/asv_bench/benchmarks/reindex.py @@ -167,10 +167,6 @@ def setup(self): col_array2 = col_array.copy() col_array2[:, :10000] = np.nan self.col_array_list = list(col_array) - self.col_array_list2 = list(col_array2) def time_lib_fast_zip(self): lib.fast_zip(self.col_array_list) - - def time_lib_fast_zip_fillna(self): - lib.fast_zip_fillna(self.col_array_list2) diff --git a/asv_bench/benchmarks/reshape.py b/asv_bench/benchmarks/reshape.py index bd3b580d9d130..9044b080c45f9 100644 --- a/asv_bench/benchmarks/reshape.py +++ b/asv_bench/benchmarks/reshape.py @@ -104,9 +104,9 @@ def setup(self): self.letters = list('ABCD') yrvars = [l + str(num) for l, num in product(self.letters, range(1, nyrs + 1))] - + columns = [str(i) for i in range(nidvars)] + yrvars self.df = DataFrame(np.random.randn(N, nidvars + len(yrvars)), - columns=list(range(nidvars)) + yrvars) + columns=columns) self.df['id'] = self.df.index def time_wide_to_long_big(self): diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 4435327e1eb38..b203c8b0fa5c9 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np from pandas import Series import pandas.util.testing as tm @@ -23,7 +25,8 @@ def time_endswith(self): self.s.str.endswith('A') def time_extract(self): - self.s.str.extract('(\\w*)A(\\w*)') + with warnings.catch_warnings(record=True): + self.s.str.extract('(\\w*)A(\\w*)') def time_findall(self): self.s.str.findall('[A-Z]+') diff --git 
a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index ea2f077f980d0..e1a6bc7a68e9d 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -1,3 +1,4 @@ +import warnings from datetime import timedelta import numpy as np @@ -74,7 +75,8 @@ def setup(self): freq='S')) def time_infer_dst(self): - self.index.tz_localize('US/Eastern', infer_dst=True) + with warnings.catch_warnings(record=True): + self.index.tz_localize('US/Eastern', infer_dst=True) class ResetIndex(object): @@ -365,7 +367,7 @@ class ToDatetimeCache(object): def setup(self, cache): N = 10000 - self.unique_numeric_seconds = range(N) + self.unique_numeric_seconds = list(range(N)) self.dup_numeric_seconds = [1000] * N self.dup_string_dates = ['2000-02-11'] * N self.dup_string_with_tz = ['2000-02-11 15:00:00-0800'] * N diff --git a/ci/asv.sh b/ci/asv.sh new file mode 100755 index 0000000000000..1e9a8d6380eb5 --- /dev/null +++ b/ci/asv.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +echo "inside $0" + +source activate pandas + +RET=0 + +if [ "$ASV" ]; then + echo "Check for failed asv benchmarks" + + cd asv_bench + + asv machine --yes + + time asv dev | tee failed_asv.txt + + echo "The following asvs benchmarks (if any) failed." + + cat failed_asv.txt | grep "failed" failed_asv.txt + + if [ $? = "0" ]; then + RET=1 + fi + + echo "DONE displaying failed asvs benchmarks." 
+ + rm failed_asv.txt + + echo "Check for failed asv benchmarks DONE" +else + echo "NOT checking for failed asv benchmarks" +fi + +exit $RET diff --git a/ci/requirements-3.6_ASV.build b/ci/requirements-3.6_ASV.build new file mode 100644 index 0000000000000..bc72eed2a0d4e --- /dev/null +++ b/ci/requirements-3.6_ASV.build @@ -0,0 +1,5 @@ +python=3.6* +python-dateutil +pytz +numpy=1.13* +cython diff --git a/ci/requirements-3.6_ASV.run b/ci/requirements-3.6_ASV.run new file mode 100644 index 0000000000000..6c45e3371e9cf --- /dev/null +++ b/ci/requirements-3.6_ASV.run @@ -0,0 +1,25 @@ +ipython +ipykernel +ipywidgets +sphinx=1.5* +nbconvert +nbformat +notebook +matplotlib +seaborn +scipy +lxml +beautifulsoup4 +html5lib +pytables +python-snappy +openpyxl +xlrd +xlwt +xlsxwriter +sqlalchemy +numexpr +bottleneck +statsmodels +xarray +pyqt diff --git a/ci/requirements-3.6_ASV.sh b/ci/requirements-3.6_ASV.sh new file mode 100755 index 0000000000000..8a46f85dbb6bc --- /dev/null +++ b/ci/requirements-3.6_ASV.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +source activate pandas + +echo "[install ASV_BUILD deps]" + +pip install git+https://github.com/spacetelescope/asv diff --git a/ci/script_multi.sh b/ci/script_multi.sh index c1fa756ece965..766e51625fbe6 100755 --- a/ci/script_multi.sh +++ b/ci/script_multi.sh @@ -37,6 +37,9 @@ if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then elif [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" +elif [ "$ASV" ]; then + echo "We are not running pytest as this is an asv-build" + elif [ "$COVERAGE" ]; then echo pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas pytest -s -n 2 -m "not single" --cov=pandas --cov-report xml:/tmp/cov-multiple.xml --junitxml=/tmp/multiple.xml --strict $TEST_ARGS pandas diff --git a/ci/script_single.sh b/ci/script_single.sh index 005c648ee025f..153847ab2e8c9 100755 --- a/ci/script_single.sh +++ 
b/ci/script_single.sh @@ -22,6 +22,9 @@ if [ "$PIP_BUILD_TEST" ] || [ "$CONDA_BUILD_TEST" ]; then elif [ "$DOC" ]; then echo "We are not running pytest as this is a doc-build" +elif [ "$ASV" ]; then + echo "We are not running pytest as this is an asv-build" + elif [ "$COVERAGE" ]; then echo pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas pytest -s -m "single" --strict --cov=pandas --cov-report xml:/tmp/cov-single.xml --junitxml=/tmp/single.xml $TEST_ARGS pandas
closes #19518 xref #19104 Not sure if this should belong in `ci/lint.sh`, but testing the possibility of running the asv benchmarks on ci for failed benchmarks.
https://api.github.com/repos/pandas-dev/pandas/pulls/19236
2018-01-14T07:25:32Z
2018-02-08T01:17:25Z
2018-02-08T01:17:25Z
2018-02-08T07:36:06Z
Doc: Adds example of joining a series to a dataframe.
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index ebade853313ab..6c8061f5cbdfd 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -714,6 +714,31 @@ either the left or right tables, the values in the joined table will be labels=['left', 'right'], vertical=False); plt.close('all'); +To join a Series and a DataFrame, the Series has to be transformed into a DataFrame first: + +.. ipython:: python + + df = pd.DataFrame({"Let": ["A", "B", "C"], "Num": [1, 2, 3]}) + df + + # The series has a multi-index with levels corresponding to columns in the DataFrame we want to merge with + ser = pd.Series( + ['a', 'b', 'c', 'd', 'e', 'f'], + index=pd.MultiIndex.from_arrays([["A", "B", "C"]*2, [1, 2, 3, 4, 5, 6]]) + ) + ser + + # Name the row index levels + ser.index.names=['Let','Num'] + ser + + # reset_index turns the multi-level row index into columns, which requires a DataFrame + df2 = ser.reset_index() + type(df2) + + # Now we merge the DataFrames + pd.merge(df, df2, on=['Let','Num']) + Here is another example with duplicate join keys in ``DataFrame``s: .. ipython:: python
- [X] closes: #12550
https://api.github.com/repos/pandas-dev/pandas/pulls/19235
2018-01-14T03:12:59Z
2018-11-03T05:28:27Z
null
2018-11-03T05:28:27Z
Adding missing sphinx dependencies to build the documentation
diff --git a/ci/environment-dev.yaml b/ci/environment-dev.yaml index c72abd0c19516..d11bee97dbe5e 100644 --- a/ci/environment-dev.yaml +++ b/ci/environment-dev.yaml @@ -4,8 +4,11 @@ channels: - conda-forge dependencies: - Cython + - ipython + - matplotlib - NumPy - moto + - nbsphinx - pytest>=3.1 - python-dateutil>=2.5.0 - python=3 diff --git a/ci/requirements_dev.txt b/ci/requirements_dev.txt index 82f8de277c57b..3f589e9ec5c4c 100644 --- a/ci/requirements_dev.txt +++ b/ci/requirements_dev.txt @@ -1,10 +1,13 @@ # This file was autogenerated by scripts/convert_deps.py # Do not modify directly Cython +ipython +matplotlib NumPy moto +nbsphinx pytest>=3.1 python-dateutil>=2.5.0 pytz setuptools>=3.3 -sphinx +sphinx \ No newline at end of file
Seems like the documentation fails to build in a clean environement (with `make.py html`) because sphinx and its sphinx ipython directive have some dependencies that are not in the dev requirements. These dependencies are. * ipython * nbsphinx * matplotlib Next are the crashes: ``` Traceback (most recent call last): File "/home/mgarcia/.anaconda3/envs/pandas_dev/lib/python3.6/site-packages/sphinx/registry.py", line 196, in load_extension mod = __import__(extname, None, None, ['setup']) File "/home/mgarcia/src/forks/pandas/doc/sphinxext/ipython_sphinxext/ipython_directive.py", line 133, in <module> from IPython import Config ModuleNotFoundError: No module named 'IPython' ``` ``` Traceback (most recent call last): File "/home/mgarcia/.anaconda3/envs/pandas_dev/lib/python3.6/site-packages/sphinx/registry.py", line 196, in load_extension mod = __import__(extname, None, None, ['setup']) ModuleNotFoundError: No module named 'nbsphinx' ``` ``` Traceback (most recent call last): File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/sphinx/cmdline.py", line 306, in main app.build(opts.force_all, filenames) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/sphinx/application.py", line 339, in build self.builder.build_update() File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 329, in build_update 'out of date' % len(to_build)) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/sphinx/builders/__init__.py", line 342, in build updated_docnames = set(self.env.update(self.config, self.srcdir, self.doctreedir)) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/sphinx/environment/__init__.py", line 601, in update self._read_serial(docnames, self.app) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/sphinx/environment/__init__.py", line 621, in _read_serial self.read_doc(docname, app) File 
"/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/sphinx/environment/__init__.py", line 758, in read_doc pub.publish() File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/core.py", line 217, in publish self.settings) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/sphinx/io.py", line 74, in read self.parse() File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/readers/__init__.py", line 78, in parse self.parser.parse(self.input, document) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/__init__.py", line 191, in parse self.statemachine.run(inputlines, document, inliner=self.inliner) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 171, in run input_source=document['source']) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/statemachine.py", line 239, in run context, state, transitions) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/statemachine.py", line 460, in check_line return method(match, context, next_state) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 2328, in explicit_markup self.explicit_list(blank_finish) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 2358, in explicit_list match_titles=self.state_machine.match_titles) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 319, in nested_list_parse node=node, match_titles=match_titles) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 196, in run results = StateMachineWS.run(self, input_lines, input_offset) File 
"/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/statemachine.py", line 239, in run context, state, transitions) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/statemachine.py", line 460, in check_line return method(match, context, next_state) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 2631, in explicit_markup nodelist, blank_finish = self.explicit_construct(match) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 2338, in explicit_construct return method(self, expmatch) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 2081, in directive directive_class, match, type_name, option_presets) File "/home/mgarcia/.anaconda3/envs/pandas_dev2/lib/python3.6/site-packages/docutils/parsers/rst/states.py", line 2130, in run_directive result = directive_instance.run() File "/home/mgarcia/src/forks/pandas/doc/sphinxext/ipython_sphinxext/ipython_directive.py", line 840, in run rgxin, rgxout, promptin, promptout = self.setup() File "/home/mgarcia/src/forks/pandas/doc/sphinxext/ipython_sphinxext/ipython_directive.py", line 788, in setup import matplotlib ModuleNotFoundError: No module named 'matplotlib' ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19234
2018-01-14T01:07:39Z
2018-01-14T01:40:23Z
null
2018-01-14T01:40:23Z
COMPAT: properly handle objects of datetimelike in astype_nansafe
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 672e60c9dcde5..5155662d2f97d 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -649,10 +649,12 @@ def astype_nansafe(arr, dtype, copy=True): if issubclass(dtype.type, text_type): # in Py3 that's str, in Py2 that's unicode return lib.astype_unicode(arr.ravel()).reshape(arr.shape) + elif issubclass(dtype.type, string_types): return lib.astype_str(arr.ravel()).reshape(arr.shape) + elif is_datetime64_dtype(arr): - if dtype == object: + if is_object_dtype(dtype): return tslib.ints_to_pydatetime(arr.view(np.int64)) elif dtype == np.int64: return arr.view(dtype) @@ -666,10 +668,10 @@ def astype_nansafe(arr, dtype, copy=True): to_dtype=dtype)) elif is_timedelta64_dtype(arr): - if dtype == np.int64: - return arr.view(dtype) - elif dtype == object: + if is_object_dtype(dtype): return tslib.ints_to_pytimedelta(arr.view(np.int64)) + elif dtype == np.int64: + return arr.view(dtype) # in py3, timedelta64[ns] are int64 if ((PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or @@ -696,9 +698,21 @@ def astype_nansafe(arr, dtype, copy=True): raise ValueError('Cannot convert non-finite values (NA or inf) to ' 'integer') - elif is_object_dtype(arr.dtype) and np.issubdtype(dtype.type, np.integer): + elif is_object_dtype(arr): + # work around NumPy brokenness, #1987 - return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape) + if np.issubdtype(dtype.type, np.integer): + return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape) + + # if we have a datetime/timedelta array of objects + # then coerce to a proper dtype and recall astype_nansafe + + elif is_datetime64_dtype(dtype): + from pandas import to_datetime + return astype_nansafe(to_datetime(arr).values, dtype, copy=copy) + elif is_timedelta64_dtype(dtype): + from pandas import to_timedelta + return astype_nansafe(to_timedelta(arr).values, dtype, copy=copy) if dtype.name in ("datetime64", "timedelta64"): msg = ("Passing in 
'{dtype}' dtype with no frequency is " @@ -709,7 +723,6 @@ def astype_nansafe(arr, dtype, copy=True): dtype = np.dtype(dtype.name + "[ns]") if copy: - return arr.astype(dtype, copy=True) return arr.view(dtype) diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 70eee5984b438..38bdecc9eb88f 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -640,6 +640,22 @@ def test_astype_categoricaldtype_class_raises(self, cls): with tm.assert_raises_regex(TypeError, xpr): df['A'].astype(cls) + @pytest.mark.parametrize("dtype", ["M8", "m8"]) + @pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D']) + def test_astype_from_datetimelike_to_objectt(self, dtype, unit): + # tests astype to object dtype + # gh-19223 / gh-12425 + dtype = "{}[{}]".format(dtype, unit) + arr = np.array([[1, 2, 3]], dtype=dtype) + df = DataFrame(arr) + result = df.astype(object) + assert (result.dtypes == object).all() + + if dtype.startswith('M8'): + assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit) + else: + assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit) + @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) @pytest.mark.parametrize("dtype", ["M8", "m8"]) @pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D'])
re-close #19116
https://api.github.com/repos/pandas-dev/pandas/pulls/19232
2018-01-13T23:07:50Z
2018-01-14T14:10:23Z
2018-01-14T14:10:23Z
2018-01-14T14:10:49Z
ENH: Add IntervalDtype support to IntervalIndex.astype
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a0205a8d64cb7..6fe15133914da 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -202,6 +202,7 @@ Other Enhancements - ``Resampler`` objects now have a functioning :attr:`~pandas.core.resample.Resampler.pipe` method. Previously, calls to ``pipe`` were diverted to the ``mean`` method (:issue:`17905`). - :func:`~pandas.api.types.is_scalar` now returns ``True`` for ``DateOffset`` objects (:issue:`18943`). +- ``IntervalIndex.astype`` now supports conversions between subtypes when passed an ``IntervalDtype`` (:issue:`19197`) .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 2ec35889d6a7a..1eb87aa99fd1e 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -710,7 +710,8 @@ def __eq__(self, other): # None should match any subtype return True else: - return self.subtype == other.subtype + from pandas.core.dtypes.common import is_dtype_equal + return is_dtype_equal(self.subtype, other.subtype) @classmethod def is_dtype(cls, dtype): diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index baf80173d7362..58b1bdb3f55ea 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -20,7 +20,8 @@ is_scalar, is_float, is_number, - is_integer) + is_integer, + pandas_dtype) from pandas.core.indexes.base import ( Index, _ensure_index, default_pprint, _index_shared_docs) @@ -699,8 +700,16 @@ def copy(self, deep=False, name=None): @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): - if is_interval_dtype(dtype): - return self.copy() if copy else self + dtype = pandas_dtype(dtype) + if is_interval_dtype(dtype) and dtype != self.dtype: + try: + new_left = self.left.astype(dtype.subtype) + new_right = self.right.astype(dtype.subtype) + except TypeError: + msg = ('Cannot convert {dtype} to {new_dtype}; subtypes 
are ' + 'incompatible') + raise TypeError(msg.format(dtype=self.dtype, new_dtype=dtype)) + return self._shallow_copy(new_left, new_right) return super(IntervalIndex, self).astype(dtype, copy=copy) @cache_readonly diff --git a/pandas/tests/dtypes/test_dtypes.py b/pandas/tests/dtypes/test_dtypes.py index 692fb3271cfda..d800a7b92b559 100644 --- a/pandas/tests/dtypes/test_dtypes.py +++ b/pandas/tests/dtypes/test_dtypes.py @@ -534,6 +534,12 @@ def test_equality(self): assert not is_dtype_equal(IntervalDtype('int64'), IntervalDtype('float64')) + # invalid subtype comparisons do not raise when directly compared + dtype1 = IntervalDtype('float64') + dtype2 = IntervalDtype('datetime64[ns, US/Eastern]') + assert dtype1 != dtype2 + assert dtype2 != dtype1 + @pytest.mark.parametrize('subtype', [ None, 'interval', 'Interval', 'int64', 'uint64', 'float64', 'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')]) diff --git a/pandas/tests/indexes/interval/test_astype.py b/pandas/tests/indexes/interval/test_astype.py new file mode 100644 index 0000000000000..b3a4bfa878c3f --- /dev/null +++ b/pandas/tests/indexes/interval/test_astype.py @@ -0,0 +1,209 @@ +from __future__ import division + +import pytest +import numpy as np +from pandas import ( + Index, + IntervalIndex, + interval_range, + CategoricalIndex, + Timestamp, + Timedelta, + NaT) +from pandas.core.dtypes.dtypes import CategoricalDtype, IntervalDtype +import pandas.util.testing as tm + + +class Base(object): + """Tests common to IntervalIndex with any subtype""" + + def test_astype_idempotent(self, index): + result = index.astype('interval') + tm.assert_index_equal(result, index) + + result = index.astype(index.dtype) + tm.assert_index_equal(result, index) + + def test_astype_object(self, index): + result = index.astype(object) + expected = Index(index.values, dtype='object') + tm.assert_index_equal(result, expected) + assert not result.equals(index) + + def test_astype_category(self, index): + result = 
index.astype('category') + expected = CategoricalIndex(index.values) + tm.assert_index_equal(result, expected) + + result = index.astype(CategoricalDtype()) + tm.assert_index_equal(result, expected) + + # non-default params + categories = index.dropna().unique().values[:-1] + dtype = CategoricalDtype(categories=categories, ordered=True) + result = index.astype(dtype) + expected = CategoricalIndex( + index.values, categories=categories, ordered=True) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('dtype', [ + 'int64', 'uint64', 'float64', 'complex128', 'period[M]', + 'timedelta64', 'timedelta64[ns]', 'datetime64', 'datetime64[ns]', + 'datetime64[ns, US/Eastern]']) + def test_astype_cannot_cast(self, index, dtype): + msg = 'Cannot cast IntervalIndex to dtype' + with tm.assert_raises_regex(TypeError, msg): + index.astype(dtype) + + def test_astype_invalid_dtype(self, index): + msg = 'data type "fake_dtype" not understood' + with tm.assert_raises_regex(TypeError, msg): + index.astype('fake_dtype') + + +class TestIntSubtype(Base): + """Tests specific to IntervalIndex with integer-like subtype""" + + indexes = [ + IntervalIndex.from_breaks(np.arange(-10, 11, dtype='int64')), + IntervalIndex.from_breaks( + np.arange(100, dtype='uint64'), closed='left'), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize('subtype', [ + 'float64', 'datetime64[ns]', 'timedelta64[ns]']) + def test_subtype_conversion(self, index, subtype): + dtype = IntervalDtype(subtype) + result = index.astype(dtype) + expected = IntervalIndex.from_arrays(index.left.astype(subtype), + index.right.astype(subtype), + closed=index.closed) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize('subtype_start, subtype_end', [ + ('int64', 'uint64'), ('uint64', 'int64')]) + def test_subtype_integer(self, subtype_start, subtype_end): + index = IntervalIndex.from_breaks(np.arange(100, dtype=subtype_start)) + 
dtype = IntervalDtype(subtype_end) + result = index.astype(dtype) + expected = IntervalIndex.from_arrays(index.left.astype(subtype_end), + index.right.astype(subtype_end), + closed=index.closed) + tm.assert_index_equal(result, expected) + + @pytest.mark.xfail(reason='GH 15832') + def test_subtype_integer_errors(self): + # int64 -> uint64 fails with negative values + index = interval_range(-10, 10) + dtype = IntervalDtype('uint64') + with pytest.raises(ValueError): + index.astype(dtype) + + +class TestFloatSubtype(Base): + """Tests specific to IntervalIndex with float subtype""" + + indexes = [ + interval_range(-10.0, 10.0, closed='neither'), + IntervalIndex.from_arrays([-1.5, np.nan, 0., 0., 1.5], + [-0.5, np.nan, 1., 1., 3.], + closed='both'), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize('subtype', ['int64', 'uint64']) + def test_subtype_integer(self, subtype): + index = interval_range(0.0, 10.0) + dtype = IntervalDtype(subtype) + result = index.astype(dtype) + expected = IntervalIndex.from_arrays(index.left.astype(subtype), + index.right.astype(subtype), + closed=index.closed) + tm.assert_index_equal(result, expected) + + # raises with NA + msg = 'Cannot convert NA to integer' + with tm.assert_raises_regex(ValueError, msg): + index.insert(0, np.nan).astype(dtype) + + @pytest.mark.xfail(reason='GH 15832') + def test_subtype_integer_errors(self): + # float64 -> uint64 fails with negative values + index = interval_range(-10.0, 10.0) + dtype = IntervalDtype('uint64') + with pytest.raises(ValueError): + index.astype(dtype) + + # float64 -> integer-like fails with non-integer valued floats + index = interval_range(0.0, 10.0, freq=0.25) + dtype = IntervalDtype('int64') + with pytest.raises(ValueError): + index.astype(dtype) + + dtype = IntervalDtype('uint64') + with pytest.raises(ValueError): + index.astype(dtype) + + @pytest.mark.parametrize('subtype', ['datetime64[ns]', 'timedelta64[ns]']) + 
def test_subtype_datetimelike(self, index, subtype): + dtype = IntervalDtype(subtype) + msg = 'Cannot convert .* to .*; subtypes are incompatible' + with tm.assert_raises_regex(TypeError, msg): + index.astype(dtype) + + +class TestDatetimelikeSubtype(Base): + """Tests specific to IntervalIndex with datetime-like subtype""" + + indexes = [ + interval_range(Timestamp('2018-01-01'), periods=10, closed='neither'), + interval_range(Timestamp('2018-01-01'), periods=10).insert(2, NaT), + interval_range(Timestamp('2018-01-01', tz='US/Eastern'), periods=10), + interval_range(Timedelta('0 days'), periods=10, closed='both'), + interval_range(Timedelta('0 days'), periods=10).insert(2, NaT), + ] + + @pytest.fixture(params=indexes) + def index(self, request): + return request.param + + @pytest.mark.parametrize('subtype', ['int64', 'uint64']) + def test_subtype_integer(self, index, subtype): + dtype = IntervalDtype(subtype) + result = index.astype(dtype) + expected = IntervalIndex.from_arrays(index.left.astype(subtype), + index.right.astype(subtype), + closed=index.closed) + tm.assert_index_equal(result, expected) + + def test_subtype_float(self, index): + dtype = IntervalDtype('float64') + msg = 'Cannot convert .* to .*; subtypes are incompatible' + with tm.assert_raises_regex(TypeError, msg): + index.astype(dtype) + + def test_subtype_datetimelike(self): + # datetime -> timedelta raises + dtype = IntervalDtype('timedelta64[ns]') + msg = 'Cannot convert .* to .*; subtypes are incompatible' + + index = interval_range(Timestamp('2018-01-01'), periods=10) + with tm.assert_raises_regex(TypeError, msg): + index.astype(dtype) + + index = interval_range(Timestamp('2018-01-01', tz='CET'), periods=10) + with tm.assert_raises_regex(TypeError, msg): + index.astype(dtype) + + # timedelta -> datetime raises + dtype = IntervalDtype('datetime64[ns]') + index = interval_range(Timedelta('0 days'), periods=10) + with tm.assert_raises_regex(TypeError, msg): + index.astype(dtype) diff --git 
a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index b6d49c9e7ba19..9895ee06a22c0 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -415,26 +415,6 @@ def test_equals(self, closed): np.arange(5), closed=other_closed) assert not expected.equals(expected_other_closed) - def test_astype(self, closed): - idx = self.create_index(closed=closed) - result = idx.astype(object) - tm.assert_index_equal(result, Index(idx.values, dtype='object')) - assert not idx.equals(result) - assert idx.equals(IntervalIndex.from_intervals(result)) - - result = idx.astype('interval') - tm.assert_index_equal(result, idx) - assert result.equals(idx) - - @pytest.mark.parametrize('dtype', [ - np.int64, np.float64, 'period[M]', 'timedelta64', 'datetime64[ns]', - 'datetime64[ns, US/Eastern]']) - def test_astype_errors(self, closed, dtype): - idx = self.create_index(closed=closed) - msg = 'Cannot cast IntervalIndex to dtype' - with tm.assert_raises_regex(TypeError, msg): - idx.astype(dtype) - @pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series]) def test_where(self, closed, klass): idx = self.create_index(closed=closed)
- [X] closes #19197 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Summary: - Added support for `IntervalIndex.astype(IntervalDtype(...))` - Allows for subtype conversion, e.g. `interval[int64]` --> `interval[float64]` - Created a new `/interval/test_astype.py` file for the tests - Similar to how these tests were split into a separate file for `/datetimes`, and `/timedeltas` - Created a base class to hold tests that are common to any subtype - Created separate subtypes classes that hold tests for behavior unique to the subtype - Significantly expanded the `astype` related tests for `IntervalIndex` - Didn't parametrize over `closed`, since it doesn't really come into play for `astype`, though each variation of closed is accounted for among the indexes tested - Minor fix to `IntervalDtype` class when an invalid subtype comparison is made - See comment on the relevant code change for example
https://api.github.com/repos/pandas-dev/pandas/pulls/19231
2018-01-13T21:46:52Z
2018-01-14T21:27:25Z
2018-01-14T21:27:25Z
2018-01-15T18:26:12Z
Fix no raise dup index when using drop with axis=0
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 14949267fc37d..450afa01a576e 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -272,6 +272,7 @@ Other API Changes - :class:`IntervalIndex` and ``IntervalDtype`` no longer support categorical, object, and string subtypes (:issue:`19016`) - The default ``Timedelta`` constructor now accepts an ``ISO 8601 Duration`` string as an argument (:issue:`19040`) - ``IntervalDtype`` now returns ``True`` when compared against ``'interval'`` regardless of subtype, and ``IntervalDtype.name`` now returns ``'interval'`` regardless of subtype (:issue:`18980`) +- ``KeyError`` now raises instead of ``ValueError`` in :meth:`~DataFrame.drop`, :meth:`~Panel.drop`, :meth:`~Series.drop`, :meth:`~Index.drop` when dropping a non-existent element in an axis with duplicates (:issue:`19186`) - :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`) .. _whatsnew_0230.deprecations: @@ -415,6 +416,8 @@ Indexing - Bug in :func:`MultiIndex.set_labels` which would cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... 
] (:issue:`19057`) - Bug in ``str.extractall`` when there were no matches empty :class:`Index` was returned instead of appropriate :class:`MultiIndex` (:issue:`19034`) - Bug in :class:`IntervalIndex` where set operations that returned an empty ``IntervalIndex`` had the wrong dtype (:issue:`19101`) +- Bug in :meth:`~DataFrame.drop`, :meth:`~Panel.drop`, :meth:`~Series.drop`, :meth:`~Index.drop` where no ``KeyError`` is raised when dropping a non-existent element from an axis that contains duplicates (:issue:`19186`) +- I/O ^^^ diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cef1e551f948e..7ffef9c8a86d7 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -2806,6 +2806,11 @@ def drop(self, labels=None, axis=0, index=None, columns=None, level=None, ------- dropped : type of caller + Raises + ------ + KeyError + If none of the labels are found in the selected axis + Examples -------- >>> df = pd.DataFrame(np.arange(12).reshape(3,4), @@ -2909,6 +2914,9 @@ def _drop_axis(self, labels, axis, level=None, errors='raise'): else: indexer = ~axis.isin(labels) + if errors == 'raise' and indexer.all(): + raise KeyError('{} not found in axis'.format(labels)) + slicer = [slice(None)] * self.ndim slicer[self._get_axis_number(axis_name)] = indexer diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f634d809560ee..10c22884d7acf 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3760,6 +3760,11 @@ def drop(self, labels, errors='raise'): Returns ------- dropped : Index + + Raises + ------ + KeyError + If none of the labels are found in the selected axis """ arr_dtype = 'object' if self.dtype == 'object' else None labels = _index_labels_to_array(labels, dtype=arr_dtype) @@ -3767,8 +3772,8 @@ def drop(self, labels, errors='raise'): mask = indexer == -1 if mask.any(): if errors != 'ignore': - raise ValueError('labels %s not contained in axis' % - labels[mask]) + raise KeyError( + 'labels %s not 
contained in axis' % labels[mask]) indexer = indexer[~mask] return self.delete(indexer) diff --git a/pandas/core/reshape/pivot.py b/pandas/core/reshape/pivot.py index 77babf718d78c..0e92fc4edce85 100644 --- a/pandas/core/reshape/pivot.py +++ b/pandas/core/reshape/pivot.py @@ -75,7 +75,7 @@ def pivot_table(data, values=None, index=None, columns=None, aggfunc='mean', for key in keys: try: values = values.drop(key) - except (TypeError, ValueError): + except (TypeError, ValueError, KeyError): pass values = list(values) diff --git a/pandas/tests/frame/test_axis_select_reindex.py b/pandas/tests/frame/test_axis_select_reindex.py index 343e235fb741c..28e82f7585850 100644 --- a/pandas/tests/frame/test_axis_select_reindex.py +++ b/pandas/tests/frame/test_axis_select_reindex.py @@ -41,8 +41,8 @@ def test_drop_names(self): assert obj.columns.name == 'second' assert list(df.columns) == ['d', 'e', 'f'] - pytest.raises(ValueError, df.drop, ['g']) - pytest.raises(ValueError, df.drop, ['g'], 1) + pytest.raises(KeyError, df.drop, ['g']) + pytest.raises(KeyError, df.drop, ['g'], 1) # errors = 'ignore' dropped = df.drop(['g'], errors='ignore') @@ -87,10 +87,10 @@ def test_drop(self): assert_frame_equal(simple.drop( [0, 3], axis='index'), simple.loc[[1, 2], :]) - pytest.raises(ValueError, simple.drop, 5) - pytest.raises(ValueError, simple.drop, 'C', 1) - pytest.raises(ValueError, simple.drop, [1, 5]) - pytest.raises(ValueError, simple.drop, ['A', 'C'], 1) + pytest.raises(KeyError, simple.drop, 5) + pytest.raises(KeyError, simple.drop, 'C', 1) + pytest.raises(KeyError, simple.drop, [1, 5]) + pytest.raises(KeyError, simple.drop, ['A', 'C'], 1) # errors = 'ignore' assert_frame_equal(simple.drop(5, errors='ignore'), simple) @@ -1128,3 +1128,26 @@ def test_reindex_multi(self): expected = df.reindex([0, 1]).reindex(columns=['a', 'b']) assert_frame_equal(result, expected) + + data = [[1, 2, 3], [1, 2, 3]] + + @pytest.mark.parametrize('actual', [ + DataFrame(data=data, index=['a', 'a']), + 
DataFrame(data=data, index=['a', 'b']), + DataFrame(data=data, index=['a', 'b']).set_index([0, 1]), + DataFrame(data=data, index=['a', 'a']).set_index([0, 1]) + ]) + def test_raise_on_drop_duplicate_index(self, actual): + + # issue 19186 + level = 0 if isinstance(actual.index, MultiIndex) else None + with pytest.raises(KeyError): + actual.drop('c', level=level, axis=0) + with pytest.raises(KeyError): + actual.T.drop('c', level=level, axis=1) + expected_no_err = actual.drop('c', axis=0, level=level, + errors='ignore') + assert_frame_equal(expected_no_err, actual) + expected_no_err = actual.T.drop('c', axis=1, level=level, + errors='ignore') + assert_frame_equal(expected_no_err.T, actual) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index c4e8682934369..508c3a73f48c7 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -1396,8 +1396,8 @@ def test_drop(self): expected = self.strIndex[lrange(5) + lrange(10, n)] tm.assert_index_equal(dropped, expected) - pytest.raises(ValueError, self.strIndex.drop, ['foo', 'bar']) - pytest.raises(ValueError, self.strIndex.drop, ['1', 'bar']) + pytest.raises(KeyError, self.strIndex.drop, ['foo', 'bar']) + pytest.raises(KeyError, self.strIndex.drop, ['1', 'bar']) # errors='ignore' mixed = drop.tolist() + ['foo'] @@ -1419,7 +1419,7 @@ def test_drop(self): tm.assert_index_equal(dropped, expected) # errors='ignore' - pytest.raises(ValueError, ser.drop, [3, 4]) + pytest.raises(KeyError, ser.drop, [3, 4]) dropped = ser.drop(4, errors='ignore') expected = Index([1, 2, 3]) @@ -1448,7 +1448,7 @@ def test_drop_tuple(self, values, to_drop): removed = index.drop(to_drop[1]) for drop_me in to_drop[1], [to_drop[1]]: - pytest.raises(ValueError, removed.drop, drop_me) + pytest.raises(KeyError, removed.drop, drop_me) def test_tuple_union_bug(self): import pandas diff --git a/pandas/tests/series/test_indexing.py b/pandas/tests/series/test_indexing.py index 
29b4363ec70b9..bafc6d268c266 100644 --- a/pandas/tests/series/test_indexing.py +++ b/pandas/tests/series/test_indexing.py @@ -1838,8 +1838,8 @@ def test_drop(self): # single string/tuple-like s = Series(range(3), index=list('abc')) - pytest.raises(ValueError, s.drop, 'bc') - pytest.raises(ValueError, s.drop, ('a', )) + pytest.raises(KeyError, s.drop, 'bc') + pytest.raises(KeyError, s.drop, ('a', )) # errors='ignore' s = Series(range(3), index=list('abc')) @@ -1861,7 +1861,7 @@ def test_drop(self): # GH 16877 s = Series([2, 3], index=[0, 1]) - with tm.assert_raises_regex(ValueError, 'not contained in axis'): + with tm.assert_raises_regex(KeyError, 'not contained in axis'): s.drop([False, True]) def test_align(self): diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py index 770560134d8d6..1955fc301be9b 100644 --- a/pandas/tests/test_panel.py +++ b/pandas/tests/test_panel.py @@ -2302,7 +2302,7 @@ def check_drop(drop_val, axis_number, aliases, expected): expected = Panel({"One": df}) check_drop('Two', 0, ['items'], expected) - pytest.raises(ValueError, panel.drop, 'Three') + pytest.raises(KeyError, panel.drop, 'Three') # errors = 'ignore' dropped = panel.drop('Three', errors='ignore')
- [x] closes #19186 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19230
2018-01-13T19:58:42Z
2018-01-18T00:53:08Z
2018-01-18T00:53:07Z
2018-01-18T16:59:24Z
DOC: Exposed arguments in plot.kde
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 66e88e181ac0f..5feebe3ba2a22 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -536,7 +536,7 @@ Plotting - :func: `DataFrame.plot` now raises a ``ValueError`` when the ``x`` or ``y`` argument is improperly formed (:issue:`18671`) - Bug in formatting tick labels with ``datetime.time()`` and fractional seconds (:issue:`18478`). -- +- :meth:`Series.plot.kde` has exposed the args ``ind`` and ``bw_method`` in the docstring (:issue:`18461`). The argument ``ind`` may now also be an integer (number of sample points). - Groupby/Resample/Rolling diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py index 88b899ad60313..b15c5271ae321 100644 --- a/pandas/plotting/_core.py +++ b/pandas/plotting/_core.py @@ -1398,6 +1398,10 @@ def _get_ind(self, y): sample_range = np.nanmax(y) - np.nanmin(y) ind = np.linspace(np.nanmin(y) - 0.5 * sample_range, np.nanmax(y) + 0.5 * sample_range, 1000) + elif is_integer(self.ind): + sample_range = np.nanmax(y) - np.nanmin(y) + ind = np.linspace(np.nanmin(y) - 0.5 * sample_range, + np.nanmax(y) + 0.5 * sample_range, self.ind) else: ind = self.ind return ind @@ -2598,12 +2602,22 @@ def hist(self, bins=10, **kwds): """ return self(kind='hist', bins=bins, **kwds) - def kde(self, **kwds): + def kde(self, bw_method=None, ind=None, **kwds): """ Kernel Density Estimate plot Parameters ---------- + bw_method: str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. + If None (default), 'scott' is used. + See :class:`scipy.stats.gaussian_kde` for more information. + ind : NumPy array or integer, optional + Evaluation points. If None (default), 1000 equally spaced points + are used. If `ind` is a NumPy array, the kde is evaluated at the + points passed. If `ind` is an integer, `ind` number of equally + spaced points are used. 
`**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.Series.plot`. @@ -2611,7 +2625,7 @@ def kde(self, **kwds): ------- axes : matplotlib.AxesSubplot or np.array of them """ - return self(kind='kde', **kwds) + return self(kind='kde', bw_method=bw_method, ind=ind, **kwds) density = kde @@ -2766,12 +2780,22 @@ def hist(self, by=None, bins=10, **kwds): """ return self(kind='hist', by=by, bins=bins, **kwds) - def kde(self, **kwds): + def kde(self, bw_method=None, ind=None, **kwds): """ Kernel Density Estimate plot Parameters ---------- + bw_method: str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. + If None (default), 'scott' is used. + See :class:`scipy.stats.gaussian_kde` for more information. + ind : NumPy array or integer, optional + Evaluation points. If None (default), 1000 equally spaced points + are used. If `ind` is a NumPy array, the kde is evaluated at the + points passed. If `ind` is an integer, `ind` number of equally + spaced points are used. `**kwds` : optional Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`. 
@@ -2779,7 +2803,7 @@ def kde(self, **kwds): ------- axes : matplotlib.AxesSubplot or np.array of them """ - return self(kind='kde', **kwds) + return self(kind='kde', bw_method=bw_method, ind=ind, **kwds) density = kde diff --git a/pandas/tests/plotting/test_series.py b/pandas/tests/plotting/test_series.py index 2458fc0dc992c..278be433183fa 100644 --- a/pandas/tests/plotting/test_series.py +++ b/pandas/tests/plotting/test_series.py @@ -621,14 +621,16 @@ def test_kde_kwargs(self): if not self.mpl_ge_1_5_0: pytest.skip("mpl is not supported") - from numpy import linspace - _check_plot_works(self.ts.plot.kde, bw_method=.5, - ind=linspace(-100, 100, 20)) + sample_points = np.linspace(-100, 100, 20) + _check_plot_works(self.ts.plot.kde, bw_method='scott', ind=20) + _check_plot_works(self.ts.plot.kde, bw_method=None, ind=20) + _check_plot_works(self.ts.plot.kde, bw_method=None, ind=np.int(20)) + _check_plot_works(self.ts.plot.kde, bw_method=.5, ind=sample_points) _check_plot_works(self.ts.plot.density, bw_method=.5, - ind=linspace(-100, 100, 20)) + ind=sample_points) _, ax = self.plt.subplots() - ax = self.ts.plot.kde(logy=True, bw_method=.5, - ind=linspace(-100, 100, 20), ax=ax) + ax = self.ts.plot.kde(logy=True, bw_method=.5, ind=sample_points, + ax=ax) self._check_ax_scales(ax, yaxis='log') self._check_text_labels(ax.yaxis.get_label(), 'Density')
The documentation for `plot.kde` did not show the `bw_method` and `ind` arguments, which are specific to `plot.kde` (and `plot.density`, which refers to the same method). There is also a change in the actual code, and a corresponding test added. I added an option for `ind` to be an integer number of sample points, instead of necessarily a `np.array`. I image that a user typically just wants a number of equidistant sample points, and does not want to construct an array. - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry Comments very welcome.
https://api.github.com/repos/pandas-dev/pandas/pulls/19229
2018-01-13T18:47:14Z
2018-02-02T12:50:47Z
2018-02-02T12:50:47Z
2018-02-02T12:50:54Z
BUG/TST: assure conversions of datetimelikes for object, numeric dtypes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 3a3c2bf0c5ae4..a0205a8d64cb7 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -385,6 +385,11 @@ Conversion - Bug in localization of a naive, datetime string in a ``Series`` constructor with a ``datetime64[ns, tz]`` dtype (:issue:`174151`) - :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`) + + +- Bug in ``.astype()`` to non-ns timedelta units would hold the incorrect dtype (:issue:`19176`, :issue:`19223`, :issue:`12425`) + + Indexing ^^^^^^^^ diff --git a/pandas/_libs/tslibs/conversion.pyx b/pandas/_libs/tslibs/conversion.pyx index 11e1787cd77da..53abdd013ec37 100644 --- a/pandas/_libs/tslibs/conversion.pyx +++ b/pandas/_libs/tslibs/conversion.pyx @@ -29,7 +29,7 @@ from np_datetime cimport (check_dts_bounds, from util cimport (is_string_object, is_datetime64_object, - is_integer_object, is_float_object) + is_integer_object, is_float_object, is_array) from timedeltas cimport cast_from_unit from timezones cimport (is_utc, is_tzlocal, is_fixed_offset, @@ -45,6 +45,8 @@ from nattype cimport NPY_NAT, checknull_with_nat # Constants cdef int64_t DAY_NS = 86400000000000LL +NS_DTYPE = np.dtype('M8[ns]') +TD_DTYPE = np.dtype('m8[ns]') UTC = pytz.UTC @@ -73,13 +75,14 @@ cdef inline int64_t get_datetime64_nanos(object val) except? 
-1: return ival -def ensure_datetime64ns(ndarray arr): +def ensure_datetime64ns(ndarray arr, copy=True): """ Ensure a np.datetime64 array has dtype specifically 'datetime64[ns]' Parameters ---------- arr : ndarray + copy : boolean, default True Returns ------- @@ -104,6 +107,8 @@ def ensure_datetime64ns(ndarray arr): unit = get_datetime64_unit(arr.flat[0]) if unit == PANDAS_FR_ns: + if copy: + arr = arr.copy() result = arr else: for i in range(n): @@ -117,6 +122,23 @@ def ensure_datetime64ns(ndarray arr): return result +def ensure_timedelta64ns(ndarray arr, copy=True): + """ + Ensure a np.timedelta64 array has dtype specifically 'timedelta64[ns]' + + Parameters + ---------- + arr : ndarray + copy : boolean, default True + + Returns + ------- + result : ndarray with dtype timedelta64[ns] + + """ + return arr.astype(TD_DTYPE, copy=copy) + + def datetime_to_datetime64(ndarray[object] values): """ Convert ndarray of datetime-like objects to int64 array representing diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b3ae8aae53b35..672e60c9dcde5 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -656,11 +656,15 @@ def astype_nansafe(arr, dtype, copy=True): return tslib.ints_to_pydatetime(arr.view(np.int64)) elif dtype == np.int64: return arr.view(dtype) - elif dtype != _NS_DTYPE: - raise TypeError("cannot astype a datetimelike from [{from_dtype}] " - "to [{to_dtype}]".format(from_dtype=arr.dtype, - to_dtype=dtype)) - return arr.astype(_NS_DTYPE) + + # allow frequency conversions + if dtype.kind == 'M': + return arr.astype(dtype) + + raise TypeError("cannot astype a datetimelike from [{from_dtype}] " + "to [{to_dtype}]".format(from_dtype=arr.dtype, + to_dtype=dtype)) + elif is_timedelta64_dtype(arr): if dtype == np.int64: return arr.view(dtype) @@ -668,21 +672,23 @@ def astype_nansafe(arr, dtype, copy=True): return tslib.ints_to_pytimedelta(arr.view(np.int64)) # in py3, timedelta64[ns] are int64 - elif ((PY3 and dtype not 
in [_INT64_DTYPE, _TD_DTYPE]) or - (not PY3 and dtype != _TD_DTYPE)): + if ((PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or + (not PY3 and dtype != _TD_DTYPE)): # allow frequency conversions + # we return a float here! if dtype.kind == 'm': mask = isna(arr) result = arr.astype(dtype).astype(np.float64) result[mask] = np.nan return result + elif dtype == _TD_DTYPE: + return arr.astype(_TD_DTYPE, copy=copy) - raise TypeError("cannot astype a timedelta from [{from_dtype}] " - "to [{to_dtype}]".format(from_dtype=arr.dtype, - to_dtype=dtype)) + raise TypeError("cannot astype a timedelta from [{from_dtype}] " + "to [{to_dtype}]".format(from_dtype=arr.dtype, + to_dtype=dtype)) - return arr.astype(_TD_DTYPE) elif (np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer)): @@ -704,19 +710,7 @@ def astype_nansafe(arr, dtype, copy=True): if copy: - if arr.dtype == dtype: - return arr.copy() - - # we handle datetimelikes with pandas machinery - # to be robust to the input type - elif is_datetime64_dtype(dtype): - from pandas import to_datetime - return to_datetime(arr).values - elif is_timedelta64_dtype(dtype): - from pandas import to_timedelta - return to_timedelta(arr).values - - return arr.astype(dtype) + return arr.astype(dtype, copy=True) return arr.view(dtype) diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 5d6fc7487eeb5..dca9a5fde0d74 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -4,6 +4,7 @@ from pandas.compat import (string_types, text_type, binary_type, PY3, PY36) from pandas._libs import algos, lib +from pandas._libs.tslibs import conversion from .dtypes import (CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, DatetimeTZDtypeType, PeriodDtype, PeriodDtypeType, @@ -21,8 +22,8 @@ for t in ['O', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64']]) -_NS_DTYPE = np.dtype('M8[ns]') -_TD_DTYPE = np.dtype('m8[ns]') +_NS_DTYPE = conversion.NS_DTYPE +_TD_DTYPE 
= conversion.TD_DTYPE _INT64_DTYPE = np.dtype(np.int64) # oh the troubles to reduce import time @@ -31,6 +32,9 @@ _ensure_float64 = algos.ensure_float64 _ensure_float32 = algos.ensure_float32 +_ensure_datetime64ns = conversion.ensure_datetime64ns +_ensure_timedelta64ns = conversion.ensure_timedelta64ns + def _ensure_float(arr): """ diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a4778ae4e629..3c923133477df 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -631,7 +631,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, values = astype_nansafe(values.ravel(), dtype, copy=True) values = values.reshape(self.shape) - newb = make_block(values, placement=self.mgr_locs, dtype=dtype, + newb = make_block(values, placement=self.mgr_locs, klass=klass) except: if errors == 'raise': @@ -1954,6 +1954,13 @@ class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): _can_hold_na = True is_numeric = False + def __init__(self, values, placement, fastpath=False, **kwargs): + if values.dtype != _TD_DTYPE: + values = conversion.ensure_timedelta64ns(values) + + super(TimeDeltaBlock, self).__init__(values, fastpath=True, + placement=placement, **kwargs) + @property def _box_func(self): return lambda x: tslib.Timedelta(x, unit='ns') diff --git a/pandas/tests/frame/test_dtypes.py b/pandas/tests/frame/test_dtypes.py index 21c028e634bc0..70eee5984b438 100644 --- a/pandas/tests/frame/test_dtypes.py +++ b/pandas/tests/frame/test_dtypes.py @@ -640,6 +640,71 @@ def test_astype_categoricaldtype_class_raises(self, cls): with tm.assert_raises_regex(TypeError, xpr): df['A'].astype(cls) + @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) + @pytest.mark.parametrize("dtype", ["M8", "m8"]) + @pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D']) + def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit): + # tests all units from numeric origination + # gh-19223 / gh-12425 + dtype = "{}[{}]".format(dtype, 
unit) + arr = np.array([[1, 2, 3]], dtype=arr_dtype) + df = DataFrame(arr) + result = df.astype(dtype) + expected = DataFrame(arr.astype(dtype)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D']) + def test_astype_to_datetime_unit(self, unit): + # tests all units from datetime origination + # gh-19223 + dtype = "M8[{}]".format(unit) + arr = np.array([[1, 2, 3]], dtype=dtype) + df = DataFrame(arr) + result = df.astype(dtype) + expected = DataFrame(arr.astype(dtype)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ['ns']) + def test_astype_to_timedelta_unit_ns(self, unit): + # preserver the timedelta conversion + # gh-19223 + dtype = "m8[{}]".format(unit) + arr = np.array([[1, 2, 3]], dtype=dtype) + df = DataFrame(arr) + result = df.astype(dtype) + expected = DataFrame(arr.astype(dtype)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ['us', 'ms', 's', 'h', 'm', 'D']) + def test_astype_to_timedelta_unit(self, unit): + # coerce to float + # gh-19223 + dtype = "m8[{}]".format(unit) + arr = np.array([[1, 2, 3]], dtype=dtype) + df = DataFrame(arr) + result = df.astype(dtype) + expected = DataFrame(df.values.astype(dtype).astype(float)) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D']) + def test_astype_to_incorrect_datetimelike(self, unit): + # trying to astype a m to a M, or vice-versa + # gh-19224 + dtype = "M8[{}]".format(unit) + other = "m8[{}]".format(unit) + + df = DataFrame(np.array([[1, 2, 3]], dtype=dtype)) + with pytest.raises(TypeError): + df.astype(other) + + df = DataFrame(np.array([[1, 2, 3]], dtype=other)) + with pytest.raises(TypeError): + df.astype(dtype) + def test_timedeltas(self): df = DataFrame(dict(A=Series(date_range('2012-1-1', periods=3, freq='D')), diff --git a/pandas/tests/reshape/merge/test_merge.py 
b/pandas/tests/reshape/merge/test_merge.py index b9a667499b7a0..a8319339c6435 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -523,25 +523,23 @@ def test_other_datetime_unit(self): columns=['entity_id', 'days']) tm.assert_frame_equal(result, exp) - def test_other_timedelta_unit(self): + @pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns']) + def test_other_timedelta_unit(self, unit): # GH 13389 df1 = pd.DataFrame({'entity_id': [101, 102]}) s = pd.Series([None, None], index=[101, 102], name='days') - for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]', - 'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]', - 'timedelta64[ns]']: + dtype = "m8[{}]".format(unit) + df2 = s.astype(dtype).to_frame('days') + assert df2['days'].dtype == 'm8[ns]' - df2 = s.astype(dtype).to_frame('days') - assert df2['days'].dtype == dtype - - result = df1.merge(df2, left_on='entity_id', right_index=True) + result = df1.merge(df2, left_on='entity_id', right_index=True) - exp = pd.DataFrame({'entity_id': [101, 102], - 'days': np.array(['nat', 'nat'], - dtype=dtype)}, - columns=['entity_id', 'days']) - tm.assert_frame_equal(result, exp) + exp = pd.DataFrame({'entity_id': [101, 102], + 'days': np.array(['nat', 'nat'], + dtype=dtype)}, + columns=['entity_id', 'days']) + tm.assert_frame_equal(result, exp) def test_overlapping_columns_error_message(self): df = DataFrame({'key': [1, 2, 3], diff --git a/pandas/tests/series/test_constructors.py b/pandas/tests/series/test_constructors.py index a3e40f65e922f..33737387edffa 100644 --- a/pandas/tests/series/test_constructors.py +++ b/pandas/tests/series/test_constructors.py @@ -552,10 +552,6 @@ def test_constructor_dtype_datetime64(self): s.iloc[0] = np.nan assert s.dtype == 'M8[ns]' - # invalid astypes - for t in ['s', 'D', 'us', 'ms']: - pytest.raises(TypeError, s.astype, 'M8[%s]' % t) - # GH3414 related pytest.raises(TypeError, lambda x: Series( 
Series(dates).astype('int') / 1000000, dtype='M8[ms]')) @@ -707,6 +703,20 @@ def test_constructor_with_datetime_tz(self): expected = Series(pd.DatetimeIndex(['NaT', 'NaT'], tz='US/Eastern')) assert_series_equal(s, expected) + @pytest.mark.parametrize("arr_dtype", [np.int64, np.float64]) + @pytest.mark.parametrize("dtype", ["M8", "m8"]) + @pytest.mark.parametrize("unit", ['ns', 'us', 'ms', 's', 'h', 'm', 'D']) + def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit): + # tests all units + # gh-19223 + dtype = "{}[{}]".format(dtype, unit) + arr = np.array([1, 2, 3], dtype=arr_dtype) + s = Series(arr) + result = s.astype(dtype) + expected = Series(arr.astype(dtype)) + + tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('arg', ['2013-01-01 00:00:00', pd.NaT, np.nan, None]) def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg): diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 783fcddac1280..ed9307d50521f 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1649,32 +1649,26 @@ def test_invalid_ops(self): pytest.raises(Exception, self.objSeries.__sub__, np.array(1, dtype=np.int64)) - def test_timedelta64_conversions(self): + @pytest.mark.parametrize("m", [1, 3, 10]) + @pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns']) + def test_timedelta64_conversions(self, m, unit): + startdate = Series(date_range('2013-01-01', '2013-01-03')) enddate = Series(date_range('2013-03-01', '2013-03-03')) s1 = enddate - startdate s1[2] = np.nan - for m in [1, 3, 10]: - for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']: - - # op - expected = s1.apply(lambda x: x / np.timedelta64(m, unit)) - result = s1 / np.timedelta64(m, unit) - assert_series_equal(result, expected) - - if m == 1 and unit != 'ns': - - # astype - result = s1.astype("timedelta64[{0}]".format(unit)) - assert_series_equal(result, expected) + # op + expected = 
s1.apply(lambda x: x / np.timedelta64(m, unit)) + result = s1 / np.timedelta64(m, unit) + assert_series_equal(result, expected) - # reverse op - expected = s1.apply( - lambda x: Timedelta(np.timedelta64(m, unit)) / x) - result = np.timedelta64(m, unit) / s1 - assert_series_equal(result, expected) + # reverse op + expected = s1.apply( + lambda x: Timedelta(np.timedelta64(m, unit)) / x) + result = np.timedelta64(m, unit) / s1 + assert_series_equal(result, expected) # astype s = Series(date_range('20130101', periods=3))
closes #19223 closes #12425
https://api.github.com/repos/pandas-dev/pandas/pulls/19224
2018-01-13T15:43:16Z
2018-01-13T18:18:43Z
2018-01-13T18:18:42Z
2018-01-13T19:50:00Z
move DatetimeBlock and DatetimeTZBlock adjacent to DatetimeLikeBlockMixin
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a4778ae4e629..af65c1c57e0e2 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -1948,35 +1948,52 @@ def get_values(self, dtype=None): return self.values -class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): +class DatetimeBlock(DatetimeLikeBlockMixin, Block): __slots__ = () - is_timedelta = True + is_datetime = True _can_hold_na = True - is_numeric = False - @property - def _box_func(self): - return lambda x: tslib.Timedelta(x, unit='ns') + def __init__(self, values, placement, fastpath=False, **kwargs): + if values.dtype != _NS_DTYPE: + values = conversion.ensure_datetime64ns(values) + + super(DatetimeBlock, self).__init__(values, fastpath=True, + placement=placement, **kwargs) + + def _astype(self, dtype, mgr=None, **kwargs): + """ + these automatically copy, so copy=True has no effect + raise on an except if raise == True + """ + + # if we are passed a datetime64[ns, tz] + if is_datetime64tz_dtype(dtype): + dtype = DatetimeTZDtype(dtype) + + values = self.values + if getattr(values, 'tz', None) is None: + values = DatetimeIndex(values).tz_localize('UTC') + values = values.tz_convert(dtype.tz) + return self.make_block(values) + + # delegate + return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) def _can_hold_element(self, element): tipo = maybe_infer_dtype_type(element) if tipo is not None: - return issubclass(tipo.type, np.timedelta64) - return is_integer(element) or isinstance( - element, (timedelta, np.timedelta64)) - - def fillna(self, value, **kwargs): - - # allow filling with integers to be - # interpreted as seconds - if is_integer(value) and not isinstance(value, np.timedelta64): - value = Timedelta(value, unit='s') - return super(TimeDeltaBlock, self).fillna(value, **kwargs) + # TODO: this still uses asarray, instead of dtype.type + element = np.array(element) + return element.dtype == _NS_DTYPE or element.dtype == np.int64 + return 
(is_integer(element) or isinstance(element, datetime) or + isna(element)) def _try_coerce_args(self, values, other): """ - Coerce values and other to int64, with null values converted to - iNaT. values is always ndarray-like, other may not be + Coerce values and other to dtype 'i8'. NaN and NaT convert to + the smallest i8, and will correctly round-trip to NaT if converted + back in _try_coerce_result. values is always ndarray-like, other + may not be Parameters ---------- @@ -1997,15 +2014,14 @@ def _try_coerce_args(self, values, other): elif is_null_datelike_scalar(other): other = tslib.iNaT other_mask = True - elif isinstance(other, Timedelta): - other_mask = isna(other) - other = other.value - elif isinstance(other, timedelta): - other = Timedelta(other).value - elif isinstance(other, np.timedelta64): + elif isinstance(other, (datetime, np.datetime64, date)): + other = self._box_func(other) + if getattr(other, 'tz') is not None: + raise TypeError("cannot coerce a Timestamp with a tz on a " + "naive Block") other_mask = isna(other) - other = Timedelta(other).value - elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): + other = other.asm8.view('i8') + elif hasattr(other, 'dtype') and is_datetime64_dtype(other): other_mask = isna(other) other = other.astype('i8', copy=False).view('i8') else: @@ -2016,723 +2032,706 @@ def _try_coerce_args(self, values, other): return values, values_mask, other, other_mask def _try_coerce_result(self, result): - """ reverse of try_coerce_args / try_operate """ + """ reverse of try_coerce_args """ if isinstance(result, np.ndarray): - mask = isna(result) if result.dtype.kind in ['i', 'f', 'O']: - result = result.astype('m8[ns]') - result[mask] = tslib.iNaT - elif isinstance(result, (np.integer, np.float)): + try: + result = result.astype('M8[ns]') + except ValueError: + pass + elif isinstance(result, (np.integer, np.float, np.datetime64)): result = self._box_func(result) return result - def should_store(self, value): - 
return issubclass(value.dtype.type, np.timedelta64) + @property + def _box_func(self): + return tslib.Timestamp - def to_native_types(self, slicer=None, na_rep=None, quoting=None, - **kwargs): + def to_native_types(self, slicer=None, na_rep=None, date_format=None, + quoting=None, **kwargs): """ convert to our native types format, slicing if desired """ values = self.values if slicer is not None: - values = values[:, slicer] - mask = isna(values) - - rvalues = np.empty(values.shape, dtype=object) - if na_rep is None: - na_rep = 'NaT' - rvalues[mask] = na_rep - imask = (~mask).ravel() - - # FIXME: - # should use the formats.format.Timedelta64Formatter here - # to figure what format to pass to the Timedelta - # e.g. to not show the decimals say - rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') - for val in values.ravel()[imask]], - dtype=object) - return rvalues - + values = values[..., slicer] -class BoolBlock(NumericBlock): - __slots__ = () - is_bool = True - _can_hold_na = False + from pandas.io.formats.format import _get_format_datetime64_from_values + format = _get_format_datetime64_from_values(values, date_format) - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - return issubclass(tipo.type, np.bool_) - return isinstance(element, (bool, np.bool_)) + result = tslib.format_array_from_datetime( + values.view('i8').ravel(), tz=getattr(self.values, 'tz', None), + format=format, na_rep=na_rep).reshape(values.shape) + return np.atleast_2d(result) def should_store(self, value): - return issubclass(value.dtype.type, np.bool_) - - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - inplace = validate_bool_kwarg(inplace, 'inplace') - to_replace_values = np.atleast_1d(to_replace) - if not np.can_cast(to_replace_values, bool): - return self - return super(BoolBlock, self).replace(to_replace, value, - inplace=inplace, filter=filter, - 
regex=regex, convert=convert, - mgr=mgr) + return (issubclass(value.dtype.type, np.datetime64) and + not is_datetimetz(value)) + def set(self, locs, values, check=False): + """ + Modify Block in-place with new item value -class ObjectBlock(Block): - __slots__ = () - is_object = True - _can_hold_na = True + Returns + ------- + None + """ + if values.dtype != _NS_DTYPE: + # Workaround for numpy 1.6 bug + values = conversion.ensure_datetime64ns(values) - def __init__(self, values, ndim=2, fastpath=False, placement=None, - **kwargs): - if issubclass(values.dtype.type, compat.string_types): - values = np.array(values, dtype=object) + self.values[locs] = values - super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath, - placement=placement, **kwargs) - @property - def is_bool(self): - """ we can be a bool if we have only bool values but are of type - object - """ - return lib.is_bool_array(self.values.ravel()) +class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): + """ implement a datetime64 block with a tz attribute """ + __slots__ = () + _holder = DatetimeIndex + _concatenator = staticmethod(_concat._concat_datetime) + is_datetimetz = True - # TODO: Refactor when convert_objects is removed since there will be 1 path - def convert(self, *args, **kwargs): - """ attempt to coerce any object types to better types return a copy of - the block (if copy = True) by definition we ARE an ObjectBlock!!!!! + def __init__(self, values, placement, ndim=2, **kwargs): - can return multiple blocks! 
- """ + if not isinstance(values, self._holder): + values = self._holder(values) - if args: - raise NotImplementedError - by_item = True if 'by_item' not in kwargs else kwargs['by_item'] + dtype = kwargs.pop('dtype', None) - new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] - new_style = False - for kw in new_inputs: - new_style |= kw in kwargs + if dtype is not None: + if isinstance(dtype, compat.string_types): + dtype = DatetimeTZDtype.construct_from_string(dtype) + values = values._shallow_copy(tz=dtype.tz) - if new_style: - fn = soft_convert_objects - fn_inputs = new_inputs - else: - fn = maybe_convert_objects - fn_inputs = ['convert_dates', 'convert_numeric', - 'convert_timedeltas'] - fn_inputs += ['copy'] + if values.tz is None: + raise ValueError("cannot create a DatetimeTZBlock without a tz") - fn_kwargs = {} - for key in fn_inputs: - if key in kwargs: - fn_kwargs[key] = kwargs[key] + super(DatetimeTZBlock, self).__init__(values, placement=placement, + ndim=ndim, **kwargs) - # operate column-by-column - def f(m, v, i): - shape = v.shape - values = fn(v.ravel(), **fn_kwargs) - try: - values = values.reshape(shape) - values = _block_shape(values, ndim=self.ndim) - except (AttributeError, NotImplementedError): - pass + def copy(self, deep=True, mgr=None): + """ copy constructor """ + values = self.values + if deep: + values = values.copy(deep=True) + return self.make_block_same_class(values) - return values + def external_values(self): + """ we internally represent the data as a DatetimeIndex, but for + external compat with ndarray, export as a ndarray of Timestamps + """ + return self.values.astype('datetime64[ns]').values - if by_item and not self._is_single_block: - blocks = self.split_and_operate(None, f, False) - else: - values = f(None, self.values.ravel(), None) - blocks = [make_block(values, ndim=self.ndim, - placement=self.mgr_locs)] + def get_values(self, dtype=None): + # return object dtype as Timestamps with the zones + if 
is_object_dtype(dtype): + f = lambda x: lib.Timestamp(x, tz=self.values.tz) + return lib.map_infer( + self.values.ravel(), f).reshape(self.values.shape) + return self.values - return blocks + def _slice(self, slicer): + """ return a slice of my values """ + if isinstance(slicer, tuple): + col, loc = slicer + if not is_null_slice(col) and col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values[loc] + return self.values[slicer] - def set(self, locs, values, check=False): + def _try_coerce_args(self, values, other): """ - Modify Block in-place with new item value + localize and return i8 for the values - Returns + Parameters + ---------- + values : ndarray-like + other : ndarray-like or scalar + + Returns ------- - None + base-type values, values mask, base-type other, other mask """ + values_mask = _block_shape(isna(values), ndim=self.ndim) + # asi8 is a view, needs copy + values = _block_shape(values.asi8, ndim=self.ndim) + other_mask = False - # GH6026 - if check: - try: - if (self.values[locs] == values).all(): - return - except: - pass - try: - self.values[locs] = values - except (ValueError): + if isinstance(other, ABCSeries): + other = self._holder(other) + other_mask = isna(other) - # broadcasting error - # see GH6171 - new_shape = list(values.shape) - new_shape[0] = len(self.items) - self.values = np.empty(tuple(new_shape), dtype=self.dtype) - self.values.fill(np.nan) - self.values[locs] = values + if isinstance(other, bool): + raise TypeError + elif (is_null_datelike_scalar(other) or + (is_scalar(other) and isna(other))): + other = tslib.iNaT + other_mask = True + elif isinstance(other, self._holder): + if other.tz != self.values.tz: + raise ValueError("incompatible or non tz-aware value") + other = other.asi8 + other_mask = isna(other) + elif isinstance(other, (np.datetime64, datetime, date)): + other = lib.Timestamp(other) + tz = getattr(other, 'tz', None) - def _maybe_downcast(self, blocks, downcast=None): + # test we 
can have an equal time zone + if tz is None or str(tz) != str(self.values.tz): + raise ValueError("incompatible or non tz-aware value") + other_mask = isna(other) + other = other.value + else: + raise TypeError - if downcast is not None: - return blocks + return values, values_mask, other, other_mask - # split and convert the blocks - return _extend_blocks([b.convert(datetime=True, numeric=False) - for b in blocks]) + def _try_coerce_result(self, result): + """ reverse of try_coerce_args """ + if isinstance(result, np.ndarray): + if result.dtype.kind in ['i', 'f', 'O']: + result = result.astype('M8[ns]') + elif isinstance(result, (np.integer, np.float, np.datetime64)): + result = lib.Timestamp(result, tz=self.values.tz) + if isinstance(result, np.ndarray): + # allow passing of > 1dim if its trivial + if result.ndim > 1: + result = result.reshape(np.prod(result.shape)) + result = self.values._shallow_copy(result) - def _can_hold_element(self, element): - return True + return result - def _try_coerce_args(self, values, other): - """ provide coercion to our input arguments """ + @property + def _box_func(self): + return lambda x: tslib.Timestamp(x, tz=self.dtype.tz) - if isinstance(other, ABCDatetimeIndex): - # to store DatetimeTZBlock as object - other = other.astype(object).values + def shift(self, periods, axis=0, mgr=None): + """ shift the block by periods """ - return values, False, other, False + # think about moving this to the DatetimeIndex. 
This is a non-freq + # (number of periods) shift ### - def should_store(self, value): - return not (issubclass(value.dtype.type, - (np.integer, np.floating, np.complexfloating, - np.datetime64, np.bool_)) or - is_extension_type(value)) + N = len(self) + indexer = np.zeros(N, dtype=int) + if periods > 0: + indexer[periods:] = np.arange(N - periods) + else: + indexer[:periods] = np.arange(-periods, N) - def replace(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): - to_rep_is_list = is_list_like(to_replace) - value_is_list = is_list_like(value) - both_lists = to_rep_is_list and value_is_list - either_list = to_rep_is_list or value_is_list + new_values = self.values.asi8.take(indexer) - result_blocks = [] - blocks = [self] + if periods > 0: + new_values[:periods] = tslib.iNaT + else: + new_values[periods:] = tslib.iNaT - if not either_list and is_re(to_replace): - return self._replace_single(to_replace, value, inplace=inplace, - filter=filter, regex=True, - convert=convert, mgr=mgr) - elif not (either_list or regex): - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - elif both_lists: - for to_rep, v in zip(to_replace, value): - result_blocks = [] - for b in blocks: - result = b._replace_single(to_rep, v, inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - result_blocks = _extend_blocks(result, result_blocks) - blocks = result_blocks - return result_blocks + new_values = self.values._shallow_copy(new_values) + return [self.make_block_same_class(new_values, + placement=self.mgr_locs)] - elif to_rep_is_list and regex: - for to_rep in to_replace: - result_blocks = [] - for b in blocks: - result = b._replace_single(to_rep, value, inplace=inplace, - filter=filter, regex=regex, - convert=convert, mgr=mgr) - result_blocks = _extend_blocks(result, result_blocks) - blocks = result_blocks - return result_blocks + def 
concat_same_type(self, to_concat, placement=None): + """ + Concatenate list of single blocks of the same type. + """ + values = self._concatenator([blk.values for blk in to_concat], + axis=self.ndim - 1) + # not using self.make_block_same_class as values can be non-tz dtype + return make_block( + values, placement=placement or slice(0, len(values), 1)) - return self._replace_single(to_replace, value, inplace=inplace, - filter=filter, convert=convert, - regex=regex, mgr=mgr) - def _replace_single(self, to_replace, value, inplace=False, filter=None, - regex=False, convert=True, mgr=None): +class TimeDeltaBlock(DatetimeLikeBlockMixin, IntBlock): + __slots__ = () + is_timedelta = True + _can_hold_na = True + is_numeric = False - inplace = validate_bool_kwarg(inplace, 'inplace') + @property + def _box_func(self): + return lambda x: tslib.Timedelta(x, unit='ns') - # to_replace is regex compilable - to_rep_re = regex and is_re_compilable(to_replace) + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, np.timedelta64) + return is_integer(element) or isinstance( + element, (timedelta, np.timedelta64)) - # regex is regex compilable - regex_re = is_re_compilable(regex) + def fillna(self, value, **kwargs): - # only one will survive - if to_rep_re and regex_re: - raise AssertionError('only one of to_replace and regex can be ' - 'regex compilable') + # allow filling with integers to be + # interpreted as seconds + if is_integer(value) and not isinstance(value, np.timedelta64): + value = Timedelta(value, unit='s') + return super(TimeDeltaBlock, self).fillna(value, **kwargs) - # if regex was passed as something that can be a regex (rather than a - # boolean) - if regex_re: - to_replace = regex + def _try_coerce_args(self, values, other): + """ + Coerce values and other to int64, with null values converted to + iNaT. 
values is always ndarray-like, other may not be - regex = regex_re or to_rep_re + Parameters + ---------- + values : ndarray-like + other : ndarray-like or scalar - # try to get the pattern attribute (compiled re) or it's a string - try: - pattern = to_replace.pattern - except AttributeError: - pattern = to_replace + Returns + ------- + base-type values, values mask, base-type other, other mask + """ - # if the pattern is not empty and to_replace is either a string or a - # regex - if regex and pattern: - rx = re.compile(to_replace) - else: - # if the thing to replace is not a string or compiled regex call - # the superclass method -> to_replace is some kind of object - return super(ObjectBlock, self).replace(to_replace, value, - inplace=inplace, - filter=filter, regex=regex, - mgr=mgr) + values_mask = isna(values) + values = values.view('i8') + other_mask = False - new_values = self.values if inplace else self.values.copy() + if isinstance(other, bool): + raise TypeError + elif is_null_datelike_scalar(other): + other = tslib.iNaT + other_mask = True + elif isinstance(other, Timedelta): + other_mask = isna(other) + other = other.value + elif isinstance(other, timedelta): + other = Timedelta(other).value + elif isinstance(other, np.timedelta64): + other_mask = isna(other) + other = Timedelta(other).value + elif hasattr(other, 'dtype') and is_timedelta64_dtype(other): + other_mask = isna(other) + other = other.astype('i8', copy=False).view('i8') + else: + # coercion issues + # let higher levels handle + raise TypeError - # deal with replacing values with objects (strings) that match but - # whose replacement is not a string (numeric, nan, object) - if isna(value) or not isinstance(value, compat.string_types): + return values, values_mask, other, other_mask - def re_replacer(s): - try: - return value if rx.search(s) is not None else s - except TypeError: - return s - else: - # value is guaranteed to be a string here, s can be either a string - # or null if it's null 
it gets returned - def re_replacer(s): - try: - return rx.sub(value, s) - except TypeError: - return s + def _try_coerce_result(self, result): + """ reverse of try_coerce_args / try_operate """ + if isinstance(result, np.ndarray): + mask = isna(result) + if result.dtype.kind in ['i', 'f', 'O']: + result = result.astype('m8[ns]') + result[mask] = tslib.iNaT + elif isinstance(result, (np.integer, np.float)): + result = self._box_func(result) + return result - f = np.vectorize(re_replacer, otypes=[self.dtype]) + def should_store(self, value): + return issubclass(value.dtype.type, np.timedelta64) - if filter is None: - filt = slice(None) - else: - filt = self.mgr_locs.isin(filter).nonzero()[0] + def to_native_types(self, slicer=None, na_rep=None, quoting=None, + **kwargs): + """ convert to our native types format, slicing if desired """ - new_values[filt] = f(new_values[filt]) + values = self.values + if slicer is not None: + values = values[:, slicer] + mask = isna(values) - # convert - block = self.make_block(new_values) - if convert: - block = block.convert(by_item=True, numeric=False) + rvalues = np.empty(values.shape, dtype=object) + if na_rep is None: + na_rep = 'NaT' + rvalues[mask] = na_rep + imask = (~mask).ravel() - return block + # FIXME: + # should use the formats.format.Timedelta64Formatter here + # to figure what format to pass to the Timedelta + # e.g. 
to not show the decimals say + rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all') + for val in values.ravel()[imask]], + dtype=object) + return rvalues -class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock): +class BoolBlock(NumericBlock): __slots__ = () - is_categorical = True - _verify_integrity = True - _can_hold_na = True - _holder = Categorical - _concatenator = staticmethod(_concat._concat_categorical) + is_bool = True + _can_hold_na = False - def __init__(self, values, placement, fastpath=False, **kwargs): + def _can_hold_element(self, element): + tipo = maybe_infer_dtype_type(element) + if tipo is not None: + return issubclass(tipo.type, np.bool_) + return isinstance(element, (bool, np.bool_)) - # coerce to categorical if we can - super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), - fastpath=True, - placement=placement, **kwargs) + def should_store(self, value): + return issubclass(value.dtype.type, np.bool_) - @property - def is_view(self): - """ I am never a view """ - return False + def replace(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True, mgr=None): + inplace = validate_bool_kwarg(inplace, 'inplace') + to_replace_values = np.atleast_1d(to_replace) + if not np.can_cast(to_replace_values, bool): + return self + return super(BoolBlock, self).replace(to_replace, value, + inplace=inplace, filter=filter, + regex=regex, convert=convert, + mgr=mgr) - def to_dense(self): - return self.values.to_dense().view() - def convert(self, copy=True, **kwargs): - return self.copy() if copy else self +class ObjectBlock(Block): + __slots__ = () + is_object = True + _can_hold_na = True - @property - def array_dtype(self): - """ the dtype to return if I want to construct this block as an - array - """ - return np.object_ + def __init__(self, values, ndim=2, fastpath=False, placement=None, + **kwargs): + if issubclass(values.dtype.type, compat.string_types): + values = np.array(values, 
dtype=object) - def _slice(self, slicer): - """ return a slice of my values """ + super(ObjectBlock, self).__init__(values, ndim=ndim, fastpath=fastpath, + placement=placement, **kwargs) - # slice the category - # return same dims as we currently have - return self.values._slice(slicer) + @property + def is_bool(self): + """ we can be a bool if we have only bool values but are of type + object + """ + return lib.is_bool_array(self.values.ravel()) - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ + # TODO: Refactor when convert_objects is removed since there will be 1 path + def convert(self, *args, **kwargs): + """ attempt to coerce any object types to better types return a copy of + the block (if copy = True) by definition we ARE an ObjectBlock!!!!! - # GH12564: CategoricalBlock is 1-dim only - # while returned results could be any dim - if ((not is_categorical_dtype(result)) and - isinstance(result, np.ndarray)): - result = _block_shape(result, ndim=self.ndim) + can return multiple blocks! 
+ """ - return result + if args: + raise NotImplementedError + by_item = True if 'by_item' not in kwargs else kwargs['by_item'] - def fillna(self, value, limit=None, inplace=False, downcast=None, - mgr=None): - # we may need to upcast our fill to match our dtype - if limit is not None: - raise NotImplementedError("specifying a limit for 'fillna' has " - "not been implemented yet") + new_inputs = ['coerce', 'datetime', 'numeric', 'timedelta'] + new_style = False + for kw in new_inputs: + new_style |= kw in kwargs - values = self.values if inplace else self.values.copy() - values = self._try_coerce_result(values.fillna(value=value, - limit=limit)) - return [self.make_block(values=values)] + if new_style: + fn = soft_convert_objects + fn_inputs = new_inputs + else: + fn = maybe_convert_objects + fn_inputs = ['convert_dates', 'convert_numeric', + 'convert_timedeltas'] + fn_inputs += ['copy'] - def interpolate(self, method='pad', axis=0, inplace=False, limit=None, - fill_value=None, **kwargs): + fn_kwargs = {} + for key in fn_inputs: + if key in kwargs: + fn_kwargs[key] = kwargs[key] - values = self.values if inplace else self.values.copy() - return self.make_block_same_class( - values=values.fillna(fill_value=fill_value, method=method, - limit=limit), - placement=self.mgr_locs) + # operate column-by-column + def f(m, v, i): + shape = v.shape + values = fn(v.ravel(), **fn_kwargs) + try: + values = values.reshape(shape) + values = _block_shape(values, ndim=self.ndim) + except (AttributeError, NotImplementedError): + pass - def shift(self, periods, axis=0, mgr=None): - return self.make_block_same_class(values=self.values.shift(periods), - placement=self.mgr_locs) + return values - def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): - """ - Take values according to indexer and return them as a block.bb - """ - if fill_tuple is None: - fill_value = None + if by_item and not self._is_single_block: + blocks = self.split_and_operate(None, f, False) else: - 
fill_value = fill_tuple[0] + values = f(None, self.values.ravel(), None) + blocks = [make_block(values, ndim=self.ndim, + placement=self.mgr_locs)] - # axis doesn't matter; we are really a single-dim object - # but are passed the axis depending on the calling routing - # if its REALLY axis 0, then this will be a reindex and not a take - new_values = self.values.take_nd(indexer, fill_value=fill_value) + return blocks - # if we are a 1-dim object, then always place at 0 - if self.ndim == 1: - new_mgr_locs = [0] - else: - if new_mgr_locs is None: - new_mgr_locs = self.mgr_locs + def set(self, locs, values, check=False): + """ + Modify Block in-place with new item value - return self.make_block_same_class(new_values, new_mgr_locs) + Returns + ------- + None + """ - def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ + # GH6026 + if check: + try: + if (self.values[locs] == values).all(): + return + except: + pass + try: + self.values[locs] = values + except (ValueError): - values = self.values - if slicer is not None: - # Categorical is always one dimension - values = values[slicer] - mask = isna(values) - values = np.array(values, dtype='object') - values[mask] = na_rep + # broadcasting error + # see GH6171 + new_shape = list(values.shape) + new_shape[0] = len(self.items) + self.values = np.empty(tuple(new_shape), dtype=self.dtype) + self.values.fill(np.nan) + self.values[locs] = values - # we are expected to return a 2-d ndarray - return values.reshape(1, len(values)) + def _maybe_downcast(self, blocks, downcast=None): - def concat_same_type(self, to_concat, placement=None): - """ - Concatenate list of single blocks of the same type. 
- """ - values = self._concatenator([blk.values for blk in to_concat], - axis=self.ndim - 1) - # not using self.make_block_same_class as values can be object dtype - return make_block( - values, placement=placement or slice(0, len(values), 1), - ndim=self.ndim) + if downcast is not None: + return blocks + # split and convert the blocks + return _extend_blocks([b.convert(datetime=True, numeric=False) + for b in blocks]) -class DatetimeBlock(DatetimeLikeBlockMixin, Block): - __slots__ = () - is_datetime = True - _can_hold_na = True + def _can_hold_element(self, element): + return True - def __init__(self, values, placement, fastpath=False, **kwargs): - if values.dtype != _NS_DTYPE: - values = conversion.ensure_datetime64ns(values) + def _try_coerce_args(self, values, other): + """ provide coercion to our input arguments """ - super(DatetimeBlock, self).__init__(values, fastpath=True, - placement=placement, **kwargs) + if isinstance(other, ABCDatetimeIndex): + # to store DatetimeTZBlock as object + other = other.astype(object).values - def _astype(self, dtype, mgr=None, **kwargs): - """ - these automatically copy, so copy=True has no effect - raise on an except if raise == True - """ + return values, False, other, False - # if we are passed a datetime64[ns, tz] - if is_datetime64tz_dtype(dtype): - dtype = DatetimeTZDtype(dtype) + def should_store(self, value): + return not (issubclass(value.dtype.type, + (np.integer, np.floating, np.complexfloating, + np.datetime64, np.bool_)) or + is_extension_type(value)) - values = self.values - if getattr(values, 'tz', None) is None: - values = DatetimeIndex(values).tz_localize('UTC') - values = values.tz_convert(dtype.tz) - return self.make_block(values) + def replace(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True, mgr=None): + to_rep_is_list = is_list_like(to_replace) + value_is_list = is_list_like(value) + both_lists = to_rep_is_list and value_is_list + either_list = to_rep_is_list or 
value_is_list - # delegate - return super(DatetimeBlock, self)._astype(dtype=dtype, **kwargs) + result_blocks = [] + blocks = [self] - def _can_hold_element(self, element): - tipo = maybe_infer_dtype_type(element) - if tipo is not None: - # TODO: this still uses asarray, instead of dtype.type - element = np.array(element) - return element.dtype == _NS_DTYPE or element.dtype == np.int64 - return (is_integer(element) or isinstance(element, datetime) or - isna(element)) + if not either_list and is_re(to_replace): + return self._replace_single(to_replace, value, inplace=inplace, + filter=filter, regex=True, + convert=convert, mgr=mgr) + elif not (either_list or regex): + return super(ObjectBlock, self).replace(to_replace, value, + inplace=inplace, + filter=filter, regex=regex, + convert=convert, mgr=mgr) + elif both_lists: + for to_rep, v in zip(to_replace, value): + result_blocks = [] + for b in blocks: + result = b._replace_single(to_rep, v, inplace=inplace, + filter=filter, regex=regex, + convert=convert, mgr=mgr) + result_blocks = _extend_blocks(result, result_blocks) + blocks = result_blocks + return result_blocks - def _try_coerce_args(self, values, other): - """ - Coerce values and other to dtype 'i8'. NaN and NaT convert to - the smallest i8, and will correctly round-trip to NaT if converted - back in _try_coerce_result. 
values is always ndarray-like, other - may not be + elif to_rep_is_list and regex: + for to_rep in to_replace: + result_blocks = [] + for b in blocks: + result = b._replace_single(to_rep, value, inplace=inplace, + filter=filter, regex=regex, + convert=convert, mgr=mgr) + result_blocks = _extend_blocks(result, result_blocks) + blocks = result_blocks + return result_blocks - Parameters - ---------- - values : ndarray-like - other : ndarray-like or scalar + return self._replace_single(to_replace, value, inplace=inplace, + filter=filter, convert=convert, + regex=regex, mgr=mgr) - Returns - ------- - base-type values, values mask, base-type other, other mask - """ + def _replace_single(self, to_replace, value, inplace=False, filter=None, + regex=False, convert=True, mgr=None): - values_mask = isna(values) - values = values.view('i8') - other_mask = False + inplace = validate_bool_kwarg(inplace, 'inplace') - if isinstance(other, bool): - raise TypeError - elif is_null_datelike_scalar(other): - other = tslib.iNaT - other_mask = True - elif isinstance(other, (datetime, np.datetime64, date)): - other = self._box_func(other) - if getattr(other, 'tz') is not None: - raise TypeError("cannot coerce a Timestamp with a tz on a " - "naive Block") - other_mask = isna(other) - other = other.asm8.view('i8') - elif hasattr(other, 'dtype') and is_datetime64_dtype(other): - other_mask = isna(other) - other = other.astype('i8', copy=False).view('i8') - else: - # coercion issues - # let higher levels handle - raise TypeError + # to_replace is regex compilable + to_rep_re = regex and is_re_compilable(to_replace) - return values, values_mask, other, other_mask + # regex is regex compilable + regex_re = is_re_compilable(regex) - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): - if result.dtype.kind in ['i', 'f', 'O']: - try: - result = result.astype('M8[ns]') - except ValueError: - pass - elif isinstance(result, (np.integer, 
np.float, np.datetime64)): - result = self._box_func(result) - return result + # only one will survive + if to_rep_re and regex_re: + raise AssertionError('only one of to_replace and regex can be ' + 'regex compilable') - @property - def _box_func(self): - return tslib.Timestamp + # if regex was passed as something that can be a regex (rather than a + # boolean) + if regex_re: + to_replace = regex - def to_native_types(self, slicer=None, na_rep=None, date_format=None, - quoting=None, **kwargs): - """ convert to our native types format, slicing if desired """ + regex = regex_re or to_rep_re - values = self.values - if slicer is not None: - values = values[..., slicer] + # try to get the pattern attribute (compiled re) or it's a string + try: + pattern = to_replace.pattern + except AttributeError: + pattern = to_replace + + # if the pattern is not empty and to_replace is either a string or a + # regex + if regex and pattern: + rx = re.compile(to_replace) + else: + # if the thing to replace is not a string or compiled regex call + # the superclass method -> to_replace is some kind of object + return super(ObjectBlock, self).replace(to_replace, value, + inplace=inplace, + filter=filter, regex=regex, + mgr=mgr) - from pandas.io.formats.format import _get_format_datetime64_from_values - format = _get_format_datetime64_from_values(values, date_format) + new_values = self.values if inplace else self.values.copy() - result = tslib.format_array_from_datetime( - values.view('i8').ravel(), tz=getattr(self.values, 'tz', None), - format=format, na_rep=na_rep).reshape(values.shape) - return np.atleast_2d(result) + # deal with replacing values with objects (strings) that match but + # whose replacement is not a string (numeric, nan, object) + if isna(value) or not isinstance(value, compat.string_types): - def should_store(self, value): - return (issubclass(value.dtype.type, np.datetime64) and - not is_datetimetz(value)) + def re_replacer(s): + try: + return value if rx.search(s) 
is not None else s + except TypeError: + return s + else: + # value is guaranteed to be a string here, s can be either a string + # or null if it's null it gets returned + def re_replacer(s): + try: + return rx.sub(value, s) + except TypeError: + return s - def set(self, locs, values, check=False): - """ - Modify Block in-place with new item value + f = np.vectorize(re_replacer, otypes=[self.dtype]) - Returns - ------- - None - """ - if values.dtype != _NS_DTYPE: - # Workaround for numpy 1.6 bug - values = conversion.ensure_datetime64ns(values) + if filter is None: + filt = slice(None) + else: + filt = self.mgr_locs.isin(filter).nonzero()[0] - self.values[locs] = values + new_values[filt] = f(new_values[filt]) + # convert + block = self.make_block(new_values) + if convert: + block = block.convert(by_item=True, numeric=False) -class DatetimeTZBlock(NonConsolidatableMixIn, DatetimeBlock): - """ implement a datetime64 block with a tz attribute """ - __slots__ = () - _holder = DatetimeIndex - _concatenator = staticmethod(_concat._concat_datetime) - is_datetimetz = True + return block - def __init__(self, values, placement, ndim=2, **kwargs): - if not isinstance(values, self._holder): - values = self._holder(values) +class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock): + __slots__ = () + is_categorical = True + _verify_integrity = True + _can_hold_na = True + _holder = Categorical + _concatenator = staticmethod(_concat._concat_categorical) - dtype = kwargs.pop('dtype', None) + def __init__(self, values, placement, fastpath=False, **kwargs): - if dtype is not None: - if isinstance(dtype, compat.string_types): - dtype = DatetimeTZDtype.construct_from_string(dtype) - values = values._shallow_copy(tz=dtype.tz) + # coerce to categorical if we can + super(CategoricalBlock, self).__init__(_maybe_to_categorical(values), + fastpath=True, + placement=placement, **kwargs) - if values.tz is None: - raise ValueError("cannot create a DatetimeTZBlock without a tz") + @property 
+ def is_view(self): + """ I am never a view """ + return False - super(DatetimeTZBlock, self).__init__(values, placement=placement, - ndim=ndim, **kwargs) + def to_dense(self): + return self.values.to_dense().view() - def copy(self, deep=True, mgr=None): - """ copy constructor """ - values = self.values - if deep: - values = values.copy(deep=True) - return self.make_block_same_class(values) + def convert(self, copy=True, **kwargs): + return self.copy() if copy else self - def external_values(self): - """ we internally represent the data as a DatetimeIndex, but for - external compat with ndarray, export as a ndarray of Timestamps + @property + def array_dtype(self): + """ the dtype to return if I want to construct this block as an + array """ - return self.values.astype('datetime64[ns]').values - - def get_values(self, dtype=None): - # return object dtype as Timestamps with the zones - if is_object_dtype(dtype): - f = lambda x: lib.Timestamp(x, tz=self.values.tz) - return lib.map_infer( - self.values.ravel(), f).reshape(self.values.shape) - return self.values + return np.object_ def _slice(self, slicer): """ return a slice of my values """ - if isinstance(slicer, tuple): - col, loc = slicer - if not is_null_slice(col) and col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values[loc] - return self.values[slicer] - - def _try_coerce_args(self, values, other): - """ - localize and return i8 for the values - Parameters - ---------- - values : ndarray-like - other : ndarray-like or scalar - - Returns - ------- - base-type values, values mask, base-type other, other mask - """ - values_mask = _block_shape(isna(values), ndim=self.ndim) - # asi8 is a view, needs copy - values = _block_shape(values.asi8, ndim=self.ndim) - other_mask = False + # slice the category + # return same dims as we currently have + return self.values._slice(slicer) - if isinstance(other, ABCSeries): - other = self._holder(other) - other_mask = isna(other) + def 
_try_coerce_result(self, result): + """ reverse of try_coerce_args """ - if isinstance(other, bool): - raise TypeError - elif (is_null_datelike_scalar(other) or - (is_scalar(other) and isna(other))): - other = tslib.iNaT - other_mask = True - elif isinstance(other, self._holder): - if other.tz != self.values.tz: - raise ValueError("incompatible or non tz-aware value") - other = other.asi8 - other_mask = isna(other) - elif isinstance(other, (np.datetime64, datetime, date)): - other = lib.Timestamp(other) - tz = getattr(other, 'tz', None) + # GH12564: CategoricalBlock is 1-dim only + # while returned results could be any dim + if ((not is_categorical_dtype(result)) and + isinstance(result, np.ndarray)): + result = _block_shape(result, ndim=self.ndim) - # test we can have an equal time zone - if tz is None or str(tz) != str(self.values.tz): - raise ValueError("incompatible or non tz-aware value") - other_mask = isna(other) - other = other.value - else: - raise TypeError + return result - return values, values_mask, other, other_mask + def fillna(self, value, limit=None, inplace=False, downcast=None, + mgr=None): + # we may need to upcast our fill to match our dtype + if limit is not None: + raise NotImplementedError("specifying a limit for 'fillna' has " + "not been implemented yet") - def _try_coerce_result(self, result): - """ reverse of try_coerce_args """ - if isinstance(result, np.ndarray): - if result.dtype.kind in ['i', 'f', 'O']: - result = result.astype('M8[ns]') - elif isinstance(result, (np.integer, np.float, np.datetime64)): - result = lib.Timestamp(result, tz=self.values.tz) - if isinstance(result, np.ndarray): - # allow passing of > 1dim if its trivial - if result.ndim > 1: - result = result.reshape(np.prod(result.shape)) - result = self.values._shallow_copy(result) + values = self.values if inplace else self.values.copy() + values = self._try_coerce_result(values.fillna(value=value, + limit=limit)) + return [self.make_block(values=values)] - return 
result + def interpolate(self, method='pad', axis=0, inplace=False, limit=None, + fill_value=None, **kwargs): - @property - def _box_func(self): - return lambda x: tslib.Timestamp(x, tz=self.dtype.tz) + values = self.values if inplace else self.values.copy() + return self.make_block_same_class( + values=values.fillna(fill_value=fill_value, method=method, + limit=limit), + placement=self.mgr_locs) def shift(self, periods, axis=0, mgr=None): - """ shift the block by periods """ - - # think about moving this to the DatetimeIndex. This is a non-freq - # (number of periods) shift ### + return self.make_block_same_class(values=self.values.shift(periods), + placement=self.mgr_locs) - N = len(self) - indexer = np.zeros(N, dtype=int) - if periods > 0: - indexer[periods:] = np.arange(N - periods) + def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None): + """ + Take values according to indexer and return them as a block.bb + """ + if fill_tuple is None: + fill_value = None else: - indexer[:periods] = np.arange(-periods, N) + fill_value = fill_tuple[0] - new_values = self.values.asi8.take(indexer) + # axis doesn't matter; we are really a single-dim object + # but are passed the axis depending on the calling routing + # if its REALLY axis 0, then this will be a reindex and not a take + new_values = self.values.take_nd(indexer, fill_value=fill_value) - if periods > 0: - new_values[:periods] = tslib.iNaT + # if we are a 1-dim object, then always place at 0 + if self.ndim == 1: + new_mgr_locs = [0] else: - new_values[periods:] = tslib.iNaT + if new_mgr_locs is None: + new_mgr_locs = self.mgr_locs - new_values = self.values._shallow_copy(new_values) - return [self.make_block_same_class(new_values, - placement=self.mgr_locs)] + return self.make_block_same_class(new_values, new_mgr_locs) + + def to_native_types(self, slicer=None, na_rep='', quoting=None, **kwargs): + """ convert to our native types format, slicing if desired """ + + values = self.values + if slicer 
is not None: + # Categorical is always one dimension + values = values[slicer] + mask = isna(values) + values = np.array(values, dtype='object') + values[mask] = na_rep + + # we are expected to return a 2-d ndarray + return values.reshape(1, len(values)) def concat_same_type(self, to_concat, placement=None): """ @@ -2740,9 +2739,10 @@ def concat_same_type(self, to_concat, placement=None): """ values = self._concatenator([blk.values for blk in to_concat], axis=self.ndim - 1) - # not using self.make_block_same_class as values can be non-tz dtype + # not using self.make_block_same_class as values can be object dtype return make_block( - values, placement=placement or slice(0, len(values), 1)) + values, placement=placement or slice(0, len(values), 1), + ndim=self.ndim) class SparseBlock(NonConsolidatableMixIn, Block):
Pure cut/paste.
https://api.github.com/repos/pandas-dev/pandas/pulls/19221
2018-01-13T07:26:55Z
2018-01-13T16:11:01Z
null
2018-02-11T21:59:17Z
Adding (DEPRECATED) prefix to deprecated objects summary in the documentation
diff --git a/doc/source/conf.py b/doc/source/conf.py index 43c7c23c5e20d..8509460f8ca4e 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -16,6 +16,7 @@ import re import inspect import importlib +from sphinx.ext.autosummary import _import_by_name import warnings @@ -47,6 +48,10 @@ ]) +# numpydoc is available in the sphinxext directory, and can't be imported +# until sphinxext is available in the Python path +from numpydoc.docscrape import NumpyDocString + # -- General configuration ----------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be @@ -505,9 +510,27 @@ def _replace_pandas_items(self, display_name, sig, summary, real_name): summary = 'Series plotting accessor and method' return (display_name, sig, summary, real_name) + @staticmethod + def _is_deprecated(real_name): + try: + obj, parent, modname = _import_by_name(real_name) + except ImportError: + return False + doc = NumpyDocString(obj.__doc__ or '') + summary = ''.join(doc['Summary'] + doc['Extended Summary']) + return '.. deprecated::' in summary + + def _add_deprecation_prefixes(self, items): + for item in items: + display_name, sig, summary, real_name = item + if self._is_deprecated(real_name): + summary = '(DEPRECATED) %s' % summary + yield display_name, sig, summary, real_name + def get_items(self, names): items = Autosummary.get_items(self, names) items = [self._replace_pandas_items(*item) for item in items] + items = list(self._add_deprecation_prefixes(items)) return items
- [ ] closes #xxxx - [ ] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry In #18934, @jorisvandenbossche pointed out that replacing a "manual" DEPRECATED comment at the beginning of the docstrings by the `.. deprecated::` sphinx directive had a drawback: in the summary pages of the documentation, it doesn't show that the method is deprecated anymore. This PR shows a way (there are surely others) of using the sphinx directive to add the DEPRECATED prefix automatically. So, for example, in a page like https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html, in the list of methods, if `abs` was deprecated, instead of the current text (in the right column): > Return an object with absolute value taken–only applicable to objects that are all numeric. if would show: > (DEPRECATED) Return an object with absolute value taken–only applicable to objects that are all numeric. Personally I think this way is cleaner than adding the DEPRECATED text to each docstring.
https://api.github.com/repos/pandas-dev/pandas/pulls/19220
2018-01-13T02:30:19Z
2018-04-14T13:35:25Z
2018-04-14T13:35:25Z
2018-04-15T12:10:57Z
API: Add compression argument to Series.to_csv
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a0205a8d64cb7..61d79c943c0e4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -271,6 +271,7 @@ Other API Changes - :class:`IntervalIndex` and ``IntervalDtype`` no longer support categorical, object, and string subtypes (:issue:`19016`) - The default ``Timedelta`` constructor now accepts an ``ISO 8601 Duration`` string as an argument (:issue:`19040`) - ``IntervalDtype`` now returns ``True`` when compared against ``'interval'`` regardless of subtype, and ``IntervalDtype.name`` now returns ``'interval'`` regardless of subtype (:issue:`18980`) +- :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/series.py b/pandas/core/series.py index 71cded4f9c888..4b6e6690eac0a 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -2881,7 +2881,8 @@ def from_csv(cls, path, sep=',', parse_dates=True, header=None, def to_csv(self, path=None, index=True, sep=",", na_rep='', float_format=None, header=False, index_label=None, - mode='w', encoding=None, date_format=None, decimal='.'): + mode='w', encoding=None, compression=None, date_format=None, + decimal='.'): """ Write Series to a comma-separated values (csv) file @@ -2908,6 +2909,10 @@ def to_csv(self, path=None, index=True, sep=",", na_rep='', encoding : string, optional a string representing the encoding to use if the contents are non-ascii, for python versions prior to 3 + compression : string, optional + a string representing the compression to use in the output file, + allowed values are 'gzip', 'bz2', 'xz', only used when the first + argument is a filename date_format: string, default None Format string for datetime objects. decimal: string, default '.' 
@@ -2920,8 +2925,8 @@ def to_csv(self, path=None, index=True, sep=",", na_rep='', result = df.to_csv(path, index=index, sep=sep, na_rep=na_rep, float_format=float_format, header=header, index_label=index_label, mode=mode, - encoding=encoding, date_format=date_format, - decimal=decimal) + encoding=encoding, compression=compression, + date_format=date_format, decimal=decimal) if path is None: return result diff --git a/pandas/tests/series/test_io.py b/pandas/tests/series/test_io.py index ad51261a47c5c..99dcc9272bf11 100644 --- a/pandas/tests/series/test_io.py +++ b/pandas/tests/series/test_io.py @@ -14,6 +14,7 @@ from pandas.util.testing import (assert_series_equal, assert_almost_equal, assert_frame_equal, ensure_clean) import pandas.util.testing as tm +import pandas.util._test_decorators as td from .common import TestData @@ -138,6 +139,36 @@ def test_to_csv_path_is_none(self): csv_str = s.to_csv(path=None) assert isinstance(csv_str, str) + @pytest.mark.parametrize('compression', [ + None, + 'gzip', + 'bz2', + pytest.param('xz', marks=td.skip_if_no_lzma), + ]) + def test_to_csv_compression(self, compression): + + s = Series([0.123456, 0.234567, 0.567567], index=['A', 'B', 'C'], + name='X') + + with ensure_clean() as filename: + + s.to_csv(filename, compression=compression, header=True) + + # test the round trip - to_csv -> read_csv + rs = pd.read_csv(filename, compression=compression, index_col=0, + squeeze=True) + assert_series_equal(s, rs) + + # explicitly ensure file was compressed + f = tm.decompress_file(filename, compression=compression) + text = f.read().decode('utf8') + assert s.name in text + f.close() + + f = tm.decompress_file(filename, compression=compression) + assert_series_equal(s, pd.read_csv(f, index_col=0, squeeze=True)) + f.close() + class TestSeriesIO(TestData): diff --git a/pandas/util/testing.py b/pandas/util/testing.py index cd9ebd3017256..1bea25a16ca1e 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -162,6 +162,41 @@ def 
round_trip_localpath(writer, reader, path=None): return obj +def decompress_file(path, compression): + """ + Open a compressed file and return a file object + + Parameters + ---------- + path : str + The path where the file is read from + + compression : {'gzip', 'bz2', 'xz', None} + Name of the decompression to use + + Returns + ------- + f : file object + """ + + if compression is None: + f = open(path, 'rb') + elif compression == 'gzip': + import gzip + f = gzip.open(path, 'rb') + elif compression == 'bz2': + import bz2 + f = bz2.BZ2File(path, 'rb') + elif compression == 'xz': + lzma = compat.import_lzma() + f = lzma.LZMAFile(path, 'rb') + else: + msg = 'Unrecognized compression type: {}'.format(compression) + raise ValueError(msg) + + return f + + def assert_almost_equal(left, right, check_exact=False, check_dtype='equiv', check_less_precise=False, **kwargs):
- [ ] closes #18958 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19216
2018-01-13T00:05:22Z
2018-01-15T14:11:25Z
2018-01-15T14:11:24Z
2018-01-16T19:10:19Z
Doc: Adds example of exploding lists into columns instead of storing in dataframe cells
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index bc490877e190d..b2854670739f4 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -332,3 +332,91 @@ using something similar to the following: See `the NumPy documentation on byte order <https://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more details. + + +Alternative to storing lists in DataFrame Cells +------------------------------------------------------ +Storing nested lists/arrays inside a pandas object should be avoided for performance and memory use reasons. Instead they should be "exploded" into a flat ``DataFrame`` structure. + +Example of exploding nested lists into a DataFrame: + +.. ipython:: python + + df = pd.DataFrame({'name': ['A.J. Price'] * 3, + 'opponent': ['76ers', 'blazers', 'bobcats']}, + columns=['name','opponent']) + df + + nearest_neighbors = [['Zach LaVine', 'Jeremy Lin', 'Nate Robinson', 'Isaia']]*3 + nearest_neighbors + + #. Create an index with the "parent" columns to be included in the final Dataframe + df2 = pd.concat([df[['name','opponent']], pd.DataFrame(nearest_neighbors)], axis=1) + df2 + + #. Transform the column with lists into series, which become columns in a new Dataframe. + # Note that only the index from the original df is retained - + # any other columns in the original df are not part of the new df + df3 = df2.set_index(['name', 'opponent']) + df3 + + #. Stack the new columns as rows; this creates a new index level we'll want to drop in the next step. + # Note that at this point we have a Series, not a Dataframe + ser = df3.stack() + ser + + #. Drop the extraneous index level created by the stack + ser.reset_index(level=2, drop=True, inplace=True) + ser + + #. 
Create a Dataframe from the Series + df4 = ser.to_frame('nearest_neighbors') + df4 + + # All steps in one stack + df4 = (df2.set_index(['name', 'opponent']) + .stack() + .reset_index(level=2, drop=True) + .to_frame('nearest_neighbors')) + df4 + +Example of exploding a list embedded in a dataframe: + +.. ipython:: python + + df = pd.DataFrame({'name': ['A.J. Price'] * 3, + 'opponent': ['76ers', 'blazers', 'bobcats'], + 'nearest_neighbors': [['Zach LaVine', 'Jeremy Lin', 'Nate Robinson', 'Isaia']] * 3}, + columns=['name','opponent','nearest_neighbors']) + df + + #. Create an index with the "parent" columns to be included in the final Dataframe + df2 = df.set_index(['name', 'opponent']) + df2 + + #. Transform the column with lists into series, which become columns in a new Dataframe. + # Note that only the index from the original df is retained - + # any other columns in the original df are not part of the new df + df3 = df2.nearest_neighbors.apply(pd.Series) + df3 + + #. Stack the new columns as rows; this creates a new index level we'll want to drop in the next step. + # Note that at this point we have a Series, not a Dataframe + ser = df3.stack() + ser + + #. Drop the extraneous index level created by the stack + ser.reset_index(level=2, drop=True, inplace=True) + ser + + #. Create a Dataframe from the Series + df4 = ser.to_frame('nearest_neighbors') + df4 + + # All steps in one stack + df4 = (df.set_index(['name', 'opponent']) + .nearest_neighbors.apply(pd.Series) + .stack() + .reset_index(level=2, drop=True) + .to_frame('nearest_neighbors')) + df4
- [X] closes #17027
https://api.github.com/repos/pandas-dev/pandas/pulls/19215
2018-01-12T23:56:49Z
2018-10-09T04:47:45Z
null
2018-10-09T04:47:46Z
Clarify equals method docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index cef1e551f948e..6d1cbdc3785cd 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1047,8 +1047,9 @@ def __invert__(self): def equals(self, other): """ - Determines if two NDFrame objects contain the same elements. NaNs in - the same location are considered equal. + Determines if two NDFrame objects have equal columns labels, equal + elements, and the same dtypes at corresponding locations. NaNs in the + same location are considered equal. """ if not isinstance(other, self._constructor): return False
Make explicit that equals method requires that columns have the same dtypes but not that indices have the same types (e.g. `pd.DataFrame({1:[0], 0:[1]}).equals(pd.DataFrame({1.0:[0], 0.0:[1]}))` returns `True` while `pd.DataFrame({1:[0], 0:[1]}, dtype='float32').equals(pd.DataFrame({1:[0], 0:[1]}, dtype='float64'))` returns `False`) - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19213
2018-01-12T19:36:22Z
2018-08-22T13:46:52Z
null
2018-08-22T13:46:52Z
DOC: list default compression for to_parquet
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a8c4053850548..974d5bc1ff113 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1670,8 +1670,8 @@ def to_parquet(self, fname, engine='auto', compression='snappy', Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first library to be installed is used. - compression : {'snappy', 'gzip', 'brotli', 'None'} - Name of the compression to use. Use ``None`` for no compression + compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' + Name of the compression to use. Use ``None`` for no compression. kwargs Additional keyword arguments passed to the engine """ diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 0c88706a3bec2..f67b03d924d2c 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -218,8 +218,8 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first library to be installed is used. - compression : {'snappy', 'gzip', 'brotli', 'None'} - Name of the compression to use. Use ``None`` for no compression + compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy' + Name of the compression to use. Use ``None`` for no compression. kwargs Additional keyword arguments passed to the engine """
follow-up on GH19131
https://api.github.com/repos/pandas-dev/pandas/pulls/19212
2018-01-12T19:30:10Z
2018-01-13T14:47:36Z
2018-01-13T14:47:36Z
2018-01-13T14:47:50Z
DOC: Fix documentation for read_csv's mangle_dupe_cols (GH19203)
diff --git a/doc/source/io.rst b/doc/source/io.rst index 3e1619d6e1578..b15d3918eb569 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -149,7 +149,7 @@ squeeze : boolean, default ``False`` prefix : str, default ``None`` Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ... mangle_dupe_cols : boolean, default ``True`` - Duplicate columns will be specified as 'X.0'...'X.N', rather than 'X'...'X'. + Duplicate columns will be specified as 'X', 'X.1'...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. @@ -548,7 +548,7 @@ these names so as to prevent data overwrite: pd.read_csv(StringIO(data)) There is no more duplicate data because ``mangle_dupe_cols=True`` by default, which modifies -a series of duplicate columns 'X'...'X' to become 'X.0'...'X.N'. If ``mangle_dupe_cols +a series of duplicate columns 'X'...'X' to become 'X', 'X.1',...'X.N'. If ``mangle_dupe_cols =False``, duplicate data can arise: .. code-block :: python diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py index acb7d00284693..150fccde81a60 100755 --- a/pandas/io/parsers.py +++ b/pandas/io/parsers.py @@ -114,7 +114,7 @@ prefix : str, default None Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ... mangle_dupe_cols : boolean, default True - Duplicate columns will be specified as 'X.0'...'X.N', rather than + Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than 'X'...'X'. Passing in False will cause data to be overwritten if there are duplicate names in the columns. dtype : Type name or dict of column -> type, default None
- [ x] closes #19203
https://api.github.com/repos/pandas-dev/pandas/pulls/19208
2018-01-12T14:32:43Z
2018-01-15T14:17:42Z
2018-01-15T14:17:42Z
2018-01-15T14:17:50Z
CLN: flake8 cleanup of core.internals
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a4778ae4e629..0eda1cf0c5f75 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -191,6 +191,13 @@ def fill_value(self): def mgr_locs(self): return self._mgr_locs + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs): + if not isinstance(new_mgr_locs, BlockPlacement): + new_mgr_locs = BlockPlacement(new_mgr_locs) + + self._mgr_locs = new_mgr_locs + @property def array_dtype(self): """ the dtype to return if I want to construct this block as an @@ -224,13 +231,6 @@ def make_block_same_class(self, values, placement=None, fastpath=True, return make_block(values, placement=placement, klass=self.__class__, fastpath=fastpath, **kwargs) - @mgr_locs.setter - def mgr_locs(self, new_mgr_locs): - if not isinstance(new_mgr_locs, BlockPlacement): - new_mgr_locs = BlockPlacement(new_mgr_locs) - - self._mgr_locs = new_mgr_locs - def __unicode__(self): # don't want to print out all of the items here @@ -633,7 +633,7 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, newb = make_block(values, placement=self.mgr_locs, dtype=dtype, klass=klass) - except: + except Exception: if errors == 'raise': raise newb = self.copy() if copy else self @@ -840,7 +840,6 @@ def setitem(self, indexer, value, mgr=None): transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x) values = transf(values) - l = len(values) # length checking # boolean with truth values == len of the value is ok too @@ -855,7 +854,7 @@ def setitem(self, indexer, value, mgr=None): # slice elif isinstance(indexer, slice): - if is_list_like(value) and l: + if is_list_like(value) and len(values): if len(value) != length_of_indexer(indexer, values): raise ValueError("cannot set using a slice indexer with a " "different length than the value") @@ -1108,7 +1107,7 @@ def check_int_bool(self, inplace): # a fill na type method try: m = missing.clean_fill_method(method) - except: + except ValueError: m = None if m is not 
None: @@ -1123,7 +1122,7 @@ def check_int_bool(self, inplace): # try an interp method try: m = missing.clean_interp_method(method, **kwargs) - except: + except ValueError: m = None if m is not None: @@ -2166,7 +2165,7 @@ def set(self, locs, values, check=False): try: if (self.values[locs] == values).all(): return - except: + except Exception: pass try: self.values[locs] = values @@ -2807,7 +2806,8 @@ def _astype(self, dtype, copy=False, errors='raise', values=None, def __len__(self): try: return self.sp_index.length - except: + except Exception: + # TODO: Catch something more specific? return 0 def copy(self, deep=True, mgr=None):
- [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19202
2018-01-12T06:29:15Z
2018-01-13T18:56:00Z
null
2018-02-11T21:59:16Z
DOC: Update ASV benchmark example in contributing.rst
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst index dc07104f64c65..cdbbad6eb75d6 100644 --- a/doc/source/contributing.rst +++ b/doc/source/contributing.rst @@ -858,9 +858,9 @@ takes a regular expression. For example, this will only run tests from a If you want to only run a specific group of tests from a file, you can do it using ``.`` as a separator. For example:: - asv continuous -f 1.1 upstream/master HEAD -b groupby.groupby_agg_builtins + asv continuous -f 1.1 upstream/master HEAD -b groupby.GroupByMethods -will only run the ``groupby_agg_builtins`` benchmark defined in ``groupby.py``. +will only run the ``GroupByMethods`` benchmark defined in ``groupby.py``. You can also run the benchmark suite using the version of ``pandas`` already installed in your current Python environment. This can be
Updating the ASV example based on the cleaning that was done in `asv_bench/benchmarks/groupby.py`
https://api.github.com/repos/pandas-dev/pandas/pulls/19201
2018-01-12T06:02:19Z
2018-01-12T08:40:48Z
2018-01-12T08:40:48Z
2018-01-12T18:00:32Z
CI: move conda build tests back to allowed failures
diff --git a/.travis.yml b/.travis.yml index ca1dfcae43e76..bd5cac8955c8d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -49,6 +49,7 @@ matrix: apt: packages: - python-gtk2 + # In allow_failures - dist: trusty env: - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true @@ -76,6 +77,9 @@ matrix: env: - JOB="3.6_DOC" DOC=true allow_failures: + - dist: trusty + env: + - JOB="3.5_CONDA_BUILD_TEST" TEST_ARGS="--skip-slow --skip-network" CONDA_BUILD_TEST=true - dist: trusty env: - JOB="2.7_SLOW" SLOW=true
https://api.github.com/repos/pandas-dev/pandas/pulls/19196
2018-01-12T00:18:47Z
2018-01-12T00:30:19Z
2018-01-12T00:30:19Z
2018-01-13T19:49:31Z
Hand written ISO parser for Timedelta construction
diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index 8dba8c15f0b81..b2c9c464c7cbf 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # cython: profile=False import collections -import re import sys cdef bint PY3 = (sys.version_info[0] >= 3) @@ -236,6 +235,14 @@ cpdef inline int64_t cast_from_unit(object ts, object unit) except? -1: return <int64_t> (base *m) + <int64_t> (frac *m) +cdef inline _decode_if_necessary(object ts): + # decode ts if necessary + if not PyUnicode_Check(ts) and not PY3: + ts = str(ts).decode('utf-8') + + return ts + + cdef inline parse_timedelta_string(object ts): """ Parse a regular format timedelta string. Return an int64_t (in ns) @@ -258,9 +265,7 @@ cdef inline parse_timedelta_string(object ts): if len(ts) == 0 or ts in nat_strings: return NPY_NAT - # decode ts if necessary - if not PyUnicode_Check(ts) and not PY3: - ts = str(ts).decode('utf-8') + ts = _decode_if_necessary(ts) for c in ts: @@ -507,26 +512,14 @@ def _binary_op_method_timedeltalike(op, name): # ---------------------------------------------------------------------- # Timedelta Construction -iso_pater = re.compile(r"""P - (?P<days>-?[0-9]*)DT - (?P<hours>[0-9]{1,2})H - (?P<minutes>[0-9]{1,2})M - (?P<seconds>[0-9]{0,2}) - (\. - (?P<milliseconds>[0-9]{1,3}) - (?P<microseconds>[0-9]{0,3}) - (?P<nanoseconds>[0-9]{0,3}) - )?S""", re.VERBOSE) - - -cdef int64_t parse_iso_format_string(object iso_fmt) except? -1: +cdef inline int64_t parse_iso_format_string(object ts) except? -1: """ Extracts and cleanses the appropriate values from a match object with groups for each component of an ISO 8601 duration Parameters ---------- - iso_fmt: + ts: ISO 8601 Duration formatted string Returns @@ -537,25 +530,93 @@ cdef int64_t parse_iso_format_string(object iso_fmt) except? 
-1: Raises ------ ValueError - If ``iso_fmt`` cannot be parsed + If ``ts`` cannot be parsed """ - cdef int64_t ns = 0 + cdef: + unicode c + int64_t result = 0, r + int p=0 + object dec_unit = 'ms', err_msg + bint have_dot=0, have_value=0, neg=0 + list number=[], unit=[] - match = re.match(iso_pater, iso_fmt) - if match: - match_dict = match.groupdict(default='0') - for comp in ['milliseconds', 'microseconds', 'nanoseconds']: - match_dict[comp] = '{:0<3}'.format(match_dict[comp]) + ts = _decode_if_necessary(ts) - for k, v in match_dict.items(): - ns += timedelta_from_spec(v, '0', k) + err_msg = "Invalid ISO 8601 Duration format - {}".format(ts) - else: - raise ValueError("Invalid ISO 8601 Duration format - " - "{}".format(iso_fmt)) + for c in ts: + # number (ascii codes) + if ord(c) >= 48 and ord(c) <= 57: + + have_value = 1 + if have_dot: + if p == 3 and dec_unit != 'ns': + unit.append(dec_unit) + if dec_unit == 'ms': + dec_unit = 'us' + elif dec_unit == 'us': + dec_unit = 'ns' + p = 0 + p += 1 + + if not len(unit): + number.append(c) + else: + # if in days, pop trailing T + if unit[-1] == 'T': + unit.pop() + elif 'H' in unit or 'M' in unit: + if len(number) > 2: + raise ValueError(err_msg) + r = timedelta_from_spec(number, '0', unit) + result += timedelta_as_neg(r, neg) - return ns + neg = 0 + unit, number = [], [c] + else: + if c == 'P': + pass # ignore leading character + elif c == '-': + if neg or have_value: + raise ValueError(err_msg) + else: + neg = 1 + elif c in ['D', 'T', 'H', 'M']: + unit.append(c) + elif c == '.': + # append any seconds + if len(number): + r = timedelta_from_spec(number, '0', 'S') + result += timedelta_as_neg(r, neg) + unit, number = [], [] + have_dot = 1 + elif c == 'S': + if have_dot: # ms, us, or ns + if not len(number) or p > 3: + raise ValueError(err_msg) + # pad to 3 digits as required + pad = 3 - p + while pad > 0: + number.append('0') + pad -= 1 + + r = timedelta_from_spec(number, '0', dec_unit) + result += timedelta_as_neg(r, 
neg) + else: # seconds + if len(number) <= 2: + r = timedelta_from_spec(number, '0', 'S') + result += timedelta_as_neg(r, neg) + else: + raise ValueError(err_msg) + else: + raise ValueError(err_msg) + + if not have_value: + # Received string only - never parsed any values + raise ValueError(err_msg) + + return result cdef _to_py_int_float(v):
- [X] closes #19103 - [X] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry ``` Running 16 total benchmarks (2 commits * 1 environments * 8 benchmarks) [ 0.00%] · For pandas commit hash 76475dd8: [ 0.00%] ·· Building for conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt............................................................... [ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 6.25%] ··· Running timedelta.TimedeltaConstructor.time_from_components 15.4±0.4μs [ 12.50%] ··· Running timedelta.TimedeltaConstructor.time_from_datetime_timedelta 8.06±0.3μs [ 18.75%] ··· Running timedelta.TimedeltaConstructor.time_from_int 6.24±0.2μs [ 25.00%] ··· Running timedelta.TimedeltaConstructor.time_from_iso_format 12.1±0.4μs [ 31.25%] ··· Running timedelta.TimedeltaConstructor.time_from_missing 2.07±0.03μs [ 37.50%] ··· Running timedelta.TimedeltaConstructor.time_from_np_timedelta 5.06±0.09μs [ 43.75%] ··· Running timedelta.TimedeltaConstructor.time_from_string 6.52±0.1μs [ 50.00%] ··· Running timedelta.TimedeltaConstructor.time_from_unit 6.66±0.1μs [ 50.00%] · For pandas commit hash 8acdf801: [ 50.00%] ·· Building for conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt... 
[ 50.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 56.25%] ··· Running timedelta.TimedeltaConstructor.time_from_components 15.5±0.3μs [ 62.50%] ··· Running timedelta.TimedeltaConstructor.time_from_datetime_timedelta 7.90±0.2μs [ 68.75%] ··· Running timedelta.TimedeltaConstructor.time_from_int 6.11±0.2μs [ 75.00%] ··· Running timedelta.TimedeltaConstructor.time_from_iso_format 25.0±0.7μs [ 81.25%] ··· Running timedelta.TimedeltaConstructor.time_from_missing 2.27±0.06μs [ 87.50%] ··· Running timedelta.TimedeltaConstructor.time_from_np_timedelta 5.04±0.1μs [ 93.75%] ··· Running timedelta.TimedeltaConstructor.time_from_string 7.04±0.2μs [100.00%] ··· Running timedelta.TimedeltaConstructor.time_from_unit 6.51±0.1μs before after ratio [8acdf801] [76475dd8] - 25.0±0.7μs 12.1±0.4μs 0.48 timedelta.TimedeltaConstructor.time_from_iso_format SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19191
2018-01-11T21:06:39Z
2018-01-12T11:36:14Z
2018-01-12T11:36:14Z
2018-02-27T01:32:15Z
CLN: unify logic for form_blocks and make_blocks
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a4778ae4e629..079f3113dc2da 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2914,35 +2914,52 @@ def sparse_reindex(self, new_index): placement=self.mgr_locs) +def get_block_type(values, dtype=None): + """ + Find the appropriate Block subclass to use for the given values and dtype. + + Parameters + ---------- + values : ndarray-like + dtype : numpy or pandas dtype + + Returns + ------- + cls : class, subclass of Block + """ + dtype = dtype or values.dtype + vtype = dtype.type + + if is_sparse(values): + cls = SparseBlock + elif issubclass(vtype, np.floating): + cls = FloatBlock + elif issubclass(vtype, np.timedelta64): + assert issubclass(vtype, np.integer) + cls = TimeDeltaBlock + elif issubclass(vtype, np.complexfloating): + cls = ComplexBlock + elif issubclass(vtype, np.datetime64): + assert not is_datetimetz(values) + cls = DatetimeBlock + elif is_datetimetz(values): + cls = DatetimeTZBlock + elif issubclass(vtype, np.integer): + cls = IntBlock + elif dtype == np.bool_: + cls = BoolBlock + elif is_categorical(values): + cls = CategoricalBlock + else: + cls = ObjectBlock + return cls + + def make_block(values, placement, klass=None, ndim=None, dtype=None, fastpath=False): if klass is None: dtype = dtype or values.dtype - vtype = dtype.type - - if isinstance(values, SparseArray): - klass = SparseBlock - elif issubclass(vtype, np.floating): - klass = FloatBlock - elif (issubclass(vtype, np.integer) and - issubclass(vtype, np.timedelta64)): - klass = TimeDeltaBlock - elif (issubclass(vtype, np.integer) and - not issubclass(vtype, np.datetime64)): - klass = IntBlock - elif dtype == np.bool_: - klass = BoolBlock - elif issubclass(vtype, np.datetime64): - assert not hasattr(values, 'tz') - klass = DatetimeBlock - elif is_datetimetz(values): - klass = DatetimeTZBlock - elif issubclass(vtype, np.complexfloating): - klass = ComplexBlock - elif is_categorical(values): - klass = 
CategoricalBlock - else: - klass = ObjectBlock + klass = get_block_type(values, dtype) elif klass is DatetimeTZBlock and not is_datetimetz(values): return klass(values, ndim=ndim, fastpath=fastpath, @@ -4658,15 +4675,7 @@ def create_block_manager_from_arrays(arrays, names, axes): def form_blocks(arrays, names, axes): # put "leftover" items in float bucket, where else? # generalize? - float_items = [] - complex_items = [] - int_items = [] - bool_items = [] - object_items = [] - sparse_items = [] - datetime_items = [] - datetime_tz_items = [] - cat_items = [] + items_dict = defaultdict(list) extra_locs = [] names_idx = Index(names) @@ -4684,70 +4693,55 @@ def form_blocks(arrays, names, axes): k = names[name_idx] v = arrays[name_idx] - if is_sparse(v): - sparse_items.append((i, k, v)) - elif issubclass(v.dtype.type, np.floating): - float_items.append((i, k, v)) - elif issubclass(v.dtype.type, np.complexfloating): - complex_items.append((i, k, v)) - elif issubclass(v.dtype.type, np.datetime64): - if v.dtype != _NS_DTYPE: - v = conversion.ensure_datetime64ns(v) - - assert not is_datetimetz(v) - datetime_items.append((i, k, v)) - elif is_datetimetz(v): - datetime_tz_items.append((i, k, v)) - elif issubclass(v.dtype.type, np.integer): - int_items.append((i, k, v)) - elif v.dtype == np.bool_: - bool_items.append((i, k, v)) - elif is_categorical(v): - cat_items.append((i, k, v)) - else: - object_items.append((i, k, v)) + block_type = get_block_type(v) + items_dict[block_type.__name__].append((i, k, v)) blocks = [] - if len(float_items): - float_blocks = _multi_blockify(float_items) + if len(items_dict['FloatBlock']): + float_blocks = _multi_blockify(items_dict['FloatBlock']) blocks.extend(float_blocks) - if len(complex_items): - complex_blocks = _multi_blockify(complex_items) + if len(items_dict['ComplexBlock']): + complex_blocks = _multi_blockify(items_dict['ComplexBlock']) blocks.extend(complex_blocks) - if len(int_items): - int_blocks = _multi_blockify(int_items) + if 
len(items_dict['TimeDeltaBlock']): + timedelta_blocks = _multi_blockify(items_dict['TimeDeltaBlock']) + blocks.extend(timedelta_blocks) + + if len(items_dict['IntBlock']): + int_blocks = _multi_blockify(items_dict['IntBlock']) blocks.extend(int_blocks) - if len(datetime_items): - datetime_blocks = _simple_blockify(datetime_items, _NS_DTYPE) + if len(items_dict['DatetimeBlock']): + datetime_blocks = _simple_blockify(items_dict['DatetimeBlock'], + _NS_DTYPE) blocks.extend(datetime_blocks) - if len(datetime_tz_items): + if len(items_dict['DatetimeTZBlock']): dttz_blocks = [make_block(array, klass=DatetimeTZBlock, fastpath=True, - placement=[i], ) - for i, _, array in datetime_tz_items] + placement=[i]) + for i, _, array in items_dict['DatetimeTZBlock']] blocks.extend(dttz_blocks) - if len(bool_items): - bool_blocks = _simple_blockify(bool_items, np.bool_) + if len(items_dict['BoolBlock']): + bool_blocks = _simple_blockify(items_dict['BoolBlock'], np.bool_) blocks.extend(bool_blocks) - if len(object_items) > 0: - object_blocks = _simple_blockify(object_items, np.object_) + if len(items_dict['ObjectBlock']) > 0: + object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_) blocks.extend(object_blocks) - if len(sparse_items) > 0: - sparse_blocks = _sparse_blockify(sparse_items) + if len(items_dict['SparseBlock']) > 0: + sparse_blocks = _sparse_blockify(items_dict['SparseBlock']) blocks.extend(sparse_blocks) - if len(cat_items) > 0: + if len(items_dict['CategoricalBlock']) > 0: cat_blocks = [make_block(array, klass=CategoricalBlock, fastpath=True, placement=[i]) - for i, _, array in cat_items] + for i, _, array in items_dict['CategoricalBlock']] blocks.extend(cat_blocks) if len(extra_locs):
In the background part of the intention here is to make things like #19174 easier. - [x] closes #19179 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19189
2018-01-11T18:57:46Z
2018-01-18T11:09:49Z
2018-01-18T11:09:49Z
2018-02-11T21:59:01Z
DOC: Improve DataFrame.select_dtypes examples
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index a8c4053850548..43df2c48fcf58 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2417,17 +2417,18 @@ def select_dtypes(self, include=None, exclude=None): Notes ----- - * To select all *numeric* types use the numpy dtype ``numpy.number`` + * To select all *numeric* types, use ``np.number`` or ``'number'`` * To select strings you must use the ``object`` dtype, but note that this will return *all* object dtype columns * See the `numpy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__ - * To select datetimes, use np.datetime64, 'datetime' or 'datetime64' - * To select timedeltas, use np.timedelta64, 'timedelta' or - 'timedelta64' - * To select Pandas categorical dtypes, use 'category' - * To select Pandas datetimetz dtypes, use 'datetimetz' (new in 0.20.0), - or a 'datetime64[ns, tz]' string + * To select datetimes, use ``np.datetime64``, ``'datetime'`` or + ``'datetime64'`` + * To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or + ``'timedelta64'`` + * To select Pandas categorical dtypes, use ``'category'`` + * To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in + 0.20.0) or ``'datetime64[ns, tz]'`` Examples -------- @@ -2436,12 +2437,12 @@ def select_dtypes(self, include=None, exclude=None): ... 'c': [1.0, 2.0] * 3}) >>> df a b c - 0 0.3962 True 1 - 1 0.1459 False 2 - 2 0.2623 True 1 - 3 0.0764 False 2 - 4 -0.9703 True 1 - 5 -1.2094 False 2 + 0 0.3962 True 1.0 + 1 0.1459 False 2.0 + 2 0.2623 True 1.0 + 3 0.0764 False 2.0 + 4 -0.9703 True 1.0 + 5 -1.2094 False 2.0 >>> df.select_dtypes(include='bool') c 0 True @@ -2452,12 +2453,12 @@ def select_dtypes(self, include=None, exclude=None): 5 False >>> df.select_dtypes(include=['float64']) c - 0 1 - 1 2 - 2 1 - 3 2 - 4 1 - 5 2 + 0 1.0 + 1 2.0 + 2 1.0 + 3 2.0 + 4 1.0 + 5 2.0 >>> df.select_dtypes(exclude=['floating']) b 0 True
Greetings, This is a minor change in the ``DataFrame.select_dtypes`` doc. - Add formatting (such as in `np.number`), to illustrate better it is a piece of code. - Add decimal place to the third column of the example to proper address it is a float column (maybe in the past float numbers didn't show decimal places all the time, but now they do). Thanks for reviewing this! Let me know if I can help further more.
https://api.github.com/repos/pandas-dev/pandas/pulls/19188
2018-01-11T18:53:16Z
2018-01-12T00:30:38Z
2018-01-12T00:30:38Z
2018-01-12T15:48:19Z
CI: move location of setting non-blocking IO
diff --git a/.travis.yml b/.travis.yml index 5cc6547968b7d..ca1dfcae43e76 100644 --- a/.travis.yml +++ b/.travis.yml @@ -95,6 +95,9 @@ matrix: before_install: - echo "before_install" + # set non-blocking IO on travis + # https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024 + - python -c 'import os,sys,fcntl; flags = fcntl.fcntl(sys.stdout, fcntl.F_GETFL); fcntl.fcntl(sys.stdout, fcntl.F_SETFL, flags&~os.O_NONBLOCK);' - source ci/travis_process_gbq_encryption.sh - export PATH="$HOME/miniconda3/bin:$PATH" - df -h diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 272e7f2e05d14..4ec5b0a9d8820 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -101,9 +101,6 @@ time conda create -n pandas --file=${REQ} || exit 1 source activate pandas -# https://github.com/travis-ci/travis-ci/issues/8920#issuecomment-352661024 -python -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)" - # may have addtl installation instructions for this build echo echo "[build addtl installs]"
https://api.github.com/repos/pandas-dev/pandas/pulls/19184
2018-01-11T11:42:53Z
2018-01-11T13:21:54Z
2018-01-11T13:21:54Z
2018-01-11T13:22:25Z
CLN: Remove pandas.util | tools/hashing.py
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 92eeed89ada2a..dc305f36f32ec 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -317,6 +317,7 @@ Removal of prior version deprecations/changes - The options ``display.line_with`` and ``display.height`` are removed in favor of ``display.width`` and ``display.max_rows`` respectively (:issue:`4391`, :issue:`19107`) - The ``labels`` attribute of the ``Categorical`` class has been removed in favor of :attribute:`Categorical.codes` (:issue:`7768`) - The ``flavor`` parameter have been removed from func:`to_sql` method (:issue:`13611`) +- The modules `pandas.tools.hashing` and `pandas.util.hashing` have been removed (:issue:`16223`) .. _whatsnew_0230.performance: diff --git a/pandas/tests/util/test_hashing.py b/pandas/tests/util/test_hashing.py index 289592939e3da..fe8d75539879e 100644 --- a/pandas/tests/util/test_hashing.py +++ b/pandas/tests/util/test_hashing.py @@ -290,18 +290,3 @@ def test_hash_collisions(self): result = hash_array(np.asarray(L, dtype=object), 'utf8') tm.assert_numpy_array_equal( result, np.concatenate([expected1, expected2], axis=0)) - - -def test_deprecation(): - - with tm.assert_produces_warning(DeprecationWarning, - check_stacklevel=False): - from pandas.tools.hashing import hash_pandas_object - obj = Series(list('abc')) - hash_pandas_object(obj, hash_key='9876543210123456') - - with tm.assert_produces_warning(DeprecationWarning, - check_stacklevel=False): - from pandas.tools.hashing import hash_array - obj = np.array([1, 2, 3]) - hash_array(obj, hash_key='9876543210123456') diff --git a/pandas/tools/hashing.py b/pandas/tools/hashing.py deleted file mode 100644 index ba38710b607af..0000000000000 --- a/pandas/tools/hashing.py +++ /dev/null @@ -1,18 +0,0 @@ -import warnings -import sys - -m = sys.modules['pandas.tools.hashing'] -for t in ['hash_pandas_object', 'hash_array']: - - def outer(t=t): - - def wrapper(*args, **kwargs): - 
from pandas import util - warnings.warn("pandas.tools.hashing is deprecated and will be " - "removed in a future version, import " - "from pandas.util", - DeprecationWarning, stacklevel=3) - return getattr(util, t)(*args, **kwargs) - return wrapper - - setattr(m, t, outer(t)) diff --git a/pandas/util/hashing.py b/pandas/util/hashing.py deleted file mode 100644 index f97a7ac507407..0000000000000 --- a/pandas/util/hashing.py +++ /dev/null @@ -1,18 +0,0 @@ -import warnings -import sys - -m = sys.modules['pandas.util.hashing'] -for t in ['hash_pandas_object', 'hash_array']: - - def outer(t=t): - - def wrapper(*args, **kwargs): - from pandas import util - warnings.warn("pandas.util.hashing is deprecated and will be " - "removed in a future version, import " - "from pandas.util", - DeprecationWarning, stacklevel=3) - return getattr(util, t)(*args, **kwargs) - return wrapper - - setattr(m, t, outer(t))
Deprecated in v0.20.0 xref #16223
https://api.github.com/repos/pandas-dev/pandas/pulls/19181
2018-01-11T10:11:58Z
2018-01-11T11:32:14Z
2018-01-11T11:32:14Z
2018-01-11T16:43:17Z
remove unreachable states, assert they are unreachable just in case
diff --git a/pandas/core/internals.py b/pandas/core/internals.py index ba90503e3bf40..5a4778ae4e629 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -2933,10 +2933,8 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, elif dtype == np.bool_: klass = BoolBlock elif issubclass(vtype, np.datetime64): - if hasattr(values, 'tz'): - klass = DatetimeTZBlock - else: - klass = DatetimeBlock + assert not hasattr(values, 'tz') + klass = DatetimeBlock elif is_datetimetz(values): klass = DatetimeTZBlock elif issubclass(vtype, np.complexfloating): @@ -4696,10 +4694,8 @@ def form_blocks(arrays, names, axes): if v.dtype != _NS_DTYPE: v = conversion.ensure_datetime64ns(v) - if is_datetimetz(v): - datetime_tz_items.append((i, k, v)) - else: - datetime_items.append((i, k, v)) + assert not is_datetimetz(v) + datetime_items.append((i, k, v)) elif is_datetimetz(v): datetime_tz_items.append((i, k, v)) elif issubclass(v.dtype.type, np.integer):
No luck so far in tracking down the issue where this was discussed. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19180
2018-01-11T02:59:16Z
2018-01-12T00:33:09Z
2018-01-12T00:33:09Z
2018-02-11T21:59:18Z
BUG: assign doesnt cast SparseDataFrame to DataFrame
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index acab9d0bbebf8..72f63a4da0f4d 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -822,6 +822,7 @@ Sparse - Bug in which creating a ``SparseDataFrame`` from a dense ``Series`` or an unsupported type raised an uncontrolled exception (:issue:`19374`) - Bug in :class:`SparseDataFrame.to_csv` causing exception (:issue:`19384`) - Bug in :class:`SparseSeries.memory_usage` which caused segfault by accessing non sparse elements (:issue:`19368`) +- Bug in constructing a ``SparseArray``: if ``data`` is a scalar and ``index`` is defined it will coerce to ``float64`` regardless of scalar's dtype. (:issue:`19163`) Reshaping ^^^^^^^^^ diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 65aefd9fb8c0a..3cbae717d0e07 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -26,7 +26,8 @@ is_scalar, is_dtype_equal) from pandas.core.dtypes.cast import ( maybe_convert_platform, maybe_promote, - astype_nansafe, find_common_type) + astype_nansafe, find_common_type, infer_dtype_from_scalar, + construct_1d_arraylike_from_scalar) from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype import pandas._libs.sparse as splib @@ -162,9 +163,9 @@ def __new__(cls, data, sparse_index=None, index=None, kind='integer', data = np.nan if not is_scalar(data): raise Exception("must only pass scalars with an index ") - values = np.empty(len(index), dtype='float64') - values.fill(data) - data = values + dtype = infer_dtype_from_scalar(data)[0] + data = construct_1d_arraylike_from_scalar( + data, len(index), dtype) if isinstance(data, ABCSparseSeries): data = data.values diff --git a/pandas/tests/sparse/frame/test_frame.py b/pandas/tests/sparse/frame/test_frame.py index 29fad3c8eefaf..0e8b2161cafc4 100644 --- a/pandas/tests/sparse/frame/test_frame.py +++ b/pandas/tests/sparse/frame/test_frame.py @@ -1257,3 +1257,14 @@ def 
test_quantile_multi(self): tm.assert_frame_equal(result, dense_expected) tm.assert_sp_frame_equal(result, sparse_expected) + + def test_assign_with_sparse_frame(self): + # GH 19163 + df = pd.DataFrame({"a": [1, 2, 3]}) + res = df.to_sparse(fill_value=False).assign(newcol=False) + exp = df.assign(newcol=False).to_sparse(fill_value=False) + + tm.assert_sp_frame_equal(res, exp) + + for column in res.columns: + assert type(res[column]) is SparseSeries diff --git a/pandas/tests/sparse/test_array.py b/pandas/tests/sparse/test_array.py index 8de93ff320961..6c0c83cf65ff7 100644 --- a/pandas/tests/sparse/test_array.py +++ b/pandas/tests/sparse/test_array.py @@ -113,6 +113,21 @@ def test_constructor_spindex_dtype(self): assert arr.dtype == np.int64 assert arr.fill_value == 0 + @pytest.mark.parametrize('scalar,dtype', [ + (False, bool), + (0.0, 'float64'), + (1, 'int64'), + ('z', 'object')]) + def test_scalar_with_index_infer_dtype(self, scalar, dtype): + # GH 19163 + arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar) + exp = SparseArray([scalar, scalar, scalar], fill_value=scalar) + + tm.assert_sp_array_equal(arr, exp) + + assert arr.dtype == dtype + assert exp.dtype == dtype + def test_sparseseries_roundtrip(self): # GH 13999 for kind in ['integer', 'block']:
The problem here is that a SparseDataFrame that calls assign should cast to a DataFrame mainly because SparseDataFrames are a special case. - [x] closes #19163 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19178
2018-01-11T02:46:38Z
2018-02-12T12:06:12Z
2018-02-12T12:06:12Z
2018-02-12T12:06:26Z
Fixed issue with read_json and partially missing MI names
diff --git a/doc/source/io.rst b/doc/source/io.rst index 3e1619d6e1578..59870ef9e7b0f 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -2228,9 +2228,10 @@ round-trippable manner. new_df new_df.dtypes -Please note that the string `index` is not supported with the round trip -format, as it is used by default in ``write_json`` to indicate a missing index -name. +Please note that the literal string 'index' as the name of an :class:`Index` +is not round-trippable, nor are any names beginning with 'level_' within a +:class:`MultiIndex`. These are used by default in :func:`DataFrame.to_json` to +indicate missing values and the subsequent read cannot distinguish the intent. .. ipython:: python diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index d1c83ad57f59d..6d35fc5769331 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -341,12 +341,14 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, Notes ----- - Specific to ``orient='table'``, if a ``DataFrame`` with a literal ``Index`` - name of `index` gets written with ``write_json``, the subsequent read - operation will incorrectly set the ``Index`` name to ``None``. This is - because `index` is also used by ``write_json`` to denote a missing - ``Index`` name, and the subsequent ``read_json`` operation cannot - distinguish between the two. + Specific to ``orient='table'``, if a :class:`DataFrame` with a literal + :class:`Index` name of `index` gets written with :func:`to_json`, the + subsequent read operation will incorrectly set the :class:`Index` name to + ``None``. This is because `index` is also used by :func:`DataFrame.to_json` + to denote a missing :class:`Index` name, and the subsequent + :func:`read_json` operation cannot distinguish between the two. The same + limitation is encountered with a :class:`MultiIndex` and any names + beginning with 'level_'. 
See Also -------- diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 8da36b64b0914..89b7a1de8acfc 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -3,6 +3,8 @@ http://specs.frictionlessdata.io/json-table-schema/ """ +import warnings + import pandas._libs.json as json from pandas import DataFrame from pandas.api.types import CategoricalDtype @@ -68,6 +70,12 @@ def as_json_table_type(x): def set_default_names(data): """Sets index names to 'index' for regular, or 'level_x' for Multi""" if _all_not_none(*data.index.names): + nms = data.index.names + if len(nms) == 1 and data.index.name == 'index': + warnings.warn("Index name of 'index' is not round-trippable") + elif len(nms) > 1 and any(x.startswith('level_') for x in nms): + warnings.warn("Index names beginning with 'level_' are not " + "round-trippable") return data data = data.copy() @@ -273,10 +281,13 @@ def parse_table_schema(json, precise_float): Notes ----- - Because ``write_json`` uses the string `index` to denote a name-less - ``Index``, this function sets the name of the returned ``DataFrame`` to - ``None`` when said string is encountered. Therefore, intentional usage - of `index` as the ``Index`` name is not supported. + Because :func:`DataFrame.to_json` uses the string 'index' to denote a + name-less :class:`Index`, this function sets the name of the returned + :class:`DataFrame` to ``None`` when said string is encountered with a + normal :class:`Index`. For a :class:`MultiIndex`, the same limitation + applies to any strings beginning with 'level_'. Therefore, an + :class:`Index` name of 'index' and :class:`MultiIndex` names starting + with 'level_' are not supported. 
See also -------- @@ -303,10 +314,11 @@ def parse_table_schema(json, precise_float): df = df.astype(dtypes) df = df.set_index(table['schema']['primaryKey']) - if len(df.index.names) == 1 and df.index.name == 'index': - df.index.name = None + if len(df.index.names) == 1: + if df.index.name == 'index': + df.index.name = None else: - if all(x.startswith('level_') for x in df.index.names): - df.index.names = [None] * len(df.index.names) + df.index.names = [None if x.startswith('level_') else x for x in + df.index.names] return df diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index ccccdc9b0863e..49b39c17238ae 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -451,6 +451,20 @@ def test_set_names_unset(self, idx, nm, prop): result = set_default_names(data) assert getattr(result.index, prop) == nm + @pytest.mark.parametrize("idx", [ + pd.Index([], name='index'), + pd.MultiIndex.from_arrays([['foo'], ['bar']], + names=('level_0', 'level_1')), + pd.MultiIndex.from_arrays([['foo'], ['bar']], + names=('foo', 'level_1')) + ]) + def test_warns_non_roundtrippable_names(self, idx): + # GH 19130 + df = pd.DataFrame([[]], index=idx) + df.index.name = 'index' + with tm.assert_produces_warning(): + set_default_names(df) + def test_timestamp_in_columns(self): df = pd.DataFrame([[1, 2]], columns=[pd.Timestamp('2016'), pd.Timedelta(10, unit='s')]) @@ -481,7 +495,8 @@ def test_mi_falsey_name(self): class TestTableOrientReader(object): @pytest.mark.parametrize("index_nm", [ - None, "idx", pytest.param("index", marks=pytest.mark.xfail)]) + None, "idx", pytest.param("index", marks=pytest.mark.xfail), + 'level_0']) @pytest.mark.parametrize("vals", [ {'ints': [1, 2, 3, 4]}, {'objects': ['a', 'b', 'c', 'd']}, @@ -492,7 +507,7 @@ class TestTableOrientReader(object): pytest.param({'floats': [1., 2., 3., 4.]}, marks=pytest.mark.xfail), {'floats': [1.1, 2.2, 3.3, 4.4]}, 
{'bools': [True, False, False, True]}]) - def test_read_json_table_orient(self, index_nm, vals): + def test_read_json_table_orient(self, index_nm, vals, recwarn): df = DataFrame(vals, index=pd.Index(range(4), name=index_nm)) out = df.to_json(orient="table") result = pd.read_json(out, orient="table") @@ -504,7 +519,7 @@ def test_read_json_table_orient(self, index_nm, vals): {'timedeltas': pd.timedelta_range('1H', periods=4, freq='T')}, {'timezones': pd.date_range('2016-01-01', freq='d', periods=4, tz='US/Central')}]) - def test_read_json_table_orient_raises(self, index_nm, vals): + def test_read_json_table_orient_raises(self, index_nm, vals, recwarn): df = DataFrame(vals, index=pd.Index(range(4), name=index_nm)) out = df.to_json(orient="table") with tm.assert_raises_regex(NotImplementedError, 'can not yet read '): @@ -530,7 +545,9 @@ def test_comprehensive(self): result = pd.read_json(out, orient="table") tm.assert_frame_equal(df, result) - @pytest.mark.parametrize("index_names", [[None, None], ['foo', 'bar']]) + @pytest.mark.parametrize("index_names", [ + [None, None], ['foo', 'bar'], ['foo', None], [None, 'foo'], + ['index', 'foo']]) def test_multiindex(self, index_names): # GH 18912 df = pd.DataFrame(
- [X] closes #19130 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19177
2018-01-11T01:02:49Z
2018-01-16T00:47:24Z
2018-01-16T00:47:23Z
2018-02-27T01:32:16Z
COMPAT: make astype to datetime/timedelta types more robust
diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index 5fcb5f09dfae7..4b1cb9c20e465 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -690,7 +690,7 @@ def astype_nansafe(arr, dtype, copy=True): raise ValueError('Cannot convert non-finite values (NA or inf) to ' 'integer') - elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer): + elif is_object_dtype(arr.dtype) and np.issubdtype(dtype.type, np.integer): # work around NumPy brokenness, #1987 return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape) @@ -703,6 +703,19 @@ def astype_nansafe(arr, dtype, copy=True): dtype = np.dtype(dtype.name + "[ns]") if copy: + + if arr.dtype == dtype: + return arr.copy() + + # we handle datetimelikes with pandas machinery + # to be robust to the input type + elif is_datetime64_dtype(dtype): + from pandas import to_datetime + return to_datetime(arr).values + elif is_timedelta64_dtype(dtype): + from pandas import to_timedelta + return to_timedelta(arr).values + return arr.astype(dtype) return arr.view(dtype) diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index fc3790287d7da..ccccdc9b0863e 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -499,7 +499,7 @@ def test_read_json_table_orient(self, index_nm, vals): tm.assert_frame_equal(df, result) @pytest.mark.parametrize("index_nm", [ - None, "idx", pytest.param("index", marks=pytest.mark.xfail)]) + None, "idx", "index"]) @pytest.mark.parametrize("vals", [ {'timedeltas': pd.timedelta_range('1H', periods=4, freq='T')}, {'timezones': pd.date_range('2016-01-01', freq='d', periods=4,
closes #19116
https://api.github.com/repos/pandas-dev/pandas/pulls/19176
2018-01-11T00:37:39Z
2018-01-11T11:09:13Z
2018-01-11T11:09:13Z
2018-01-11T11:09:34Z
Doc: Add warning to treat group chunks as immutable
diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index fecc336049a40..188ca80fbcd11 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -941,11 +941,15 @@ that is itself a series, and possibly upcast the result to a DataFrame: .. warning:: - In the current implementation apply calls func twice on the + * In the current implementation ``apply`` calls func twice on the first group to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if func has side-effects, as they will take effect twice for the first group. + + * For predictable results and performance, func should **not** mutate + any of the group chunks. Group chunks should be treated as immutable + since changes to a group chunk may produce unexpected results. .. ipython:: python
- [X] closes #14180
https://api.github.com/repos/pandas-dev/pandas/pulls/19175
2018-01-10T21:42:01Z
2018-12-02T00:25:58Z
null
2018-12-02T00:25:58Z
ENH: Extending Pandas with custom types
diff --git a/.gitignore b/.gitignore index 0d4e8c6fb75a6..657234a985111 100644 --- a/.gitignore +++ b/.gitignore @@ -108,3 +108,4 @@ doc/tmp.sv doc/source/styled.xlsx doc/source/templates/ env/ +.mypy_cache diff --git a/doc/source/developer.rst b/doc/source/developer.rst index b8bb2b2fcbe2f..9d6d0b8c2ff5a 100644 --- a/doc/source/developer.rst +++ b/doc/source/developer.rst @@ -140,3 +140,64 @@ As an example of fully-formed metadata: 'metadata': None} ], 'pandas_version': '0.20.0'} + +.. _developer.custom-array-types: + +Custom Array Types +------------------ + +.. versionadded:: 0.23.0 + +.. warning:: + Support for custom array types is experimental. + +Sometimes the NumPy type system isn't rich enough for your needs. Pandas has +made a few extensions internally (e.g. ``Categorical``). While this has worked +well for pandas, not all custom data types belong in pandas itself. + +Pandas defines an interface for custom arrays. Arrays implementing this +interface will be stored correctly in ``Series`` or ``DataFrame``. The ABCs +that must be implemented are + +1. :class:`ExtensionDtype` A class describing your data type itself. This is + similar to a ``numpy.dtype``. +2. :class:`ExtensionArray`: A container for your data. + +Throughout this document, we'll use the example of storing IPv6 addresses. An +IPv6 address is 128 bits, so NumPy doesn't have a native data type for it. We'll +model it as a structured array with two ``uint64`` fields, which together +represent the 128-bit integer that is the IP Address. + +Extension Dtype +''''''''''''''' + +This class should describe your data type. The most important fields are +``name`` and ``base``: + +.. code-block:: python + + class IPv6Type(ExtensionDtype): + name = 'IPv6' + base = np.dtype([('hi', '>u8'), ('lo', '>u8')]) + type = IPTypeType + kind = 'O' + fill_value = np.array([(0, 0)], dtype=base) + +``base`` describe the underlying storage of individual items in your array. +TODO: is this true? 
Or does ``.base`` refer to the original memory this +is a view on? Different meanings for ``np.dtype.base`` vs. ``np.ndarray.base``? + +In our IPAddress case, we're using a NumPy structured array with two fields. + +Extension Array +''''''''''''''' + +This is the actual array container for your data, similar to a ``Categorical``, +and requires the most work to implement correctly. *pandas makes no assumptions +about how you store the data*. You're free to use NumPy arrays or PyArrow +arrays, or even just Python lists. That said, several of the methods required by +the interface expect NumPy arrays as the return value. + +* ``dtype``: Should be an *instance* of your custom ``ExtensionType`` +* ``formtting_values(self)``: Used for printing Series and DataFrame +* ``concat_same_type(concat)``: Used in :func:`pd.concat` diff --git a/pandas/api/__init__.py b/pandas/api/__init__.py index fcbf42f6dabc4..afff059e7b601 100644 --- a/pandas/api/__init__.py +++ b/pandas/api/__init__.py @@ -1 +1,2 @@ """ public toolkit API """ +from . 
import types, extensions # noqa diff --git a/pandas/api/extensions.py b/pandas/api/extensions.py new file mode 100644 index 0000000000000..e9a9a2f41b50d --- /dev/null +++ b/pandas/api/extensions.py @@ -0,0 +1,4 @@ +from pandas.core.extensions import ( # noqa + ExtensionArray, + ExtensionDtype, +) diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py index c754c063fce8e..ec993374139a2 100644 --- a/pandas/core/algorithms.py +++ b/pandas/core/algorithms.py @@ -6,6 +6,7 @@ from warnings import warn, catch_warnings import numpy as np +from pandas.core.extensions import ExtensionArray from pandas.core.dtypes.cast import ( maybe_promote, construct_1d_object_array_from_listlike) from pandas.core.dtypes.generic import ( @@ -22,7 +23,7 @@ is_categorical, is_datetimetz, is_datetime64_any_dtype, is_datetime64tz_dtype, is_timedelta64_dtype, is_interval_dtype, - is_scalar, is_list_like, + is_scalar, is_list_like, is_extension_type, _ensure_platform_int, _ensure_object, _ensure_float64, _ensure_uint64, _ensure_int64) @@ -542,9 +543,12 @@ def value_counts(values, sort=True, ascending=False, normalize=False, else: - if is_categorical_dtype(values) or is_sparse(values): - - # handle Categorical and sparse, + if (is_extension_type(values) and not + is_datetime64tz_dtype(values)): + # Need the not is_datetime64tz_dtype since it's actually + # an ndarray. It doesn't have a `.values.value_counts`. + # Perhaps we need a new is_extension_type method that + # distinguishes these... 
result = Series(values).values.value_counts(dropna=dropna) result.name = name counts = result.values @@ -1323,6 +1327,8 @@ def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan, mask_info=None, return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) elif is_interval_dtype(arr): return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) + elif isinstance(arr, ExtensionArray): + return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) if indexer is None: indexer = np.arange(arr.shape[axis], dtype=np.int64) diff --git a/pandas/core/base.py b/pandas/core/base.py index e90794c6c2e1a..3bcb51de69d32 100644 --- a/pandas/core/base.py +++ b/pandas/core/base.py @@ -880,7 +880,7 @@ def _map_values(self, mapper, na_action=None): if isinstance(mapper, ABCSeries): # Since values were input this means we came from either # a dict or a series and mapper should be an index - if is_extension_type(self.dtype): + if is_extension_type(self): values = self._values else: values = self.values @@ -891,7 +891,8 @@ def _map_values(self, mapper, na_action=None): return new_values # we must convert to python types - if is_extension_type(self.dtype): + # TODO: is map part of the interface? 
+ if is_extension_type(self) and hasattr(self._values, 'map'): values = self._values if na_action is not None: raise NotImplementedError diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 92fcdc0f4625b..6c26d382c0fb5 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -43,6 +43,7 @@ from pandas.io.formats.terminal import get_terminal_size from pandas.util._validators import validate_bool_kwarg from pandas.core.config import get_option +from pandas.core.extensions import ExtensionArray def _cat_compare_op(op): @@ -409,6 +410,11 @@ def dtype(self): """The :class:`~pandas.api.types.CategoricalDtype` for this instance""" return self._dtype + @property + def _block_type(self): + from pandas.core.internals import CategoricalBlock + return CategoricalBlock + @property def _constructor(self): return Categorical @@ -2131,6 +2137,15 @@ def repeat(self, repeats, *args, **kwargs): return self._constructor(values=codes, categories=self.categories, ordered=self.ordered, fastpath=True) + +# TODO: Categorical does not currently implement +# - concat_same_type +# - can_hold_na +# We don't need to implement these, since they're just for +# Block things, and we only use CategoricalBlocks for categoricals. +# We could move that logic from CategoricalBlock to Categorical, +# but holding off for now. +ExtensionArray.register(Categorical) # The Series.cat accessor diff --git a/pandas/core/dtypes/cast.py b/pandas/core/dtypes/cast.py index b3ae8aae53b35..e1bce91fee624 100644 --- a/pandas/core/dtypes/cast.py +++ b/pandas/core/dtypes/cast.py @@ -1089,6 +1089,7 @@ def find_common_type(types): numpy.find_common_type """ + # TODO: Make part of the interface? 
if len(types) == 0: raise ValueError('no types given') @@ -1100,7 +1101,8 @@ def find_common_type(types): if all(is_dtype_equal(first, t) for t in types[1:]): return first - if any(isinstance(t, ExtensionDtype) for t in types): + # TODO: Period is an ExtensionDtype + if any(isinstance(t, (ExtensionDtype, PeriodDtype)) for t in types): return np.object # take lowest unit diff --git a/pandas/core/dtypes/common.py b/pandas/core/dtypes/common.py index 5d6fc7487eeb5..bd3c2928d4dd0 100644 --- a/pandas/core/dtypes/common.py +++ b/pandas/core/dtypes/common.py @@ -568,7 +568,6 @@ def is_string_dtype(arr_or_dtype): """ # TODO: gh-15585: consider making the checks stricter. - if arr_or_dtype is None: return False try: @@ -1624,11 +1623,13 @@ def is_bool_dtype(arr_or_dtype): def is_extension_type(arr): """ - Check whether an array-like is of a pandas extension class instance. + Check whether an array-like is a pandas extension class instance. Extension classes include categoricals, pandas sparse objects (i.e. classes represented within the pandas library and not ones external - to it like scipy sparse matrices), and datetime-like arrays. + to it like scipy sparse matrices), and datetime-like arrays with + timezones, or any third-party objects satisfying the pandas array + interface. 
Parameters ---------- @@ -1646,39 +1647,44 @@ def is_extension_type(arr): False >>> is_extension_type(np.array([1, 2, 3])) False - >>> + + Categoricals >>> cat = pd.Categorical([1, 2, 3]) - >>> >>> is_extension_type(cat) True >>> is_extension_type(pd.Series(cat)) - True + + pandas' Sparse arrays >>> is_extension_type(pd.SparseArray([1, 2, 3])) True >>> is_extension_type(pd.SparseSeries([1, 2, 3])) True - >>> >>> from scipy.sparse import bsr_matrix >>> is_extension_type(bsr_matrix([1, 2, 3])) False >>> is_extension_type(pd.DatetimeIndex([1, 2, 3])) False + + pandas' datetime with timezone >>> is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) True - >>> >>> dtype = DatetimeTZDtype("ns", tz="US/Eastern") >>> s = pd.Series([], dtype=dtype) >>> is_extension_type(s) True """ - - if is_categorical(arr): - return True - elif is_sparse(arr): - return True - elif is_datetimetz(arr): - return True - return False + # XXX: we have many places where we call this with a `.dtype`, + # instead of a type. Think about supporting that too... + from pandas.core.extensions import ExtensionArray, ExtensionDtype + return (isinstance(arr, ExtensionArray) or + isinstance(getattr(arr, 'values', None), ExtensionArray) or + # XXX: I don't like this getattr('dtype'), but I think it's + # necessary since DatetimeIndex().values of a datetime w/ tz + # is just a regular numpy array, and not an instance of + # ExtensionArray. I think that's since + # datetime (without tz) is *not* an extension type, but + # datetime[tz] *is* an extension type. 
+ isinstance(getattr(arr, 'dtype', None), ExtensionDtype)) def is_complex_dtype(arr_or_dtype): diff --git a/pandas/core/dtypes/dtypes.py b/pandas/core/dtypes/dtypes.py index 2ec35889d6a7a..ec197a1519b7f 100644 --- a/pandas/core/dtypes/dtypes.py +++ b/pandas/core/dtypes/dtypes.py @@ -4,27 +4,11 @@ import numpy as np from pandas import compat from pandas.core.dtypes.generic import ABCIndexClass, ABCCategoricalIndex +from pandas.core.extensions import ExtensionDtype -class ExtensionDtype(object): - """ - A np.dtype duck-typed class, suitable for holding a custom dtype. - - THIS IS NOT A REAL NUMPY DTYPE - """ - name = None - names = None - type = None - subdtype = None - kind = None - str = None - num = 100 - shape = tuple() - itemsize = 8 - base = None - isbuiltin = 0 - isnative = 0 - _metadata = [] +class PandasExtensionMixin(object): + """Useful stuff that isn't in the interface""" _cache = {} def __unicode__(self): @@ -62,17 +46,6 @@ def __repr__(self): """ return str(self) - def __hash__(self): - raise NotImplementedError("sub-classes should implement an __hash__ " - "method") - - def __eq__(self, other): - raise NotImplementedError("sub-classes should implement an __eq__ " - "method") - - def __ne__(self, other): - return not self.__eq__(other) - def __getstate__(self): # pickle support; we don't want to pickle the cache return {k: getattr(self, k, None) for k in self._metadata} @@ -84,9 +57,6 @@ def reset_cache(cls): @classmethod def is_dtype(cls, dtype): - """ Return a boolean if the passed type is an actual dtype that - we can match (via string or type) - """ if hasattr(dtype, 'dtype'): dtype = dtype.dtype if isinstance(dtype, np.dtype): @@ -97,7 +67,7 @@ def is_dtype(cls, dtype): return True try: return cls.construct_from_string(dtype) is not None - except: + except TypeError: return False @@ -108,7 +78,7 @@ class CategoricalDtypeType(type): pass -class CategoricalDtype(ExtensionDtype): +class CategoricalDtype(PandasExtensionMixin, ExtensionDtype): """ Type 
for categorical data with the categories and orderedness @@ -387,7 +357,7 @@ class DatetimeTZDtypeType(type): pass -class DatetimeTZDtype(ExtensionDtype): +class DatetimeTZDtype(PandasExtensionMixin, ExtensionDtype): """ A np.dtype duck-typed class, suitable for holding a custom datetime with tz @@ -501,7 +471,7 @@ class PeriodDtypeType(type): pass -class PeriodDtype(ExtensionDtype): +class PeriodDtype(PandasExtensionMixin): __metaclass__ = PeriodDtypeType """ A Period duck-typed class, suitable for holding a period with freq dtype. @@ -516,6 +486,7 @@ class PeriodDtype(ExtensionDtype): _metadata = ['freq'] _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]") _cache = {} + names = None # TODO inherit and remove def __new__(cls, freq=None): """ @@ -619,7 +590,7 @@ class IntervalDtypeType(type): pass -class IntervalDtype(ExtensionDtype): +class IntervalDtype(PandasExtensionMixin, ExtensionDtype): __metaclass__ = IntervalDtypeType """ A Interval duck-typed class, suitable for holding an interval diff --git a/pandas/core/dtypes/missing.py b/pandas/core/dtypes/missing.py index ffac702476af1..ef0f7bd708091 100644 --- a/pandas/core/dtypes/missing.py +++ b/pandas/core/dtypes/missing.py @@ -18,9 +18,11 @@ is_scalar, is_object_dtype, is_integer, + is_extension_type, _TD_DTYPE, _NS_DTYPE) from .inference import is_list_like +from ..extensions import ExtensionArray isposinf_scalar = libmissing.isposinf_scalar isneginf_scalar = libmissing.isneginf_scalar @@ -57,7 +59,8 @@ def _isna_new(obj): # hack (for now) because MI registers as ndarray elif isinstance(obj, ABCMultiIndex): raise NotImplementedError("isna is not defined for MultiIndex") - elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass)): + elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, + ExtensionArray)): return _isna_ndarraylike(obj) elif isinstance(obj, ABCGeneric): return obj._constructor(obj._data.isna(func=isna)) @@ -128,7 +131,9 @@ def _isna_ndarraylike(obj): values = getattr(obj, 'values', obj) 
dtype = values.dtype - if is_string_dtype(dtype): + if isinstance(values, ExtensionArray): + result = values.isna() + elif is_string_dtype(dtype): if is_categorical_dtype(values): from pandas import Categorical if not isinstance(values, Categorical): @@ -406,4 +411,7 @@ def remove_na_arraylike(arr): """ Return array-like containing only true/non-NaN values, possibly empty. """ - return arr[notna(lib.values_from_object(arr))] + if is_extension_type(arr): + return arr[notna(arr)] + else: + return arr[notna(lib.values_from_object(arr))] diff --git a/pandas/core/extensions.py b/pandas/core/extensions.py new file mode 100644 index 0000000000000..8ca4af5f2f0cc --- /dev/null +++ b/pandas/core/extensions.py @@ -0,0 +1,233 @@ +"""Extend pandas with custom array types. +""" +import abc +import typing as T # noqa + +import numpy as np # noqa + + +class ExtensionDtype(metaclass=abc.ABCMeta): + """A custom data type for your array. + """ + @property + def type(self): + # type: () -> T.Any + """Typically a metaclass inheriting from 'type' with no methods.""" + return type(self.name, (), {}) + + @property + def kind(self): + # type: () -> str + """A character code (one of 'biufcmMOSUV'), default 'O' + + See Also + -------- + numpy.dtype.kind + """ + return 'O' + + @property + @abc.abstractmethod + def name(self): + # type: () -> str + """An string identifying the data type. + + Will be used in, e.g. ``Series.dtype`` + """ + + @property + def names(self): + # type: () -> T.Optional[T.List[str]] + """Ordered list of field names, or None if there are no fields""" + return None + + @classmethod + def construct_from_string(cls, string): + # type: (str) -> ExtensionDtype + """Attempt to construct this type from a string. + + Parameters + ---------- + string : str + + Returns + ------- + self : instance of 'cls' + + Raises + ------ + TypeError + + Notes + ----- + The default implementation checks if 'string' matches your + type's name. If so, it calls your class with no arguments. 
+ """ + if string == cls.name: + return cls() + else: + raise TypeError("Cannot construct a '{}' from " + "'{}'".format(cls, string)) + + @classmethod + def is_dtype(cls, dtype): + # type: (T.Union[str, type]) -> bool + """Check if we match 'dtype' + + Parameters + ---------- + dtype : str or dtype + + Returns + ------- + is_dtype : bool + + Notes + ----- + The default implementation is True if + + 1. 'dtype' is a string that returns true for + ``cls.construct_from_string`` + 2. 'dtype' is ``cls`` or a subclass of ``cls``. + """ + if isinstance(dtype, str): + try: + return isinstance(cls.construct_from_string(dtype), cls) + except TypeError: + return False + else: + return issubclass(dtype, cls) + + +class ExtensionArray(metaclass=abc.ABCMeta): + """Abstract base class for custom array types + + pandas will recognize instances of this class as proper arrays + with a custom type and will not attempt to coerce them to objects. + + Subclasses are expected to implement the following methods. + """ + # ------------------------------------------------------------------------ + # Must be a Sequence + # ------------------------------------------------------------------------ + @abc.abstractmethod + def __getitem__(self, item): + pass + + @abc.abstractmethod + def __iter__(self): + pass + + @abc.abstractmethod + def __len__(self): + pass + + # ------------------------------------------------------------------------ + # Required attributes + # ------------------------------------------------------------------------ + @property + @abc.abstractmethod + def dtype(self): + # type: () -> ExtensionDtype + pass + + @property + def shape(self): + # type: () -> T.Tuple[int, ...] + return (len(self),) + + @property + def ndim(self): + # type: () -> int + """Extension Arrays are only allowed to be 1-dimensional""" + return 1 + + @property + @abc.abstractmethod + def nbytes(self): + # type: () -> int + # TODO: default impl? 
+ pass + + # ------------------------------------------------------------------------ + # Additional Methods + # ------------------------------------------------------------------------ + @abc.abstractmethod + def isna(self): + # type: () -> T.Sequence[bool] + # TODO: narrow this type? + pass + + # ------------------------------------------------------------------------ + # Indexing methods + # ------------------------------------------------------------------------ + @abc.abstractmethod + def take(self, indexer, allow_fill=True, fill_value=None): + # type: (T.Sequence, bool, T.Optional[T.Any]) -> ExtensionArray + """For slicing""" + + @abc.abstractmethod + def take_nd(self, indexer, allow_fill=True, fill_value=None): + """For slicing""" + # TODO: this isn't nescesary if we only allow 1D (though maybe + # impelment it). + + @abc.abstractmethod + def copy(self, deep=False): + # type: (bool) -> ExtensionArray + """For slicing""" + + # ------------------------------------------------------------------------ + # Block-related methods + # ------------------------------------------------------------------------ + @property + def fill_value(self): + # type: () -> T.Any + # TODO + return None + + @abc.abstractmethod + def formatting_values(self): + # type: () -> np.ndarray + # At the moment, this has to be an array since we use result.dtype + """An array of values to be printed in, e.g. the Series repr""" + + @classmethod + @abc.abstractmethod + def concat_same_type(cls, to_concat): + # type: (T.Sequence[ExtensionArray]) -> ExtensionArray + """Concatenate multiple array + + Parameters + ---------- + to_concat : sequence of this type + + Returns + ------- + cls + """ + + @abc.abstractmethod + def get_values(self): + # type: () -> np.ndarray + # TODO: What is the required return value? Sequence? ndarray?, ...? 
+ # Categorical does an ndarray + """Get the underlying values backing your data + """ + pass + + @property + @abc.abstractmethod + def can_hold_na(self): + # type: () -> bool + pass + + @property + def is_sparse(self): + # type: () -> bool + return False + + def slice(self, slicer): + # TODO: is this right? + # In general, no. Probably just remove it? + return self.get_values()[slicer] diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 43df2c48fcf58..4f2a41c96148b 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -517,7 +517,7 @@ def _get_axes(N, K, index=index, columns=columns): index, columns = _get_axes(len(values), 1) return _arrays_to_mgr([values], columns, index, columns, dtype=dtype) - elif is_datetimetz(values): + elif is_extension_type(values): return self._init_dict({0: values}, index, columns, dtype=dtype) # by definition an array here @@ -3346,6 +3346,7 @@ class max type new_obj = self.copy() def _maybe_casted_values(index, labels=None): + # TODO: Handle extension index -> extension array if isinstance(index, PeriodIndex): values = index.astype(object).values elif isinstance(index, DatetimeIndex) and index.tz is not None: diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index f634d809560ee..0eb7b3f0b8701 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -40,6 +40,7 @@ needs_i8_conversion, is_iterator, is_list_like, is_scalar) +from pandas.core.extensions import ExtensionArray from pandas.core.common import (is_bool_indexer, _values_from_object, _asarray_tuplesafe, _not_none, _index_labels_to_array) @@ -148,6 +149,8 @@ class Index(IndexOpsMixin, PandasObject): _inner_indexer = libjoin.inner_join_indexer_object _outer_indexer = libjoin.outer_join_indexer_object _box_scalars = False + # Whether items returned by self._data.__getitem__ need to be boxed + _box_slices = False _typ = 'index' _data = None @@ -1953,6 +1956,8 @@ def _format_with_header(self, header, na_rep='NaN', 
**kwargs): if is_categorical_dtype(values.dtype): values = np.array(values) + elif isinstance(values, ExtensionArray): + values = np.asarray(values._format_values()) elif is_object_dtype(values.dtype): values = lib.maybe_convert_objects(values, safe=1) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 0ee2f8ebce011..28e626648a2dd 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -287,7 +287,11 @@ def __getitem__(self, key): getitem = self._data.__getitem__ if is_int: val = getitem(key) - return self._box_func(val) + # XXX: Period will be boxed already, datetime won't be + if self._box_slices: + return self._box_func(val) + else: + return val else: if com.is_bool_indexer(key): key = np.asarray(key) diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py index d83d2d2c93ec8..51abf22aae056 100644 --- a/pandas/core/indexes/datetimes.py +++ b/pandas/core/indexes/datetimes.py @@ -273,6 +273,7 @@ class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin, _typ = 'datetimeindex' _join_precedence = 10 + _box_slices = True def _join_i8_wrapper(joinf, **kwargs): return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]', diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index baf80173d7362..71f30b635976f 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -3,16 +3,13 @@ import numpy as np from pandas.core.dtypes.missing import notna, isna -from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex from pandas.core.dtypes.dtypes import IntervalDtype -from pandas.core.dtypes.cast import maybe_convert_platform, find_common_type +from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ( _ensure_platform_int, is_list_like, is_datetime_or_timedelta_dtype, is_datetime64tz_dtype, - is_categorical_dtype, - is_string_dtype, is_integer_dtype, 
is_float_dtype, is_interval_dtype, @@ -28,14 +25,13 @@ from pandas._libs import Timestamp, Timedelta from pandas._libs.interval import ( Interval, IntervalMixin, IntervalTree, - intervals_to_interval_bounds) +) from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.core.indexes.multi import MultiIndex -from pandas.compat.numpy import function as nv from pandas.core.common import ( - _all_not_none, _any_none, _asarray_tuplesafe, _count_not_none, + _any_none, _asarray_tuplesafe, _count_not_none, is_bool_indexer, _maybe_box_datetimelike, _not_none) from pandas.util._decorators import cache_readonly, Appender from pandas.core.config import get_option @@ -43,6 +39,8 @@ from pandas.tseries.offsets import DateOffset import pandas.core.indexes.base as ibase +from pandas.core.interval import IntervalArray, ScalarDataError + _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update( dict(klass='IntervalIndex', @@ -95,30 +93,6 @@ def _get_interval_closed_bounds(interval): return left, right -def maybe_convert_platform_interval(values): - """ - Try to do platform conversion, with special casing for IntervalIndex. - Wrapper around maybe_convert_platform that alters the default return - dtype in certain cases to be compatible with IntervalIndex. For example, - empty lists return with integer dtype instead of object dtype, which is - prohibited for IntervalIndex. 
- - Parameters - ---------- - values : array-like - - Returns - ------- - array - """ - if isinstance(values, (list, tuple)) and len(values) == 0: - # GH 19016 - # empty lists/tuples get object dtype by default, but this is not - # prohibited for IntervalIndex, so coerce to integer instead - return np.array([], dtype=np.int64) - return maybe_convert_platform(values) - - def _new_IntervalIndex(cls, d): """ This is called upon unpickling, rather than the default which doesn't have @@ -216,77 +190,30 @@ def __new__(cls, data, closed=None, name=None, copy=False, dtype=None, fastpath=False, verify_integrity=True): + # XXX: nail down verify_integrity. + # It should only ever be done on the Interval, yes? if fastpath: - return cls._simple_new(data.left, data.right, closed, name, - copy=copy, verify_integrity=False) + return cls._simple_new(data, name) if name is None and hasattr(data, 'name'): name = data.name - if isinstance(data, IntervalIndex): - left = data.left - right = data.right - closed = data.closed - else: - - # don't allow scalars - if is_scalar(data): - cls._scalar_data_error(data) - - data = maybe_convert_platform_interval(data) - left, right, infer_closed = intervals_to_interval_bounds(data) - - if _all_not_none(closed, infer_closed) and closed != infer_closed: - # GH 18421 - msg = ("conflicting values for closed: constructor got " - "'{closed}', inferred from data '{infer_closed}'" - .format(closed=closed, infer_closed=infer_closed)) - raise ValueError(msg) - - closed = closed or infer_closed + try: + array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype, + fastpath=fastpath, + verify_integrity=verify_integrity) + except ScalarDataError as e: + raise cls._scalar_data_error(data) from e - return cls._simple_new(left, right, closed, name, - copy=copy, verify_integrity=verify_integrity) + return cls._simple_new(array, name, verify_integrity=verify_integrity) @classmethod - def _simple_new(cls, left, right, closed=None, name=None, - copy=False, 
verify_integrity=True): + def _simple_new(cls, array, name, verify_integrity=True): result = IntervalMixin.__new__(cls) - - if closed is None: - closed = 'right' - left = _ensure_index(left, copy=copy) - right = _ensure_index(right, copy=copy) - - # coerce dtypes to match if needed - if is_float_dtype(left) and is_integer_dtype(right): - right = right.astype(left.dtype) - elif is_float_dtype(right) and is_integer_dtype(left): - left = left.astype(right.dtype) - - if type(left) != type(right): - msg = ('must not have differing left [{ltype}] and right ' - '[{rtype}] types') - raise ValueError(msg.format(ltype=type(left).__name__, - rtype=type(right).__name__)) - elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype): - # GH 19016 - msg = ('category, object, and string subtypes are not supported ' - 'for IntervalIndex') - raise TypeError(msg) - elif isinstance(left, ABCPeriodIndex): - msg = 'Period dtypes are not supported, use a PeriodIndex instead' - raise ValueError(msg) - elif (isinstance(left, ABCDatetimeIndex) and - str(left.tz) != str(right.tz)): - msg = ("left and right must have the same time zone, got " - "'{left_tz}' and '{right_tz}'") - raise ValueError(msg.format(left_tz=left.tz, right_tz=right.tz)) - - result._left = left - result._right = right - result._closed = closed + result._data = array result.name = name + # XXX: check that we don't verify_integrity twice. Anywhere we do + # array = self._data._simple_new() will have already done it. 
if verify_integrity: result._validate() result._reset_identity() @@ -294,28 +221,10 @@ def _simple_new(cls, left, right, closed=None, name=None, @Appender(_index_shared_docs['_shallow_copy']) def _shallow_copy(self, left=None, right=None, **kwargs): - if left is None: - - # no values passed - left, right = self.left, self.right - - elif right is None: - - # only single value passed, could be an IntervalIndex - # or array of Intervals - if not isinstance(left, IntervalIndex): - left = type(self).from_intervals(left) - - left, right = left.left, left.right - else: - - # both left and right are values - pass - + result = self._data._shallow_copy(left=left, right=right) attributes = self._get_attributes_dict() attributes.update(kwargs) - attributes['verify_integrity'] = False - return self._simple_new(left, right, **attributes) + return self._simple_new(result, name=self.name, verify_integrity=False) def _validate(self): """ @@ -435,10 +344,8 @@ def from_breaks(cls, breaks, closed='right', name=None, copy=False): IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of tuples """ - breaks = maybe_convert_platform_interval(breaks) - - return cls.from_arrays(breaks[:-1], breaks[1:], closed, - name=name, copy=copy) + array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy) + return cls._simple_new(array, name) @classmethod def from_arrays(cls, left, right, closed='right', name=None, copy=False): @@ -476,11 +383,8 @@ def from_arrays(cls, left, right, closed='right', name=None, copy=False): IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of tuples """ - left = maybe_convert_platform_interval(left) - right = maybe_convert_platform_interval(right) - - return cls._simple_new(left, right, closed, name=name, - copy=copy, verify_integrity=True) + array = IntervalArray.from_arrays(left, right, closed, copy=copy) + return cls._simple_new(array, name, verify_integrity=True) @classmethod def from_intervals(cls, data, name=None, 
copy=False): @@ -521,13 +425,10 @@ def from_intervals(cls, data, name=None, copy=False): IntervalIndex.from_tuples : Construct an IntervalIndex from a list/array of tuples """ - if isinstance(data, IntervalIndex): - left, right, closed = data.left, data.right, data.closed - name = name or data.name - else: - data = maybe_convert_platform_interval(data) - left, right, closed = intervals_to_interval_bounds(data) - return cls.from_arrays(left, right, closed, name=name, copy=False) + arr = IntervalArray.from_intervals(data, copy=copy) + if name is None and isinstance(data, cls): + name = data.name + return cls._simple_new(arr, name=name) @classmethod def from_tuples(cls, data, closed='right', name=None, copy=False): @@ -562,24 +463,8 @@ def from_tuples(cls, data, closed='right', name=None, copy=False): IntervalIndex.from_intervals : Construct an IntervalIndex from an array of Interval objects """ - if len(data): - left, right = [], [] - else: - left = right = data - - for d in data: - if isna(d): - lhs = rhs = np.nan - else: - lhs, rhs = d - left.append(lhs) - right.append(rhs) - - # TODO - # if we have nulls and we previous had *only* - # integer data, then we have changed the dtype - - return cls.from_arrays(left, right, closed, name=name, copy=False) + arr = IntervalArray.from_tuples(data, closed=closed, copy=copy) + return cls._simple_new(arr, name=name) def to_tuples(self, na_tuple=True): """ @@ -601,6 +486,7 @@ def to_tuples(self, na_tuple=True): >>> idx.to_tuples(na_tuple=False) Index([(0.0, 1.0), nan, (2.0, 3.0)], dtype='object') """ + # TODO: Move to array? 
tuples = _asarray_tuplesafe(zip(self.left, self.right)) if not na_tuple: # GH 18756 @@ -618,7 +504,7 @@ def left(self): Return the left endpoints of each Interval in the IntervalIndex as an Index """ - return self._left + return self._data._left @property def right(self): @@ -626,7 +512,7 @@ def right(self): Return the right endpoints of each Interval in the IntervalIndex as an Index """ - return self._right + return self._data._right @property def closed(self): @@ -634,7 +520,7 @@ def closed(self): Whether the intervals are closed on the left-side, right-side, both or neither """ - return self._closed + return self._data._closed @property def length(self): @@ -662,7 +548,7 @@ def values(self): left = self.left right = self.right mask = self._isnan - closed = self._closed + closed = self.closed result = np.empty(len(left), dtype=object) for i in range(len(left)): @@ -691,11 +577,9 @@ def __reduce__(self): @Appender(_index_shared_docs['copy']) def copy(self, deep=False, name=None): - left = self.left.copy(deep=True) if deep else self.left - right = self.right.copy(deep=True) if deep else self.right + array = self._data.copy(deep=deep) name = name if name is not None else self.name - closed = self.closed - return type(self).from_arrays(left, right, closed=closed, name=name) + return self._simple_new(array, name, verify_integrity=False) @Appender(_index_shared_docs['astype']) def astype(self, dtype, copy=True): @@ -1165,33 +1049,9 @@ def _concat_same_dtype(self, to_concat, name): @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs): - nv.validate_take(tuple(), kwargs) - indices = _ensure_platform_int(indices) - left, right = self.left, self.right - - if fill_value is None: - fill_value = self._na_value - mask = indices == -1 - - if not mask.any(): - # we won't change dtype here in this case - # if we don't need - allow_fill = False - - taker = lambda x: x.take(indices, 
allow_fill=allow_fill, - fill_value=fill_value) - - try: - new_left = taker(left) - new_right = taker(right) - except ValueError: - - # we need to coerce; migth have NA's in an - # integer dtype - new_left = taker(left.astype(float)) - new_right = taker(right.astype(float)) - - return self._shallow_copy(new_left, new_right) + result = self._data.take(indices, axis=axis, allow_fill=allow_fill, + fill_value=fill_value, **kwargs) + return self._simple_new(result, self.name, verify_integrity=False) def __getitem__(self, value): mask = self._isnan[value] diff --git a/pandas/core/indexes/period.py b/pandas/core/indexes/period.py index 8b35b1a231551..5272e2b9fb31d 100644 --- a/pandas/core/indexes/period.py +++ b/pandas/core/indexes/period.py @@ -9,16 +9,14 @@ is_float, is_integer_dtype, is_float_dtype, - is_scalar, - is_datetime64_dtype, is_datetime64_any_dtype, is_timedelta64_dtype, is_period_dtype, is_bool_dtype, pandas_dtype, - _ensure_object) +) from pandas.core.dtypes.dtypes import PeriodDtype -from pandas.core.dtypes.generic import ABCSeries +from pandas.core.period import PeriodArray import pandas.tseries.frequencies as frequencies from pandas.tseries.frequencies import get_freq_code as _gfc @@ -28,11 +26,10 @@ from pandas.core.tools.datetimes import parse_time_string import pandas.tseries.offsets as offsets -from pandas._libs.lib import infer_dtype from pandas._libs import tslib, index as libindex from pandas._libs.tslibs.period import (Period, IncompatibleFrequency, get_period_field_arr, - _validate_end_alias, _quarter_to_myear) + _validate_end_alias) from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs import resolution, period from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds @@ -43,7 +40,7 @@ from pandas import compat from pandas.util._decorators import (Appender, Substitution, cache_readonly, deprecate_kwarg) -from pandas.compat import zip, u +from pandas.compat import u import pandas.core.indexes.base as ibase 
_index_doc_kwargs = dict(ibase._index_doc_kwargs) @@ -59,16 +56,6 @@ def f(self): f.__name__ = name f.__doc__ = docstring return property(f) - - -def dt64arr_to_periodarr(data, freq, tz): - if data.dtype != np.dtype('M8[ns]'): - raise ValueError('Wrong dtype: %s' % data.dtype) - - freq = Period._maybe_convert_freq(freq) - base, mult = _gfc(freq) - return period.dt64arr_to_periodarr(data.view('i8'), base, tz) - # --- Period index sketch @@ -119,8 +106,10 @@ def wrapper(self, other): def _new_PeriodIndex(cls, **d): # GH13277 for unpickling - if d['data'].dtype == 'int64': + if d['data'].dtype == 'int64' or isinstance(d['data'], PeriodArray): values = d.pop('data') + elif isinstance(d['data'], PeriodArray): + values = d.pop('data')._data return cls._from_ordinals(values=values, **d) @@ -205,6 +194,7 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index): TimedeltaIndex : Index of timedelta64 data """ _box_scalars = True + _box_slices = False _typ = 'periodindex' _attributes = ['name', 'freq'] @@ -239,131 +229,56 @@ def _add_comparison_methods(cls): def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, periods=None, copy=False, name=None, tz=None, dtype=None, **kwargs): - - if periods is not None: - if is_float(periods): - periods = int(periods) - elif not is_integer(periods): - msg = 'periods must be a number, got {periods}' - raise TypeError(msg.format(periods=periods)) - + array = PeriodArray(data, ordinal=ordinal, freq=freq, start=start, + end=end, periods=periods, copy=copy, tz=tz, + dtype=dtype, **kwargs) if name is None and hasattr(data, 'name'): name = data.name - if dtype is not None: - dtype = pandas_dtype(dtype) - if not is_period_dtype(dtype): - raise ValueError('dtype must be PeriodDtype') - if freq is None: - freq = dtype.freq - elif freq != dtype.freq: - msg = 'specified freq and dtype are different' - raise IncompatibleFrequency(msg) - - # coerce freq to freq object, otherwise it can be coerced elementwise - # which is 
slow - if freq: - freq = Period._maybe_convert_freq(freq) - - if data is None: - if ordinal is not None: - data = np.asarray(ordinal, dtype=np.int64) - else: - data, freq = cls._generate_range(start, end, periods, - freq, kwargs) - return cls._from_ordinals(data, name=name, freq=freq) - - if isinstance(data, PeriodIndex): - if freq is None or freq == data.freq: # no freq change - freq = data.freq - data = data._values - else: - base1, _ = _gfc(data.freq) - base2, _ = _gfc(freq) - data = period.period_asfreq_arr(data._values, - base1, base2, 1) - return cls._simple_new(data, name=name, freq=freq) - - # not array / index - if not isinstance(data, (np.ndarray, PeriodIndex, - DatetimeIndex, Int64Index)): - if is_scalar(data) or isinstance(data, Period): - cls._scalar_data_error(data) - - # other iterable of some kind - if not isinstance(data, (list, tuple)): - data = list(data) - - data = np.asarray(data) - - # datetime other than period - if is_datetime64_dtype(data.dtype): - data = dt64arr_to_periodarr(data, freq, tz) - return cls._from_ordinals(data, name=name, freq=freq) - - # check not floats - if infer_dtype(data) == 'floating' and len(data) > 0: - raise TypeError("PeriodIndex does not allow " - "floating point in construction") - - # anything else, likely an array of strings or periods - data = _ensure_object(data) - freq = freq or period.extract_freq(data) - data = period.extract_ordinals(data, freq) - return cls._from_ordinals(data, name=name, freq=freq) + return cls._from_period_array(array, name=name) @cache_readonly def _engine(self): return self._engine_type(lambda: self, len(self)) - @classmethod - def _generate_range(cls, start, end, periods, freq, fields): - if freq is not None: - freq = Period._maybe_convert_freq(freq) - - field_count = len(fields) - if com._count_not_none(start, end) > 0: - if field_count > 0: - raise ValueError('Can either instantiate from fields ' - 'or endpoints, but not both') - subarr, freq = _get_ordinal_range(start, end, 
periods, freq) - elif field_count > 0: - subarr, freq = _range_from_fields(freq=freq, **fields) - else: - raise ValueError('Not enough parameters to construct ' - 'Period range') - - return subarr, freq - @classmethod def _simple_new(cls, values, name=None, freq=None, **kwargs): """ Values can be any type that can be coerced to Periods. Ordinals in an ndarray are fastpath-ed to `_from_ordinals` """ - if not is_integer_dtype(values): - values = np.array(values, copy=False) - if len(values) > 0 and is_float_dtype(values): - raise TypeError("PeriodIndex can't take floats") - return cls(values, name=name, freq=freq, **kwargs) + # Kept for compatability with other indexes + if not isinstance(values, PeriodArray): + if not is_integer_dtype(values): + values = np.array(values, copy=False) + if len(values) > 0 and is_float_dtype(values): + raise TypeError("PeriodIndex can't take floats") + + return cls(values, name=name, freq=freq, **kwargs) + else: + return cls._from_ordinals(values, name, freq) - return cls._from_ordinals(values, name, freq, **kwargs) + return cls._from_period_array(values, name, freq) @classmethod - def _from_ordinals(cls, values, name=None, freq=None, **kwargs): + def _from_ordinals(cls, values, name, freq): + array = PeriodArray._from_ordinals(values, freq=freq) + return cls._from_period_array(array, name=name, freq=freq) + + @classmethod + def _from_period_array(cls, values, name=None, freq=None): """ Values should be int ordinals `__new__` & `_simple_new` cooerce to ordinals and call this method """ - - values = np.array(values, dtype='int64', copy=False) + if freq and values.freq != freq: + # may have to cast here. 
+ values = values.asfreq(freq) result = object.__new__(cls) result._data = values result.name = name - if freq is None: - raise ValueError('freq is not specified and cannot be inferred') - result.freq = Period._maybe_convert_freq(freq) + result.freq = values.freq result._reset_identity() return result @@ -375,7 +290,7 @@ def _shallow_copy(self, values=None, freq=None, **kwargs): if freq is None: freq = self.freq if values is None: - values = self._values + values = self._data return super(PeriodIndex, self)._shallow_copy(values=values, freq=freq, **kwargs) @@ -408,7 +323,7 @@ def __contains__(self, key): @property def asi8(self): - return self._values.view('i8') + return self._data.asi8 @cache_readonly def _int64index(self): @@ -416,11 +331,13 @@ def _int64index(self): @property def values(self): + """An object array of Periods with our 'freq'.""" return self.astype(object).values @property def _values(self): - return self._data + """The ordinal integers.""" + return self._data._data def __array__(self, dtype=None): if is_integer_dtype(dtype): @@ -581,27 +498,8 @@ def asfreq(self, freq=None, how='E'): [2010-01, ..., 2015-01] Length: 6, Freq: M """ - how = _validate_end_alias(how) - - freq = Period._maybe_convert_freq(freq) - - base1, mult1 = _gfc(self.freq) - base2, mult2 = _gfc(freq) - - asi8 = self.asi8 - # mult1 can't be negative or 0 - end = how == 'E' - if end: - ordinal = asi8 + mult1 - 1 - else: - ordinal = asi8 - - new_data = period.period_asfreq_arr(ordinal, base1, base2, end) - - if self.hasnans: - new_data[self._isnan] = tslib.iNaT - - return self._simple_new(new_data, self.name, freq=freq) + values = self._data.asfreq(freq, how=how) + return self._from_period_array(values, name=self.name) year = _field_accessor('year', 0, "The year of the period") month = _field_accessor('month', 3, "The month as January=1, December=12") @@ -1097,102 +995,6 @@ def tz_localize(self, tz, infer_dst=False): PeriodIndex._add_datetimelike_methods() -def 
_get_ordinal_range(start, end, periods, freq, mult=1): - if com._count_not_none(start, end, periods) != 2: - raise ValueError('Of the three parameters: start, end, and periods, ' - 'exactly two must be specified') - - if freq is not None: - _, mult = _gfc(freq) - - if start is not None: - start = Period(start, freq) - if end is not None: - end = Period(end, freq) - - is_start_per = isinstance(start, Period) - is_end_per = isinstance(end, Period) - - if is_start_per and is_end_per and start.freq != end.freq: - raise ValueError('start and end must have same freq') - if (start is tslib.NaT or end is tslib.NaT): - raise ValueError('start and end must not be NaT') - - if freq is None: - if is_start_per: - freq = start.freq - elif is_end_per: - freq = end.freq - else: # pragma: no cover - raise ValueError('Could not infer freq from start/end') - - if periods is not None: - periods = periods * mult - if start is None: - data = np.arange(end.ordinal - periods + mult, - end.ordinal + 1, mult, - dtype=np.int64) - else: - data = np.arange(start.ordinal, start.ordinal + periods, mult, - dtype=np.int64) - else: - data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) - - return data, freq - - -def _range_from_fields(year=None, month=None, quarter=None, day=None, - hour=None, minute=None, second=None, freq=None): - if hour is None: - hour = 0 - if minute is None: - minute = 0 - if second is None: - second = 0 - if day is None: - day = 1 - - ordinals = [] - - if quarter is not None: - if freq is None: - freq = 'Q' - base = frequencies.FreqGroup.FR_QTR - else: - base, mult = _gfc(freq) - if base != frequencies.FreqGroup.FR_QTR: - raise AssertionError("base must equal FR_QTR") - - year, quarter = _make_field_arrays(year, quarter) - for y, q in zip(year, quarter): - y, m = _quarter_to_myear(y, q, freq) - val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) - ordinals.append(val) - else: - base, mult = _gfc(freq) - arrays = _make_field_arrays(year, month, day, 
hour, minute, second) - for y, mth, d, h, mn, s in zip(*arrays): - ordinals.append(period.period_ordinal( - y, mth, d, h, mn, s, 0, 0, base)) - - return np.array(ordinals, dtype=np.int64), freq - - -def _make_field_arrays(*fields): - length = None - for x in fields: - if isinstance(x, (list, np.ndarray, ABCSeries)): - if length is not None and len(x) != length: - raise ValueError('Mismatched Period array lengths') - elif length is None: - length = len(x) - - arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) - else np.repeat(x, length) for x in fields] - - return arrays - - def pnow(freq=None): # deprecation, xref #13790 warnings.warn("pd.pnow() and pandas.core.indexes.period.pnow() " diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 12ca26cfe0266..158334de9e1df 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -161,6 +161,7 @@ class TimedeltaIndex(DatetimeIndexOpsMixin, TimelikeOps, Int64Index): _typ = 'timedeltaindex' _join_precedence = 10 + _box_slices = True def _join_i8_wrapper(joinf, **kwargs): return DatetimeIndexOpsMixin._join_i8_wrapper( diff --git a/pandas/core/internals.py b/pandas/core/internals.py index 5a4778ae4e629..c6bf09dbd0055 100644 --- a/pandas/core/internals.py +++ b/pandas/core/internals.py @@ -13,8 +13,9 @@ from pandas.core.base import PandasObject +from pandas.core.extensions import ExtensionDtype, ExtensionArray from pandas.core.dtypes.dtypes import ( - ExtensionDtype, DatetimeTZDtype, + DatetimeTZDtype, CategoricalDtype) from pandas.core.dtypes.common import ( _TD_DTYPE, _NS_DTYPE, @@ -55,6 +56,7 @@ from pandas.core.dtypes.generic import ABCSeries, ABCDatetimeIndex from pandas.core.common import is_null_slice, _any_not_none + import pandas.core.algorithms as algos from pandas.core.index import Index, MultiIndex, _ensure_index @@ -76,7 +78,169 @@ from pandas.compat import range, map, zip, u -class Block(PandasObject): +class 
BlockOpsMixin(object): + """Operations that should work on regular or extension blocks + + These methods should only use attributes that are part of the interface. + """ + + def __init__(self, values, placement, ndim=None, fastpath=None): + # Placement must be converted to BlockPlacement via property setter + # before ndim logic, because placement may be a slice which doesn't + # have a length. + self.mgr_locs = placement + + # kludgetastic + if ndim is None: + if len(self.mgr_locs) != 1: + ndim = 1 + else: + ndim = 2 + self.ndim = ndim + + if not isinstance(values, self._holder): + raise TypeError("values must be {0}".format(self._holder.__name__)) + + self.values = values + + @property + def mgr_locs(self): + # TODO: check perf for base... + return self._mgr_locs + + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs): + if not isinstance(new_mgr_locs, BlockPlacement): + new_mgr_locs = BlockPlacement(new_mgr_locs) + + self._mgr_locs = new_mgr_locs + + def copy(self, deep=False, mgr=None): + values = self.values + if deep: + values = values.copy() + return self.make_block_same_class(values) + + def getitem_block(self, slicer, new_mgr_locs=None): + """ + Perform __getitem__-like, return result as block. + + As of now, only supports slices that preserve dimensionality. 
+ """ + if new_mgr_locs is None: + if isinstance(slicer, tuple): + axis0_slicer = slicer[0] + else: + axis0_slicer = slicer + new_mgr_locs = self.mgr_locs[axis0_slicer] + + new_values = self._slice(slicer) + + if self._validate_ndim and new_values.ndim != self.ndim: + raise ValueError("Only same dim slicing is allowed") + + return self.make_block_same_class(new_values, new_mgr_locs) + + def apply(self, func, mgr=None, **kwargs): + """ apply the function to my values; return a block if we are not + one + """ + from pandas.core.internals import Block, _block_shape + + with np.errstate(all='ignore'): + result = func(self.values, **kwargs) + if not isinstance(result, Block): + result = self.make_block(values=_block_shape(result, + ndim=self.ndim)) + + return result + + def make_block(self, values, placement=None, ndim=None, **kwargs): + """ + Create a new block, with type inference propagate any values that are + not specified + """ + from pandas.core.internals import make_block + + if placement is None: + placement = self.mgr_locs + if ndim is None: + ndim = self.ndim + + return make_block(values, placement=placement, ndim=ndim, **kwargs) + + def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None): + """ + Take values according to indexer and return them as a block.bb + + """ + + # algos.take_nd dispatches for DatetimeTZBlock, CategoricalBlock + # so need to preserve types + # sparse is treated like an ndarray, but needs .get_values() shaping + import pandas.core.algorithms as algos + from pandas.core.dtypes.common import is_dtype_equal + + values = self.values + if self.is_sparse: + values = self.get_values() + + if fill_tuple is None: + fill_value = self.fill_value + new_values = algos.take_nd(values, indexer, axis=axis, + allow_fill=False) + else: + fill_value = fill_tuple[0] + new_values = algos.take_nd(values, indexer, axis=axis, + allow_fill=True, fill_value=fill_value) + + if new_mgr_locs is None: + if axis == 0: + slc = 
lib.indexer_as_slice(indexer) + if slc is not None: + new_mgr_locs = self.mgr_locs[slc] + else: + new_mgr_locs = self.mgr_locs[indexer] + else: + new_mgr_locs = self.mgr_locs + + if not is_dtype_equal(new_values.dtype, self.dtype): + return self.make_block(new_values, new_mgr_locs) + else: + return self.make_block_same_class(new_values, new_mgr_locs) + + @property + def is_sparse(self): + return False + + def ftype(self): + return self.is_sparse + + def ftypes(self): + return self.is_sparse + + def __len__(self): + return len(self.values) + + @property + def fill_value(self): + return self._holder.fill_value + + def make_block_same_class(self, values, placement=None, fastpath=True, + **kwargs): + """ Wrap given values in a block of same type as self. """ + from pandas.core.internals import make_block + + if placement is None: + placement = self.mgr_locs + return make_block(values, placement=placement, klass=self.__class__, + fastpath=fastpath, **kwargs) + + def _try_coerce_result(self, result): + return result + + +class Block(BlockOpsMixin, PandasObject): """ Canonical n-dimensional unit of homogeneous dtype contained in a pandas data structure @@ -1689,140 +1853,6 @@ def __len__(self): return 0 -class NonConsolidatableMixIn(object): - """ hold methods for the nonconsolidatable blocks """ - _can_consolidate = False - _verify_integrity = False - _validate_ndim = False - _holder = None - - def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs): - - # Placement must be converted to BlockPlacement via property setter - # before ndim logic, because placement may be a slice which doesn't - # have a length. 
- self.mgr_locs = placement - - # kludgetastic - if ndim is None: - if len(self.mgr_locs) != 1: - ndim = 1 - else: - ndim = 2 - self.ndim = ndim - - if not isinstance(values, self._holder): - raise TypeError("values must be {0}".format(self._holder.__name__)) - - self.values = values - - @property - def shape(self): - if self.ndim == 1: - return (len(self.values)), - return (len(self.mgr_locs), len(self.values)) - - def get_values(self, dtype=None): - """ need to to_dense myself (and always return a ndim sized object) """ - values = self.values.to_dense() - if values.ndim == self.ndim - 1: - values = values.reshape((1,) + values.shape) - return values - - def iget(self, col): - - if self.ndim == 2 and isinstance(col, tuple): - col, loc = col - if not is_null_slice(col) and col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values[loc] - else: - if col != 0: - raise IndexError("{0} only contains one item".format(self)) - return self.values - - def should_store(self, value): - return isinstance(value, self._holder) - - def set(self, locs, values, check=False): - assert locs.tolist() == [0] - self.values = values - - def putmask(self, mask, new, align=True, inplace=False, axis=0, - transpose=False, mgr=None): - """ - putmask the data to the block; we must be a single block and not - generate other blocks - - return the resulting block - - Parameters - ---------- - mask : the condition to respect - new : a ndarray/object - align : boolean, perform alignment on other/cond, default is True - inplace : perform inplace modification, default is False - - Returns - ------- - a new block(s), the result of the putmask - """ - inplace = validate_bool_kwarg(inplace, 'inplace') - - # use block's copy logic. 
- # .values may be an Index which does shallow copy by default - new_values = self.values if inplace else self.copy().values - new_values, _, new, _ = self._try_coerce_args(new_values, new) - - if isinstance(new, np.ndarray) and len(new) == len(mask): - new = new[mask] - - mask = _safe_reshape(mask, new_values.shape) - - new_values[mask] = new - new_values = self._try_coerce_result(new_values) - return [self.make_block(values=new_values)] - - def _slice(self, slicer): - """ return a slice of my values (but densify first) """ - return self.get_values()[slicer] - - def _try_cast_result(self, result, dtype=None): - return result - - def _unstack(self, unstacker_func, new_columns): - """Return a list of unstacked blocks of self - - Parameters - ---------- - unstacker_func : callable - Partially applied unstacker. - new_columns : Index - All columns of the unstacked BlockManager. - - Returns - ------- - blocks : list of Block - New blocks of unstacked values. - mask : array_like of bool - The mask of columns of `blocks` we should keep. 
- """ - # NonConsolidatable blocks can have a single item only, so we return - # one block per item - unstacker = unstacker_func(self.values.T) - new_items = unstacker.get_new_columns() - new_placement = new_columns.get_indexer(new_items) - new_values, mask = unstacker.get_new_values() - - mask = mask.any(0) - new_values = new_values.T[mask] - new_placement = new_placement[mask] - - blocks = [self.make_block_same_class(vals, [place]) - for vals, place in zip(new_values, new_placement)] - return blocks, mask - - class NumericBlock(Block): __slots__ = () is_numeric = True @@ -2330,6 +2360,143 @@ def re_replacer(s): return block +class NonConsolidatableMixIn(object): + """ hold methods for the nonconsolidatable blocks """ + _can_consolidate = False + _verify_integrity = False + _validate_ndim = False + _holder = None + + def __init__(self, values, placement, ndim=None, fastpath=False, **kwargs): + + # Placement must be converted to BlockPlacement via property setter + # before ndim logic, because placement may be a slice which doesn't + # have a length. 
+ self.mgr_locs = placement + + # kludgetastic + if ndim is None: + if len(self.mgr_locs) != 1: + ndim = 1 + else: + ndim = 2 + self.ndim = ndim + + if not isinstance(values, self._holder): + raise TypeError("values must be {0}".format(self._holder.__name__)) + + self.values = values + + @property + def shape(self): + if self.ndim == 1: + return (len(self.values)), + return (len(self.mgr_locs), len(self.values)) + + def get_values(self, dtype=None): + """ need to to_dense myself (and always return a ndim sized object) """ + values = self.values.to_dense() + if values.ndim == self.ndim - 1: + values = values.reshape((1,) + values.shape) + return values + + def iget(self, col): + from pandas.core.common import is_null_slice + + if self.ndim == 2 and isinstance(col, tuple): + col, loc = col + if not is_null_slice(col) and col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values[loc] + else: + if col != 0: + raise IndexError("{0} only contains one item".format(self)) + return self.values + + def should_store(self, value): + return isinstance(value, self._holder) + + def set(self, locs, values, check=False): + assert locs.tolist() == [0] + self.values = values + + def putmask(self, mask, new, align=True, inplace=False, axis=0, + transpose=False, mgr=None): + """ + putmask the data to the block; we must be a single block and not + generate other blocks + + return the resulting block + + Parameters + ---------- + mask : the condition to respect + new : a ndarray/object + align : boolean, perform alignment on other/cond, default is True + inplace : perform inplace modification, default is False + + Returns + ------- + a new block(s), the result of the putmask + """ + from pandas.util._validators import validate_bool_kwarg + + inplace = validate_bool_kwarg(inplace, 'inplace') + + # use block's copy logic. 
+ # .values may be an Index which does shallow copy by default + new_values = self.values if inplace else self.copy().values + new_values, _, new, _ = self._try_coerce_args(new_values, new) + + if isinstance(new, np.ndarray) and len(new) == len(mask): + new = new[mask] + + mask = _safe_reshape(mask, new_values.shape) + + new_values[mask] = new + new_values = self._try_coerce_result(new_values) + return [self.make_block(values=new_values)] + + def _slice(self, slicer): + """ return a slice of my values (but densify first) """ + return self.get_values()[slicer] + + def _try_cast_result(self, result, dtype=None): + return result + + def _unstack(self, unstacker_func, new_columns): + """Return a list of unstacked blocks of self + + Parameters + ---------- + unstacker_func : callable + Partially applied unstacker. + new_columns : Index + All columns of the unstacked BlockManager. + + Returns + ------- + blocks : list of Block + New blocks of unstacked values. + mask : array_like of bool + The mask of columns of `blocks` we should keep. + """ + # NonConsolidatable blocks can have a single item only, so we return + # one block per item + unstacker = unstacker_func(self.values.T) + new_items = unstacker.get_new_columns() + new_placement = new_columns.get_indexer(new_items) + new_values, mask = unstacker.get_new_values() + + mask = mask.any(0) + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + blocks = [self.make_block_same_class(vals, [place]) + for vals, place in zip(new_values, new_placement)] + return blocks, mask + + class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock): __slots__ = () is_categorical = True @@ -2920,7 +3087,12 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, dtype = dtype or values.dtype vtype = dtype.type - if isinstance(values, SparseArray): + # TODO: cleanup + if isinstance(values, ExtensionArray): + # Our classes may implement a custom block type. 
3rd + # party classes are currently stuck with ExtensionBlock + klass = getattr(values, '_block_type', ExtensionBlock) + elif isinstance(values, SparseArray): klass = SparseBlock elif issubclass(vtype, np.floating): klass = FloatBlock @@ -2953,6 +3125,76 @@ def make_block(values, placement, klass=None, ndim=None, dtype=None, # TODO: flexible with index=None and/or items=None +class ExtensionBlock(BlockOpsMixin, NonConsolidatableMixIn): + """Extend pandas internal storage mechanism. + + This is *not* part of the interface. It's intended to hide Blocks from + third-party libraries. + """ + _box_to_block_values = False + + def __init__(self, values: 'ExtensionArray', placement, + ndim=None, fastpath=False) -> None: + self._holder = type(values) + super().__init__(values, placement, ndim=ndim, fastpath=fastpath) + + def formatting_values(self) -> np.ndarray: + """An array of values for printing""" + return self.values.formatting_values() + + def concat_same_type(self, to_concat, placement=None): + values = self._holder.concat_same_type([ + blk.values for blk in to_concat + ]) + return self.make_block_same_class( + values, placement=placement or slice(0, len(values), 1) + ) + + def get_values(self, dtype=None): + return self.values.get_values() + + @property + def dtype(self): + """The custom type for your array""" + return self.values.dtype + + def to_dense(self): + """The array backing your data.""" + return self.values.get_values() + + @property + def _can_hold_na(self) -> bool: + """Boolean indicating whether your container holds missing values""" + return self.values.can_hold_na + + def internal_values(self): + """ return an internal format, currently just the ndarray + this should be the pure internal API format + """ + return self.values + + def external_values(self): + return self.values + + @property + def is_sparse(self): + return False + + def ftype(self): + return self.is_sparse + + def ftypes(self): + return self.is_sparse + + @property + def 
fill_value(self): + return self._holder.fill_value + + def _slice(self, slicer): + """ return a slice of my values (but densify first) """ + return self.values.slice(slicer) + + class BlockManager(PandasObject): """ Core internal data structure to implement DataFrame, Series, Panel, etc. @@ -4410,7 +4652,7 @@ def __init__(self, block, axis, do_integrity_check=False, fastpath=False): 'more than 1 block') block = block[0] - if not isinstance(block, Block): + if not isinstance(block, (Block, ExtensionBlock)): block = make_block(block, placement=slice(0, len(axis)), ndim=1, fastpath=True) @@ -4667,6 +4909,7 @@ def form_blocks(arrays, names, axes): datetime_items = [] datetime_tz_items = [] cat_items = [] + external_items = [] extra_locs = [] names_idx = Index(names) @@ -4704,6 +4947,8 @@ def form_blocks(arrays, names, axes): bool_items.append((i, k, v)) elif is_categorical(v): cat_items.append((i, k, v)) + elif is_extension_type(v): + external_items.append((i, k, v)) else: object_items.append((i, k, v)) @@ -4750,6 +4995,18 @@ def form_blocks(arrays, names, axes): for i, _, array in cat_items] blocks.extend(cat_blocks) + if len(external_items): + external_blocks = [] + for i, _, array in external_items: + if isinstance(array, ABCSeries): + array = array.values + block_type = getattr(array, '_block_type', ExtensionBlock) + external_blocks.append( + make_block(array, klass=block_type, + fastpath=True, placement=[i]) + ) + blocks.extend(external_blocks) + if len(extra_locs): shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:]) @@ -5052,28 +5309,6 @@ def rrenamer(x): _transform_index(right, rrenamer)) -def _safe_reshape(arr, new_shape): - """ - If possible, reshape `arr` to have shape `new_shape`, - with a couple of exceptions (see gh-13012): - - 1) If `arr` is a Categorical or Index, `arr` will be - returned as is. - 2) If `arr` is a Series, the `_values` attribute will - be reshaped and returned. 
- - Parameters - ---------- - arr : array-like, object to be reshaped - new_shape : int or tuple of ints, the new shape - """ - if isinstance(arr, ABCSeries): - arr = arr._values - if not isinstance(arr, Categorical): - arr = arr.reshape(new_shape) - return arr - - def _transform_index(index, func, level=None): """ Apply function to all values found in index. @@ -5271,6 +5506,7 @@ def get_empty_dtype_and_na(join_units): if dtype is None: continue + # TODO: simplify if is_categorical_dtype(dtype): upcast_cls = 'category' elif is_datetimetz(dtype): @@ -5671,3 +5907,28 @@ def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill): if not allow_fill: indexer = maybe_convert_indices(indexer, length) return 'fancy', indexer, len(indexer) + + +def _safe_reshape(arr, new_shape): + """ + If possible, reshape `arr` to have shape `new_shape`, + with a couple of exceptions (see gh-13012): + + 1) If `arr` is a Categorical or Index, `arr` will be + returned as is. + 2) If `arr` is a Series, the `_values` attribute will + be reshaped and returned. 
+ + Parameters + ---------- + arr : array-like, object to be reshaped + new_shape : int or tuple of ints, the new shape + """ + from pandas.core.dtypes.generic import ABCSeries + from pandas.core.categorical import Categorical + + if isinstance(arr, ABCSeries): + arr = arr._values + if not isinstance(arr, Categorical): + arr = arr.reshape(new_shape) + return arr diff --git a/pandas/core/interval.py b/pandas/core/interval.py new file mode 100644 index 0000000000000..e5ce62690beac --- /dev/null +++ b/pandas/core/interval.py @@ -0,0 +1,549 @@ +import numpy as np + +from pandas._libs.interval import (Interval, IntervalMixin, + intervals_to_interval_bounds) +from pandas.compat.numpy import function as nv +from pandas.core.common import _all_not_none +from pandas.core.config import get_option +from pandas.core.dtypes.cast import maybe_convert_platform +from pandas.core.dtypes.common import (_ensure_platform_int, + is_categorical_dtype, is_float_dtype, + is_integer_dtype, is_interval_dtype, + is_scalar, is_string_dtype) +from pandas.core.dtypes.dtypes import IntervalDtype +from pandas.core.dtypes.generic import (ABCDatetimeIndex, ABCPeriodIndex, + ABCSeries) +from pandas.core.dtypes.missing import isna, notna +from pandas.core.extensions import ExtensionArray +from pandas.core.indexes.base import Index, _ensure_index + +_VALID_CLOSED = set(['left', 'right', 'both', 'neither']) + + +class ScalarDataError(TypeError): + # XXX: this is a "hack" to get the right class name in the error + # message. 
+ pass + + +class IntervalArray(IntervalMixin, ExtensionArray): + dtype = IntervalDtype() + ndim = 1 + can_hold_na = True + _na_value = fill_value = np.nan + + def __new__(cls, data, closed=None, copy=False, dtype=None, + fastpath=False, verify_integrity=True): + + from pandas.core.indexes.interval import IntervalIndex + + if fastpath: + return cls._simple_new(data.left, data.right, closed, + copy=copy, verify_integrity=False) + + if isinstance(data, ABCSeries) and is_interval_dtype(data): + data = data.values + if isinstance(data, (cls, IntervalIndex)): + left = data.left + right = data.right + closed = data.closed + else: + + # don't allow scalars + if is_scalar(data): + cls._scalar_data_error(data) + + data = maybe_convert_platform_interval(data) + left, right, infer_closed = intervals_to_interval_bounds(data) + + if _all_not_none(closed, infer_closed) and closed != infer_closed: + # GH 18421 + msg = ("conflicting values for closed: constructor got " + "'{closed}', inferred from data '{infer_closed}'" + .format(closed=closed, infer_closed=infer_closed)) + raise ValueError(msg) + + closed = closed or infer_closed + + return cls._simple_new(left, right, closed, + copy=copy, verify_integrity=verify_integrity) + + @classmethod + def _simple_new(cls, left, right, closed=None, + copy=False, verify_integrity=True): + result = IntervalMixin.__new__(cls) + + if closed is None: + closed = 'right' + left = _ensure_index(left, copy=copy) + right = _ensure_index(right, copy=copy) + + # coerce dtypes to match if needed + if is_float_dtype(left) and is_integer_dtype(right): + right = right.astype(left.dtype) + elif is_float_dtype(right) and is_integer_dtype(left): + left = left.astype(right.dtype) + + if type(left) != type(right): + msg = ('must not have differing left [{ltype}] and right ' + '[{rtype}] types') + raise ValueError(msg.format(ltype=type(left).__name__, + rtype=type(right).__name__)) + elif is_categorical_dtype(left.dtype) or is_string_dtype(left.dtype): + # GH 
19016 + msg = ('category, object, and string subtypes are not supported ' + 'for IntervalIndex') + raise TypeError(msg) + elif isinstance(left, ABCPeriodIndex): + msg = 'Period dtypes are not supported, use a PeriodIndex instead' + raise ValueError(msg) + elif (isinstance(left, ABCDatetimeIndex) and + str(left.tz) != str(right.tz)): + msg = ("left and right must have the same time zone, got " + "'{left_tz}' and '{right_tz}'") + raise ValueError(msg.format(left_tz=left.tz, right_tz=right.tz)) + + result._left = left + result._right = right + result._closed = closed + if verify_integrity: + result._validate() + return result + + @classmethod + def from_breaks(cls, breaks, closed='right', copy=False): + """ + Construct an IntervalIndex from an array of splits + + Parameters + ---------- + breaks : array-like (1-dimensional) + Left and right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. + copy : boolean, default False + copy the data + + Examples + -------- + >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3]) + IntervalIndex([(0, 1], (1, 2], (2, 3]] + closed='right', + dtype='interval[int64]') + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_arrays : Construct an IntervalIndex from a left and + right array + IntervalIndex.from_intervals : Construct an IntervalIndex from an array + of Interval objects + IntervalIndex.from_tuples : Construct an IntervalIndex from a + list/array of tuples + """ + breaks = maybe_convert_platform_interval(breaks) + + return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy) + + @classmethod + def from_arrays(cls, left, right, closed='right', copy=False): + """ + Construct an IntervalIndex from a a left and right array + + Parameters + ---------- + left : array-like (1-dimensional) + Left bounds for each interval. 
+ right : array-like (1-dimensional) + Right bounds for each interval. + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. + copy : boolean, default False + copy the data + + Examples + -------- + >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3]) + IntervalIndex([(0, 1], (1, 2], (2, 3]] + closed='right', + dtype='interval[int64]') + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_breaks : Construct an IntervalIndex from an array of + splits + IntervalIndex.from_intervals : Construct an IntervalIndex from an array + of Interval objects + IntervalIndex.from_tuples : Construct an IntervalIndex from a + list/array of tuples + """ + left = maybe_convert_platform_interval(left) + right = maybe_convert_platform_interval(right) + + return cls._simple_new(left, right, closed, copy=copy, + verify_integrity=True) + + @classmethod + def from_intervals(cls, data, copy=False): + """ + Construct an IntervalIndex from a 1d array of Interval objects + + Parameters + ---------- + data : array-like (1-dimensional) + Array of Interval objects. All intervals must be closed on the same + sides. + copy : boolean, default False + by-default copy the data, this is compat only and ignored + + Examples + -------- + >>> pd.IntervalIndex.from_intervals([pd.Interval(0, 1), + ... 
pd.Interval(1, 2)]) + IntervalIndex([(0, 1], (1, 2]] + closed='right', dtype='interval[int64]') + + The generic Index constructor work identically when it infers an array + of all intervals: + + >>> pd.Index([pd.Interval(0, 1), pd.Interval(1, 2)]) + IntervalIndex([(0, 1], (1, 2]] + closed='right', dtype='interval[int64]') + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_arrays : Construct an IntervalIndex from a left and + right array + IntervalIndex.from_breaks : Construct an IntervalIndex from an array of + splits + IntervalIndex.from_tuples : Construct an IntervalIndex from a + list/array of tuples + """ + from pandas.core.indexes.interval import IntervalIndex + + if isinstance(data, (cls, IntervalIndex)): + left, right, closed = data.left, data.right, data.closed + else: + data = maybe_convert_platform_interval(data) + left, right, closed = intervals_to_interval_bounds(data) + return cls.from_arrays(left, right, closed, copy=False) + + @classmethod + def from_tuples(cls, data, closed='right', copy=False): + """ + Construct an IntervalIndex from a list/array of tuples + + Parameters + ---------- + data : array-like (1-dimensional) + Array of tuples + closed : {'left', 'right', 'both', 'neither'}, default 'right' + Whether the intervals are closed on the left-side, right-side, both + or neither. 
+ copy : boolean, default False + by-default copy the data, this is compat only and ignored + + Examples + -------- + >>> pd.IntervalIndex.from_tuples([(0, 1), (1,2)]) + IntervalIndex([(0, 1], (1, 2]], + closed='right', dtype='interval[int64]') + + See Also + -------- + interval_range : Function to create a fixed frequency IntervalIndex + IntervalIndex.from_arrays : Construct an IntervalIndex from a left and + right array + IntervalIndex.from_breaks : Construct an IntervalIndex from an array of + splits + IntervalIndex.from_intervals : Construct an IntervalIndex from an array + of Interval objects + """ + if len(data): + left, right = [], [] + else: + left = right = data + + for d in data: + if isna(d): + lhs = rhs = np.nan + else: + lhs, rhs = d + left.append(lhs) + right.append(rhs) + + # TODO + # if we have nulls and we previous had *only* + # integer data, then we have changed the dtype + + return cls.from_arrays(left, right, closed, copy=False) + + def _validate(self): + """ + Verify that the IntervalIndex is valid. 
+ """ + if self.closed not in _VALID_CLOSED: + raise ValueError("invalid options for 'closed': {closed}" + .format(closed=self.closed)) + if len(self.left) != len(self.right): + raise ValueError('left and right must have the same length') + left_mask = notna(self.left) + right_mask = notna(self.right) + if not (left_mask == right_mask).all(): + raise ValueError('missing values must be missing in the same ' + 'location both left and right sides') + if not (self.left[left_mask] <= self.right[left_mask]).all(): + raise ValueError('left side of interval must be <= right side') + self._mask = ~left_mask + + # --------- + # Interface + # --------- + def __iter__(self): + return iter(self.values) + + def __len__(self): + return len(self.left) + + def __getitem__(self, value): + mask = self.isna() + if is_scalar(mask) and mask: + return self.fill_value + + left = self.left[value] + right = self.right[value] + + # scalar + if not isinstance(left, Index): + return Interval(left, right, self.closed) + + return self._shallow_copy(left, right) + + def _shallow_copy(self, left=None, right=None): + from pandas.core.indexes.interval import IntervalIndex + + if left is None: + + # no values passed + # XXX: is ^ right? Or does that mean just left wasn't passed? 
+ left, right = self.left, self.right + + elif right is None: + + # only single value passed, could be an IntervalIndex + # or array of Intervals + if not isinstance(left, (type(self), IntervalIndex)): + left = type(self).from_intervals(left) + + left, right = left.left, left.right + else: + + # both left and right are values + pass + + return self._simple_new(left, right, closed=self.closed, + verify_integrity=False) + + @classmethod + def concat_same_type(cls, to_concat): + closed = set(interval.closed for interval in to_concat) + if len(closed) != 1: + raise ValueError("Intervals must all be closed on the same side.") + closed = closed.pop() + + # TODO: avoid intermediate list + left = np.concatenate([interval.left for interval in to_concat]) + right = np.concatenate([interval.right for interval in to_concat]) + return cls._simple_new(left, right, closed=closed, copy=False) + + # TODO: doc + def copy(self, deep=False): + left = self.left.copy(deep=True) if deep else self.left + right = self.right.copy(deep=True) if deep else self.right + closed = self.closed + return type(self).from_arrays(left, right, closed=closed) + + def formatting_values(self): + return self.values + + def get_values(self): + return self.values + + def isna(self): + return isna(self.left) + + def nbytes(self): + # XXX: https://github.com/pandas-dev/pandas/issues/19209 + return self.values.nbytes + + def take(self, indices, axis=0, allow_fill=True, fill_value=None, + **kwargs): + nv.validate_take(tuple(), kwargs) + indices = _ensure_platform_int(indices) + left, right = self.left, self.right + + if fill_value is None: + fill_value = self._na_value + mask = indices == -1 + + if not mask.any(): + # we won't change dtype here in this case + # if we don't need + allow_fill = False + + taker = lambda x: x.take(indices, allow_fill=allow_fill, + fill_value=fill_value) + + try: + new_left = taker(left) + new_right = taker(right) + except ValueError: + + # we need to coerce; migth have NA's in an + # 
integer dtype + new_left = taker(left.astype(float)) + new_right = taker(right.astype(float)) + + return self._shallow_copy(new_left, new_right) + + take_nd = take + + def _format_data(self): + + # TODO: integrate with categorical and make generic + # name argument is unused here; just for compat with base / categorical + n = len(self) + max_seq_items = min((get_option( + 'display.max_seq_items') or n) // 10, 10) + + formatter = str + + if n == 0: + summary = '[]' + elif n == 1: + first = formatter(self[0]) + summary = '[{first}]'.format(first=first) + elif n == 2: + first = formatter(self[0]) + last = formatter(self[-1]) + summary = '[{first}, {last}]'.format(first=first, last=last) + else: + + if n > max_seq_items: + n = min(max_seq_items // 2, 10) + head = [formatter(x) for x in self[:n]] + tail = [formatter(x) for x in self[-n:]] + summary = '[{head} ... {tail}]'.format( + head=', '.join(head), tail=', '.join(tail)) + else: + head = [] + tail = [formatter(x) for x in self] + summary = '[{tail}]'.format(tail=', '.join(tail)) + + return summary + + def _format_space(self): + space = ' ' * (len(self.__class__.__name__) + 1) + return "\n{space}".format(space=space) + + @property + def left(self): + """ + Return the left endpoints of each Interval in the IntervalIndex as + an Index + """ + return self._left + + @property + def right(self): + """ + Return the right endpoints of each Interval in the IntervalIndex as + an Index + """ + return self._right + + @property + def closed(self): + """ + Whether the intervals are closed on the left-side, right-side, both or + neither + """ + return self._closed + + @property + def length(self): + """ + Return an Index with entries denoting the length of each Interval in + the IntervalIndex + """ + try: + return self.right - self.left + except TypeError: + # length not defined for some types, e.g. string + msg = ('IntervalIndex contains Intervals without defined length, ' + 'e.g. 
Intervals with string endpoints') + raise TypeError(msg) + + def __repr__(self): + return "{}({})".format(self.__class__.__name__, self._format_data()) + + @property + def values(self): + """ + Return the IntervalIndex's data as a numpy array of Interval + objects (with dtype='object') + """ + left = self.left + right = self.right + mask = self.isna() + closed = self._closed + + result = np.empty(len(left), dtype=object) + for i in range(len(left)): + if mask[i]: + result[i] = np.nan + else: + result[i] = Interval(left[i], right[i], closed) + return result + + @classmethod + def _scalar_data_error(cls, data): + # TODO: array-mixin + raise ScalarDataError( + '{0}(...) must be called with a collection of some ' + 'kind, {1} was passed'.format(cls.__name__, repr(data)) + ) + + def slice(self, slicer): + left = self.left[slicer] + right = self.right[slicer] + return self._simple_new(left, right, closed=self.closed, + verify_integrity=False) + + +def maybe_convert_platform_interval(values): + """ + Try to do platform conversion, with special casing for IntervalIndex. + Wrapper around maybe_convert_platform that alters the default return + dtype in certain cases to be compatible with IntervalIndex. For example, + empty lists return with integer dtype instead of object dtype, which is + prohibited for IntervalIndex. 
+ + Parameters + ---------- + values : array-like + + Returns + ------- + array + """ + if isinstance(values, (list, tuple)) and len(values) == 0: + # GH 19016 + # empty lists/tuples get object dtype by default, but this is not + # prohibited for IntervalIndex, so coerce to integer instead + return np.array([], dtype=np.int64) + return maybe_convert_platform(values) diff --git a/pandas/core/period.py b/pandas/core/period.py new file mode 100644 index 0000000000000..03099cc47c73a --- /dev/null +++ b/pandas/core/period.py @@ -0,0 +1,469 @@ +"""Extension array for Period data +""" +import numpy as np + +from pandas.core.dtypes.generic import ABCSeries +from pandas.core.dtypes.missing import isna +from pandas.core.dtypes.dtypes import PeriodDtype +from pandas.core import common as com +from pandas.core.extensions import ExtensionArray +from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin +from pandas._libs import tslib, iNaT +from pandas._libs.lib import infer_dtype +from pandas._libs.tslibs import period +from pandas._libs.tslibs.period import ( + IncompatibleFrequency, + Period, + _quarter_to_myear, + _validate_end_alias, +) +from pandas.core.dtypes.common import ( + is_datetime64_dtype, + is_float, + is_float_dtype, + is_integer, + is_integer_dtype, + is_object_dtype, + is_period_dtype, + is_scalar, + pandas_dtype, + _ensure_object, +) +import pandas.tseries.frequencies as frequencies +from pandas.tseries.frequencies import get_freq_code as _gfc + + +def dt64arr_to_periodarr(data, freq, tz): + # TODO: the reverse is in period. move there? 
+ if data.dtype != np.dtype('M8[ns]'): + raise ValueError('Wrong dtype: %s' % data.dtype) + + freq = Period._maybe_convert_freq(freq) + base, mult = _gfc(freq) + return period.dt64arr_to_periodarr(data.view('i8'), base, tz) + + +def to_period(data): + data = np.asanyarray(data) + if data.dtype != int: + raise ValueError(data.dtype) + + return data + + +def _make_field_arrays(*fields): + length = None + for x in fields: + if isinstance(x, (list, np.ndarray, ABCSeries)): + if length is not None and len(x) != length: + raise ValueError('Mismatched Period array lengths') + elif length is None: + length = len(x) + + arrays = [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) + else np.repeat(x, length) for x in fields] + + return arrays + + +def _range_from_fields(year=None, month=None, quarter=None, day=None, + hour=None, minute=None, second=None, freq=None): + if hour is None: + hour = 0 + if minute is None: + minute = 0 + if second is None: + second = 0 + if day is None: + day = 1 + + ordinals = [] + + if quarter is not None: + if freq is None: + freq = 'Q' + base = frequencies.FreqGroup.FR_QTR + else: + base, mult = _gfc(freq) + if base != frequencies.FreqGroup.FR_QTR: + raise AssertionError("base must equal FR_QTR") + + year, quarter = _make_field_arrays(year, quarter) + for y, q in zip(year, quarter): + y, m = _quarter_to_myear(y, q, freq) + val = period.period_ordinal(y, m, 1, 1, 1, 1, 0, 0, base) + ordinals.append(val) + else: + base, mult = _gfc(freq) + arrays = _make_field_arrays(year, month, day, hour, minute, second) + for y, mth, d, h, mn, s in zip(*arrays): + ordinals.append(period.period_ordinal( + y, mth, d, h, mn, s, 0, 0, base)) + + return np.array(ordinals, dtype=np.int64), freq + + +def _get_ordinal_range(start, end, periods, freq, mult=1): + if com._count_not_none(start, end, periods) != 2: + raise ValueError('Of the three parameters: start, end, and periods, ' + 'exactly two must be specified') + + if freq is not None: + _, mult = 
_gfc(freq) + + if start is not None: + start = Period(start, freq) + if end is not None: + end = Period(end, freq) + + is_start_per = isinstance(start, Period) + is_end_per = isinstance(end, Period) + + if is_start_per and is_end_per and start.freq != end.freq: + raise ValueError('start and end must have same freq') + if (start is tslib.NaT or end is tslib.NaT): + raise ValueError('start and end must not be NaT') + + if freq is None: + if is_start_per: + freq = start.freq + elif is_end_per: + freq = end.freq + else: # pragma: no cover + raise ValueError('Could not infer freq from start/end') + + if periods is not None: + periods = periods * mult + if start is None: + data = np.arange(end.ordinal - periods + mult, + end.ordinal + 1, mult, + dtype=np.int64) + else: + data = np.arange(start.ordinal, start.ordinal + periods, mult, + dtype=np.int64) + else: + data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) + + return data, freq + + +# XXX: We inherit from DatetimeIndexOpsMixin to get comparison, arithmetics +# This should be split into an DatetimeArrayOpsMixin, and then any Index +# version that just does index-stuff + + +class PeriodArray(DatetimeIndexOpsMixin, ExtensionArray): + dtype = PeriodDtype() + ndim = 1 + can_hold_na = True + _dtype = None + + def __new__(cls, data=None, ordinal=None, freq=None, start=None, end=None, + periods=None, copy=False, name=None, tz=None, dtype=None, + **kwargs): + from pandas.core.indexes.datetimes import DatetimeIndex + from pandas.core.indexes.numeric import Int64Index + from pandas.core.indexes.period import PeriodIndex + + if periods is not None: + if is_float(periods): + periods = int(periods) + elif not is_integer(periods): + msg = 'periods must be a number, got {periods}' + raise TypeError(msg.format(periods=periods)) + + if dtype is not None: + dtype = pandas_dtype(dtype) + if not is_period_dtype(dtype): + raise ValueError('dtype must be PeriodDtype') + if freq is None: + freq = dtype.freq + elif freq 
!= dtype.freq: + msg = 'specified freq and dtype are different' + raise IncompatibleFrequency(msg) + + # coerce freq to freq object, otherwise it can be coerced elementwise + # which is slow + if freq: + freq = Period._maybe_convert_freq(freq) + + if data is None: + if ordinal is not None: + data = np.asarray(ordinal, dtype=np.int64) + else: + data, freq = cls._generate_range(start, end, periods, + freq, kwargs) + return cls._from_ordinals(data, freq=freq) + + if isinstance(data, PeriodIndex): + data = data._data + + if isinstance(data, cls): + if freq is None or freq == data.freq: # no freq change + freq = data.freq + data = data._data + else: + base1, _ = _gfc(data.freq) + base2, _ = _gfc(freq) + data = period.period_asfreq_arr(data._data, + base1, base2, 1) + return cls._simple_new(data, freq=freq) + + # not array / index + if not isinstance(data, (np.ndarray, PeriodIndex, + DatetimeIndex, Int64Index)): + if is_scalar(data) or isinstance(data, Period): + cls._scalar_data_error(data) + + # other iterable of some kind + if not isinstance(data, (list, tuple)): + data = list(data) + + data = np.asarray(data) + + # datetime other than period + if is_datetime64_dtype(data.dtype): + data = dt64arr_to_periodarr(data, freq, tz) + return cls._from_ordinals(data, freq=freq) + + # check not floats + if infer_dtype(data) == 'floating' and len(data) > 0: + raise TypeError("PeriodIndex does not allow " + "floating point in construction") + + # anything else, likely an array of strings or periods + data = _ensure_object(data) + freq = freq or period.extract_freq(data) + data = period.extract_ordinals(data, freq) + return cls._from_ordinals(data, freq=freq) + + @classmethod + def _generate_range(cls, start, end, periods, freq, fields): + if freq is not None: + freq = Period._maybe_convert_freq(freq) + + field_count = len(fields) + if com._count_not_none(start, end) > 0: + if field_count > 0: + raise ValueError('Can either instantiate from fields ' + 'or endpoints, but not both') 
+ subarr, freq = _get_ordinal_range(start, end, periods, freq) + elif field_count > 0: + subarr, freq = _range_from_fields(freq=freq, **fields) + else: + raise ValueError('Not enough parameters to construct ' + 'Period range') + + return subarr, freq + + @classmethod + def _simple_new(cls, values, freq=None): + """ + Values can be any type that can be coerced to Periods. + Ordinals in an ndarray are fastpath-ed to `_from_ordinals` + """ + if not is_integer_dtype(values): + values = np.array(values, copy=False) + if len(values) > 0 and is_float_dtype(values): + raise TypeError("PeriodArray can't take floats") + return cls(values, freq=freq) + + return cls._from_ordinals(values, freq) + + @classmethod + def _from_ordinals(cls, values, freq=None): + """ + Values should be int ordinals + `__new__` & `_simple_new` cooerce to ordinals and call this method + """ + + values = np.array(values, dtype='int64', copy=False) + + result = object.__new__(cls) + result._data = values + if freq is None: + raise ValueError('freq is not specified and cannot be inferred') + result.freq = Period._maybe_convert_freq(freq) + return result + + def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._data) + + def __repr__(self): + values = self._format_values() + return "PeriodArray({}, freq={}, dtype={})".format( + values, self.freq, self.dtype + ) + + def __getitem__(self, item): + if is_scalar(item): + return self._box_func(self._data[item]) + else: + values = self._data[item] + return self._simple_new(values, self.freq) + + @property + def dtype(self): + if self._dtype is None: + self._dtype = PeriodDtype(self.freq) + return self._dtype + + @property + def shape(self): + return (len(self),) + + @property + def values(self): + return self.astype(object) + + @property + def asi8(self): + return self._data.view('i8') + + @property + def _box_func(self): + return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq) + + def _format_values(self): + return 
np.array(['%s' % x for x in self.values], dtype='object') + + def formatting_values(self): + return self._format_values() + + def astype(self, dtype, copy=True, how='start'): + dtype = pandas_dtype(dtype) + if is_object_dtype(dtype): + return np.array([Period._from_ordinal(p, self.freq) + for p in self], dtype='object') + else: + raise ValueError('invalid dtype') + + def copy(self): + return self._from_ordinals(self._data.copy(), freq=self.freq) + + def isna(self): + return self.asi8 == iNaT + + def nbytes(self): + return self._data.nbytes + + def take(self, indexer, allow_fill=True, fill_value=None): + # XXX: is take supposed to be a view? + return self._from_ordinals(self._data.take(indexer), self.freq) + + take_nd = take + + @classmethod + def concat_same_type(cls, to_concat): + dtype = to_concat[0].dtype + if not all(other.dtype == dtype for other in to_concat): + raise TypeError("All frequencies must match") + values = np.concatenate([other._data for other in to_concat]) + return cls._from_ordinals(values, freq=to_concat[0].freq) + + def get_values(self): + return self._data + + @classmethod + def _scalar_data_error(cls, data): + # TODO: array-mixin + raise TypeError('{0}(...) must be called with a collection of some ' + 'kind, {1} was passed'.format(cls.__name__, + repr(data))) + + def _get_attributes_dict(self): + # TODO: from indexes.base, needed for ops, can remove + return {} + + def view(self, cls=None): + return self._data.view(cls) + + def equals(self, other): + if not isinstance(other, type(self)): + return False + return (self.freq == other.freq and + len(self) == len(other) and + np.all(self._data == other._data)) + + def slice(self, slicer): + return self._from_ordinals(self._data[slicer], freq=self.freq) + + def asfreq(self, freq=None, how='E'): + """ + Convert the PeriodArray to the specified frequency `freq`. 
+ + Parameters + ---------- + + freq : str + a frequency + how : str {'E', 'S'} + 'E', 'END', or 'FINISH' for end, + 'S', 'START', or 'BEGIN' for start. + Whether the elements should be aligned to the end + or start within pa period. January 31st ('END') vs. + Janury 1st ('START') for example. + + Returns + ------- + + new : PeriodArray with the new frequency + + Examples + -------- + >>> pidx = pd.period_range('2010-01-01', '2015-01-01', freq='A') + >>> pidx + <class 'pandas.core.indexes.period.PeriodIndex'> + [2010, ..., 2015] + Length: 6, Freq: A-DEC + + >>> pidx.asfreq('M') + <class 'pandas.core.indexes.period.PeriodIndex'> + [2010-12, ..., 2015-12] + Length: 6, Freq: M + + >>> pidx.asfreq('M', how='S') + <class 'pandas.core.indexes.period.PeriodIndex'> + [2010-01, ..., 2015-01] + Length: 6, Freq: M + """ + how = _validate_end_alias(how) + + freq = Period._maybe_convert_freq(freq) + + base1, mult1 = _gfc(self.freq) + base2, mult2 = _gfc(freq) + + asi8 = self.asi8 + # mult1 can't be negative or 0 + end = how == 'E' + if end: + ordinal = asi8 + mult1 - 1 + else: + ordinal = asi8 + + new_data = period.period_asfreq_arr(ordinal, base1, base2, end) + + # XXX: PeriodIndex could cache this. We can't, so this will be slower. 
+ mask = self.isna() + if isna(self).any(): + new_data[mask] = tslib.iNaT + + return self._from_ordinals(new_data, freq=freq) + + # Pickling + def __getnewargs__(self): + # values, oridinal, freq + return (None, self._data, self.freq) + + def __getstate__(self): + return {'ordinal': self._data, 'freq': self.freq} + + def __setstate__(self, state): + self.__dict__.update(state) + + +PeriodArray._add_datetimelike_methods() diff --git a/pandas/core/series.py b/pandas/core/series.py index 71cded4f9c888..440bdc01e2539 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -37,7 +37,7 @@ maybe_cast_to_datetime, maybe_castable, construct_1d_arraylike_from_scalar) from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike - +from pandas.core.extensions import ExtensionArray from pandas.core.common import (is_bool_indexer, _default_index, _asarray_tuplesafe, @@ -239,6 +239,11 @@ def __init__(self, data=None, index=None, dtype=None, name=None, copy=copy) elif copy: data = data.copy() + elif isinstance(data, ExtensionArray): + # data = data._block_type(data, index) + if copy: + data = data.copy() + data = SingleBlockManager(data, index, fastpath=True) else: data = _sanitize_array(data, index, dtype, copy, raise_cast_failure=True) @@ -2523,7 +2528,7 @@ def apply(self, func, convert_dtype=True, args=(), **kwds): return f(self) # row-wise access - if is_extension_type(self.dtype): + if is_extension_type(self): mapped = self._values.map(f) else: values = self.astype(object).values @@ -3217,6 +3222,8 @@ def _try_cast(arr, take_fast_path): start, stop, step = get_range_parameters(data) arr = np.arange(start, stop, step, dtype='int64') subarr = _try_cast(arr, False) + elif isinstance(data, ExtensionArray): + subarr = data else: subarr = _try_cast(data, False) diff --git a/pandas/core/sparse/array.py b/pandas/core/sparse/array.py index 9b2650359bf68..b57e14f57e606 100644 --- a/pandas/core/sparse/array.py +++ b/pandas/core/sparse/array.py @@ -9,6 +9,7 @@ import 
pandas as pd from pandas.core.base import PandasObject +from pandas.core.extensions import ExtensionArray from pandas import compat from pandas.compat import range @@ -161,7 +162,7 @@ def _wrap_result(name, data, sparse_index, fill_value, dtype=None): fill_value=fill_value, dtype=dtype) -class SparseArray(PandasObject, np.ndarray): +class SparseArray(PandasObject, np.ndarray, ExtensionArray): """Data structure for labeled, sparse floating point 1-D data Parameters @@ -272,6 +273,14 @@ def kind(self): elif isinstance(self.sp_index, IntIndex): return 'integer' + @property + def _block_type(self): + from pandas.core.internals import SparseBlock + return SparseBlock + + def isna(self): + return np.isnan(self) + def __array_wrap__(self, out_arr, context=None): """ NumPy calls this method when ufunc is applied diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 8962eb90be828..821c7858c7a5c 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -122,7 +122,7 @@ def test_api(self): class TestApi(Base): - allowed = ['types'] + allowed = ['types', 'extensions'] def test_api(self): diff --git a/pandas/tests/dtypes/test_common.py b/pandas/tests/dtypes/test_common.py index bfec229d32b22..e63e52179d417 100644 --- a/pandas/tests/dtypes/test_common.py +++ b/pandas/tests/dtypes/test_common.py @@ -503,28 +503,26 @@ def test_is_bool_dtype(): assert com.is_bool_dtype(pd.Index([True, False])) -@pytest.mark.parametrize("check_scipy", [ - False, pytest.param(True, marks=td.skip_if_no_scipy) -]) -def test_is_extension_type(check_scipy): - assert not com.is_extension_type([1, 2, 3]) - assert not com.is_extension_type(np.array([1, 2, 3])) - assert not com.is_extension_type(pd.DatetimeIndex([1, 2, 3])) +@pytest.mark.parametrize("name, obj, is_extension", [ + ('list', [1, 2, 3], False), + ('ndarray', np.array([1, 2, 3]), False), + ('datetimeindex', pd.DatetimeIndex([1, 2, 3]), False), # ? 
+ ('category', pd.Categorical([1, 2, 3]), True), + ('series[categorical]', pd.Series(pd.Categorical([1, 2, 3])), True), + ('sparse', pd.SparseArray([1, 2, 3]), True), + ('series[sparse]', pd.SparseSeries([1, 2, 3]), True), + ('datetime-with-tz', pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"), True), - cat = pd.Categorical([1, 2, 3]) - assert com.is_extension_type(cat) - assert com.is_extension_type(pd.Series(cat)) - assert com.is_extension_type(pd.SparseArray([1, 2, 3])) - assert com.is_extension_type(pd.SparseSeries([1, 2, 3])) - assert com.is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern")) +]) +def test_is_extension_type(name, obj, is_extension): + result = com.is_extension_type(obj) + assert result == is_extension - dtype = DatetimeTZDtype("ns", tz="US/Eastern") - s = pd.Series([], dtype=dtype) - assert com.is_extension_type(s) - if check_scipy: - import scipy.sparse - assert not com.is_extension_type(scipy.sparse.bsr_matrix([1, 2, 3])) +@td.skip_if_no_scipy +def test_is_extension_type_scipy(): + import scipy.sparse + assert not com.is_extension_type(scipy.sparse.bsr_matrix([1, 2, 3])) def test_is_complex_dtype(): diff --git a/pandas/tests/frame/test_alter_axes.py b/pandas/tests/frame/test_alter_axes.py index c824f0026af50..fa61f4c432dea 100644 --- a/pandas/tests/frame/test_alter_axes.py +++ b/pandas/tests/frame/test_alter_axes.py @@ -327,6 +327,8 @@ def test_reset_index_with_intervals(self): assert_frame_equal(result, expected) result2 = result.reset_index() + # XXX: Handle extension index things + # This will fail assert_frame_equal(result2, original) def test_set_index_multiindexcolumns(self): diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index b6d49c9e7ba19..bd1a280bfda4c 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -10,6 +10,7 @@ from pandas.tests.indexes.common import Base import pandas.util.testing as tm import 
pandas as pd +from pandas.core.interval import IntervalArray @pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither']) @@ -48,8 +49,8 @@ def create_index_with_nan(self, closed='right'): def test_constructors(self, data, closed, name): left, right = data[:-1], data[1:] ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)] - expected = IntervalIndex._simple_new( - left=left, right=right, closed=closed, name=name) + arr = IntervalArray._simple_new(left=left, right=right, closed=closed) + expected = IntervalIndex._simple_new(arr, name=name) # validate expected assert expected.closed == closed @@ -1225,3 +1226,10 @@ def test_to_tuples_na(self, tuples, na_tuple): assert all(isna(x) for x in result_na) else: assert isna(result_na) + + def test_from_interval_array(self): + breaks = list(range(10)) + arr = IntervalArray.from_breaks(breaks) + result = IntervalIndex(arr) + expected = IntervalIndex.from_breaks(breaks) + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/indexes/period/test_asfreq.py b/pandas/tests/indexes/period/test_asfreq.py index c8724b2a3bc91..269e36a2ce17e 100644 --- a/pandas/tests/indexes/period/test_asfreq.py +++ b/pandas/tests/indexes/period/test_asfreq.py @@ -153,3 +153,8 @@ def test_astype_asfreq(self): exp = PeriodIndex(['2011-01', '2011-02', '2011-03'], freq='3M') tm.assert_index_equal(pi1.asfreq('3M'), exp) tm.assert_index_equal(pi1.astype('period[3M]'), exp) + + def test_shallow_copy_asfreq(self): + result = PeriodIndex(['2017'], freq='D')._shallow_copy(freq='M') + expected = PeriodIndex(['2017'], freq='M') + tm.assert_index_equal(result, expected) diff --git a/pandas/tests/test_extension_arrays/__init__.py b/pandas/tests/test_extension_arrays/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/pandas/tests/test_extension_arrays/base.py b/pandas/tests/test_extension_arrays/base.py new file mode 100644 index 0000000000000..bfc953de0bca7 --- /dev/null +++ 
b/pandas/tests/test_extension_arrays/base.py @@ -0,0 +1,41 @@ +import pandas as pd +import pandas.util.testing as tm +from pandas.core.internals import ExtensionBlock + + +class BaseArrayTests: + + def test_series_constructor(self, test_data): + result = pd.Series(test_data) + assert result.dtype == test_data.dtype + assert len(result) == len(test_data) + assert isinstance(result._data.blocks[0], ExtensionBlock) + + def test_dataframe_constructor(self, test_data): + result = pd.DataFrame({"A": test_data}) + assert result.dtypes['A'] == test_data.dtype + assert result.shape == (len(test_data), 1) + assert isinstance(result._data.blocks[0], ExtensionBlock) + + def test_concat(self, test_data): + result = pd.concat([ + pd.Series(test_data), + pd.Series(test_data), + ], ignore_index=True) + assert len(result) == len(test_data) * 2 + + def test_iloc(self, test_data): + ser = pd.Series(test_data) + result = ser.iloc[:4] + expected = pd.Series(test_data[:4]) + tm.assert_series_equal(result, expected) + + def test_loc(self, test_data): + ser = pd.Series(test_data) + result = ser.loc[[0, 1, 2, 3]] + expected = pd.Series(test_data[:4]) + tm.assert_series_equal(result, expected) + + def test_repr(self, test_data): + ser = pd.Series(test_data) + repr(ser) diff --git a/pandas/tests/test_extension_arrays/test_interval.py b/pandas/tests/test_extension_arrays/test_interval.py new file mode 100644 index 0000000000000..aa6e2b3d7511d --- /dev/null +++ b/pandas/tests/test_extension_arrays/test_interval.py @@ -0,0 +1,16 @@ +import pytest + +import pandas as pd +from pandas.core.interval import IntervalArray + +from .base import BaseArrayTests + + +@pytest.fixture +def test_data(): + """Length-100 PeriodArray for semantics test.""" + return IntervalArray(pd.interval_range(0, periods=100)) + + +class TestPeriod(BaseArrayTests): + pass diff --git a/pandas/tests/test_extension_arrays/test_json.py b/pandas/tests/test_extension_arrays/test_json.py new file mode 100644 index 
0000000000000..b39db9bc125fd --- /dev/null +++ b/pandas/tests/test_extension_arrays/test_json.py @@ -0,0 +1,88 @@ +import itertools +import json +import random +import string +import sys + +import numpy as np +import pytest + +from pandas.core.extensions import ExtensionArray, ExtensionDtype + +from .base import BaseArrayTests + + +class JSONType(ExtensionDtype): + name = 'json' + base = None + kind = 'O' + + +class JSONArray(ExtensionArray): + dtype = JSONType() + fill_value = [] + can_hold_na = True + + def __init__(self, data): + if isinstance(data, str): + data = json.loads(data) + elif isinstance(data, type(self)): + data = data.data + assert isinstance(data, list), "'data' must be a list of records." + self.data = data + + def __getitem__(self, item): + if isinstance(item, slice): + result = self.data[item] + else: + result = [self.data[i] for i in item] + return type(self)(result) + + def __iter__(self): + return iter(self.data) + + def __len__(self): + return len(self.data) + + @property + def nbytes(self): + return sum(sys.getsizeof(x) for x in self) + + def isna(self): + return np.array(x == [] for x in self) + + def take(self, indexer, allow_fill=True, fill_value=None): + return type(self)(self[indexer]) + + take_nd = take + + def formatting_values(self): + return np.array(self.data).ravel() + + def get_values(self): + return np.array(self.data) + + def slice(self, slicer): + return self[slicer] + + @classmethod + def concat_same_type(cls, to_concat): + return cls(list(itertools.chain(to_concat))) + + def copy(self, deep=False): + data = self.data + if deep: + data = self.data.copy() + return type(self)(data) + + +@pytest.fixture +def test_data(): + choices = list(string.ascii_letters) + list(range(100)) + data = [dict([random.choices(choices, k=2)]) + for _ in range(100)] + return JSONArray(data) + + +class TestJSONArray(BaseArrayTests): + pass diff --git a/pandas/tests/test_extension_arrays/test_period.py 
b/pandas/tests/test_extension_arrays/test_period.py new file mode 100644 index 0000000000000..59aa5805da232 --- /dev/null +++ b/pandas/tests/test_extension_arrays/test_period.py @@ -0,0 +1,69 @@ +import pytest + +import numpy as np +import pandas as pd +import pandas.util.testing as tm +from pandas.core.period import PeriodArray + +from .base import BaseArrayTests + + +@pytest.fixture +def test_data(): + """Length-100 PeriodArray for semantics test.""" + return PeriodArray(pd.period_range("2000", periods=100)) + + +class TestPeriod(BaseArrayTests): + pass + + +class TestArray: + + def test_init(self): + arr = PeriodArray([2017, 2018], freq='A') + assert isinstance(arr, PeriodArray) + + def test_concat(self): + p1 = PeriodArray([2017, 2018], freq='A') + p2 = PeriodArray([2019, 2020], freq='A') + result = pd.concat([pd.Series(p1), pd.Series(p2)], ignore_index=True) + expected = pd.Series(PeriodArray([2017, 2018, 2019, 2020], freq='A')) + tm.assert_series_equal(result, expected) + + def test_equals(self): + p1 = PeriodArray([2017, 2018], freq='A') + p2 = PeriodArray([2017, 2018], freq='A') + assert p1.equals(p2) + + @pytest.mark.parametrize('other', [ + 2017, + [2017, 2018], + PeriodArray([2016, 2017], freq='A'), + PeriodArray([2017, 2018], freq='A-JAN'), + PeriodArray([2017, 2018, 2019], freq='A'), + ]) + def test_equals_unequal(self, other): + p1 = PeriodArray([2017, 2018], freq='A') + assert not p1.equals(other) + + def test_getitem(self): + p1 = PeriodArray([2017, 2018, 2019], freq='A') + result = p1[0] + expected = pd.Period(2017, freq='A') + assert result == expected + + result = p1[[0, 1]] + expected = PeriodArray([2017, 2018], freq='A') + assert result.equals(expected) + + result = p1[slice(2)] + assert result.equals(expected) + + result = p1[np.array([True, True, False])] + assert result.equals(expected) + + def test_isna(self): + result = PeriodArray(['2018', 'NaT'], freq='D').isna() + expected = np.array([False, True]) + tm.assert_numpy_array_equal(result, 
expected) diff --git a/pandas/tests/test_extension_arrays/test_pyarrow.py b/pandas/tests/test_extension_arrays/test_pyarrow.py new file mode 100644 index 0000000000000..3986bb92f256c --- /dev/null +++ b/pandas/tests/test_extension_arrays/test_pyarrow.py @@ -0,0 +1,102 @@ +import collections + +import pyarrow as pa +import pytest + +import numpy as np +import pandas as pd +from pandas.core.extensions import ExtensionArray, ExtensionDtype +from .base import BaseArrayTests + + +class MyDtypeType(type): + pass + + +class ArrowDtype(ExtensionDtype): + _can_hold_na = True + type = MyDtypeType + base = None + name = 'pa64' + arrow_type = pa.int64() + + +class ArrowArray(ExtensionArray): + dtype = ArrowDtype() + ndim = 1 + can_hold_na = True + + def __init__(self, values): + if not isinstance(values, pa.Array): + values = pa.array(values) + assert values.type == self.dtype.arrow_type + self.data = values + + def __iter__(self): + return iter(self.data) + + def __len__(self): + return len(self.data) + + def __getitem__(self, item): + result = self.data[item] + if isinstance(item, (slice, collections.Sequence)): + return type(self)(result) + else: + return result + + @property + def nbytes(self): + return 64 * len(self) + + @property + def shape(self): + return (len(self),) + + def take(self, indexer, allow_fill=True, fill_value=None): + return type(self)(self.data.to_pandas().take(indexer)) + + take_nd = take + + def copy(self): + # TODO: Jira for pa.array(pyarrow.array) + return pa.array(self.data.to_pandas()) + + def isna(self): + # https://github.com/apache/arrow/pull/1378 + return pd.isna(self.data.to_pandas()) + + @classmethod + def concat_same_type(cls, to_concat): + return cls(np.concatenate([arr.data.to_pandas() for arr in to_concat])) + + def get_values(self): + return self.data + + def formatting_values(self): + return self.data.to_pandas() + + def slice(self, indexer): + return self[indexer] + + +@pytest.fixture +def test_data(): + """Length-100 int64 arrow array 
for semantics test.""" + return ArrowArray(np.arange(100)) + + +class TestArrow(BaseArrayTests): + def test_iloc(self, test_data): + ser = pd.Series(test_data) + result = ser.iloc[:4] + expected = test_data[:4] + assert isinstance(result, pd.Series) + assert result.values.data.equals(expected.data) + + def test_loc(self, test_data): + ser = pd.Series(test_data) + result = ser.loc[[0, 1, 2, 3]] + expected = test_data[:4] + assert isinstance(result, pd.Series) + assert result.values.data.equals(expected.data)
Adds an interface for 3rd party libraries to define custom array types. When pandas encounters these objects, we'll resist the temptation to cast them to `object` dtype. This isn't near being ready yet, but I wanted to make a PR to get some thoughts on the general approach / scope. I wouldn't recommend reviewing the code in detail yet. Some questions: 1. Do we want to do this? The big concern being the pandas2 transition. I've tried to hide all block-related changes in an adapter class. I don't think that allowing extension types will hinder the transition in a major way. 2. Should our current extension types implement the interface? Categorical essentially satisfies it already. Interval basically does, but there isn't an ``IntervalArray`` class really, just an ``IntervalIndex``. Period is the same as Interval. SparseArray is a bit weird since it's an actual subclass of ``ndarray``. Needs some more thought. datetime-with-tz is also weird (ndarray, just with a custom type). 3. What to do with indexes? Having an ``ExtensionIndex`` will be useful I think (value_counts, groupby, etc.). But I haven't tackled it yet. Haven't really thought about indexing at all, since it sounds hard. 4. How much of the ndarray implementation should we push onto the pandas array interface? Probably some others, but that should kick things off. For those interested in following along, I'm using this over in https://github.com/continuumio/pandas-ip. It's nice to have a concrete problem to solve. Closes https://github.com/pandas-dev/pandas/issues/18767
https://api.github.com/repos/pandas-dev/pandas/pulls/19174
2018-01-10T20:30:06Z
2018-01-30T20:40:49Z
null
2018-05-02T13:09:41Z
Split and parametrize test_operators
diff --git a/pandas/tests/frame/test_operators.py b/pandas/tests/frame/test_operators.py index fd1eb23643c2b..0bc4a7df6a55b 100644 --- a/pandas/tests/frame/test_operators.py +++ b/pandas/tests/frame/test_operators.py @@ -28,6 +28,53 @@ _check_mixed_int) +class TestDataFrameArithmetic(object): + + @pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano') + def test_frame_sub_datetime64_not_ns(self): + df = pd.DataFrame(date_range('20130101', periods=3)) + dt64 = np.datetime64('2013-01-01') + assert dt64.dtype == 'datetime64[D]' + res = df - dt64 + expected = pd.DataFrame([pd.Timedelta(days=0), pd.Timedelta(days=1), + pd.Timedelta(days=2)]) + tm.assert_frame_equal(res, expected) + + @pytest.mark.parametrize('data', [ + [1, 2, 3], + [1.1, 2.2, 3.3], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT], + ['x', 'y', 1]]) + @pytest.mark.parametrize('dtype', [None, object]) + def test_frame_radd_str_invalid(self, dtype, data): + df = DataFrame(data, dtype=dtype) + with pytest.raises(TypeError): + 'foo_' + df + + @pytest.mark.parametrize('dtype', [None, object]) + def test_frame_with_dtype_radd_int(self, dtype): + df = pd.DataFrame([1, 2, 3], dtype=dtype) + expected = pd.DataFrame([2, 3, 4], dtype=dtype) + result = 1 + df + assert_frame_equal(result, expected) + result = df + 1 + assert_frame_equal(result, expected) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_frame_with_dtype_radd_nan(self, dtype): + df = pd.DataFrame([1, 2, 3], dtype=dtype) + expected = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype) + result = np.nan + df + assert_frame_equal(result, expected) + result = df + np.nan + assert_frame_equal(result, expected) + + def test_frame_radd_str(self): + df = pd.DataFrame(['x', np.nan, 'x']) + assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax'])) + assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa'])) + + class TestDataFrameOperators(TestData): def test_operators(self): diff --git 
a/pandas/tests/series/test_dtypes.py b/pandas/tests/series/test_dtypes.py index 441e811706487..56ff092dd0a27 100644 --- a/pandas/tests/series/test_dtypes.py +++ b/pandas/tests/series/test_dtypes.py @@ -3,7 +3,7 @@ import pytest -from datetime import datetime +from datetime import datetime, timedelta import sys import string @@ -29,6 +29,18 @@ class TestSeriesDtypes(TestData): + def test_dt64_series_astype_object(self): + dt64ser = Series(date_range('20130101', periods=3)) + result = dt64ser.astype(object) + assert isinstance(result.iloc[0], datetime) + assert result.dtype == np.object_ + + def test_td64_series_astype_object(self): + tdser = Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]') + result = tdser.astype(object) + assert isinstance(result.iloc[0], timedelta) + assert result.dtype == np.object_ + @pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"]) def test_astype(self, dtype): diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index ed9307d50521f..1797dbcc15872 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -183,18 +183,18 @@ def test_comparison_tuples(self): assert_series_equal(result, expected) def test_comparison_operators_with_nas(self): - s = Series(bdate_range('1/1/2000', periods=10), dtype=object) - s[::2] = np.nan + ser = Series(bdate_range('1/1/2000', periods=10), dtype=object) + ser[::2] = np.nan # test that comparisons work ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] for op in ops: - val = s[5] + val = ser[5] f = getattr(operator, op) - result = f(s, val) + result = f(ser, val) - expected = f(s.dropna(), val).reindex(s.index) + expected = f(ser.dropna(), val).reindex(ser.index) if op == 'ne': expected = expected.fillna(True).astype(bool) @@ -211,28 +211,28 @@ def test_comparison_operators_with_nas(self): # boolean &, |, ^ should work with object arrays and propagate NAs ops = ['and_', 'or_', 'xor'] - mask = s.isna() + 
mask = ser.isna() for bool_op in ops: - f = getattr(operator, bool_op) + func = getattr(operator, bool_op) - filled = s.fillna(s[0]) + filled = ser.fillna(ser[0]) - result = f(s < s[9], s > s[3]) + result = func(ser < ser[9], ser > ser[3]) - expected = f(filled < filled[9], filled > filled[3]) + expected = func(filled < filled[9], filled > filled[3]) expected[mask] = False assert_series_equal(result, expected) def test_comparison_object_numeric_nas(self): - s = Series(np.random.randn(10), dtype=object) - shifted = s.shift(2) + ser = Series(np.random.randn(10), dtype=object) + shifted = ser.shift(2) ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne'] for op in ops: - f = getattr(operator, op) + func = getattr(operator, op) - result = f(s, shifted) - expected = f(s.astype(float), shifted.astype(float)) + result = func(ser, shifted) + expected = func(ser.astype(float), shifted.astype(float)) assert_series_equal(result, expected) def test_comparison_invalid(self): @@ -277,98 +277,94 @@ def f(): tm.assert_series_equal(cat == "d", Series([False, False, False])) tm.assert_series_equal(cat != "d", Series([True, True, True])) - def test_more_na_comparisons(self): - for dtype in [None, object]: - left = Series(['a', np.nan, 'c'], dtype=dtype) - right = Series(['a', np.nan, 'd'], dtype=dtype) - - result = left == right - expected = Series([True, False, False]) - assert_series_equal(result, expected) - - result = left != right - expected = Series([False, True, True]) - assert_series_equal(result, expected) - - result = left == np.nan - expected = Series([False, False, False]) - assert_series_equal(result, expected) - - result = left != np.nan - expected = Series([True, True, True]) - assert_series_equal(result, expected) + @pytest.mark.parametrize('dtype', [None, object]) + def test_more_na_comparisons(self, dtype): + left = Series(['a', np.nan, 'c'], dtype=dtype) + right = Series(['a', np.nan, 'd'], dtype=dtype) - def test_nat_comparisons(self): - data = [([pd.Timestamp('2011-01-01'), 
pd.NaT, - pd.Timestamp('2011-01-03')], - [pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]), + result = left == right + expected = Series([True, False, False]) + assert_series_equal(result, expected) - ([pd.Timedelta('1 days'), pd.NaT, - pd.Timedelta('3 days')], - [pd.NaT, pd.NaT, pd.Timedelta('3 days')]), + result = left != right + expected = Series([False, True, True]) + assert_series_equal(result, expected) - ([pd.Period('2011-01', freq='M'), pd.NaT, - pd.Period('2011-03', freq='M')], - [pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])] + result = left == np.nan + expected = Series([False, False, False]) + assert_series_equal(result, expected) - # add lhs / rhs switched data - data = data + [(r, l) for l, r in data] + result = left != np.nan + expected = Series([True, True, True]) + assert_series_equal(result, expected) - for l, r in data: - for dtype in [None, object]: - left = Series(l, dtype=dtype) + @pytest.mark.parametrize('pair', [ + ([pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')], + [NaT, NaT, pd.Timestamp('2011-01-03')]), - # Series, Index - for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]: - expected = Series([False, False, True]) - assert_series_equal(left == right, expected) + ([pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')], + [NaT, NaT, pd.Timedelta('3 days')]), - expected = Series([True, True, False]) - assert_series_equal(left != right, expected) + ([pd.Period('2011-01', freq='M'), NaT, pd.Period('2011-03', freq='M')], + [NaT, NaT, pd.Period('2011-03', freq='M')])]) + @pytest.mark.parametrize('reverse', [True, False]) + @pytest.mark.parametrize('box', [Series, Index]) + @pytest.mark.parametrize('dtype', [None, object]) + def test_nat_comparisons(self, dtype, box, reverse, pair): + l, r = pair + if reverse: + # add lhs / rhs switched data + l, r = r, l - expected = Series([False, False, False]) - assert_series_equal(left < right, expected) + left = Series(l, dtype=dtype) + right = box(r, dtype=dtype) + # Series, Index - 
expected = Series([False, False, False]) - assert_series_equal(left > right, expected) + expected = Series([False, False, True]) + assert_series_equal(left == right, expected) - expected = Series([False, False, True]) - assert_series_equal(left >= right, expected) + expected = Series([True, True, False]) + assert_series_equal(left != right, expected) - expected = Series([False, False, True]) - assert_series_equal(left <= right, expected) + expected = Series([False, False, False]) + assert_series_equal(left < right, expected) - def test_nat_comparisons_scalar(self): - data = [[pd.Timestamp('2011-01-01'), pd.NaT, - pd.Timestamp('2011-01-03')], + expected = Series([False, False, False]) + assert_series_equal(left > right, expected) - [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')], + expected = Series([False, False, True]) + assert_series_equal(left >= right, expected) - [pd.Period('2011-01', freq='M'), pd.NaT, - pd.Period('2011-03', freq='M')]] + expected = Series([False, False, True]) + assert_series_equal(left <= right, expected) - for l in data: - for dtype in [None, object]: - left = Series(l, dtype=dtype) + @pytest.mark.parametrize('data', [ + [pd.Timestamp('2011-01-01'), NaT, pd.Timestamp('2011-01-03')], + [pd.Timedelta('1 days'), NaT, pd.Timedelta('3 days')], + [pd.Period('2011-01', freq='M'), NaT, pd.Period('2011-03', freq='M')] + ]) + @pytest.mark.parametrize('dtype', [None, object]) + def test_nat_comparisons_scalar(self, dtype, data): + left = Series(data, dtype=dtype) - expected = Series([False, False, False]) - assert_series_equal(left == pd.NaT, expected) - assert_series_equal(pd.NaT == left, expected) + expected = Series([False, False, False]) + assert_series_equal(left == pd.NaT, expected) + assert_series_equal(pd.NaT == left, expected) - expected = Series([True, True, True]) - assert_series_equal(left != pd.NaT, expected) - assert_series_equal(pd.NaT != left, expected) + expected = Series([True, True, True]) + assert_series_equal(left != 
pd.NaT, expected) + assert_series_equal(pd.NaT != left, expected) - expected = Series([False, False, False]) - assert_series_equal(left < pd.NaT, expected) - assert_series_equal(pd.NaT > left, expected) - assert_series_equal(left <= pd.NaT, expected) - assert_series_equal(pd.NaT >= left, expected) + expected = Series([False, False, False]) + assert_series_equal(left < pd.NaT, expected) + assert_series_equal(pd.NaT > left, expected) + assert_series_equal(left <= pd.NaT, expected) + assert_series_equal(pd.NaT >= left, expected) - assert_series_equal(left > pd.NaT, expected) - assert_series_equal(pd.NaT < left, expected) - assert_series_equal(left >= pd.NaT, expected) - assert_series_equal(pd.NaT <= left, expected) + assert_series_equal(left > pd.NaT, expected) + assert_series_equal(pd.NaT < left, expected) + assert_series_equal(left >= pd.NaT, expected) + assert_series_equal(pd.NaT <= left, expected) def test_comparison_different_length(self): a = Series(['a', 'b', 'c']) @@ -559,27 +555,27 @@ def test_comp_ops_df_compat(self): s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x') s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x') - for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]: + for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]: msg = "Can only compare identically-labeled Series objects" with tm.assert_raises_regex(ValueError, msg): - l == r + left == right with tm.assert_raises_regex(ValueError, msg): - l != r + left != right with tm.assert_raises_regex(ValueError, msg): - l < r + left < right msg = "Can only compare identically-labeled DataFrame objects" with tm.assert_raises_regex(ValueError, msg): - l.to_frame() == r.to_frame() + left.to_frame() == right.to_frame() with tm.assert_raises_regex(ValueError, msg): - l.to_frame() != r.to_frame() + left.to_frame() != right.to_frame() with tm.assert_raises_regex(ValueError, msg): - l.to_frame() < r.to_frame() + left.to_frame() < right.to_frame() class TestSeriesArithmetic(object): @@ -1231,16 
+1227,6 @@ def test_sub_datetime64_not_ns(self, box, assert_func): res = dt64 - obj assert_func(res, -expected) - @pytest.mark.xfail(reason='GH#7996 datetime64 units not converted to nano') - def test_frame_sub_datetime64_not_ns(self): - df = pd.DataFrame(date_range('20130101', periods=3)) - dt64 = np.datetime64('2013-01-01') - assert dt64.dtype == 'datetime64[D]' - res = df - dt64 - expected = pd.DataFrame([Timedelta(days=0), Timedelta(days=1), - Timedelta(days=2)]) - tm.assert_frame_equal(res, expected) - def test_operators_datetimelike(self): def run_ops(ops, get_ser, test_ser): @@ -1381,7 +1367,7 @@ def test_sub_datetime_compat(self): assert_series_equal(s - dt, exp) assert_series_equal(s - Timestamp(dt), exp) - def test_datetime_series_with_timedelta(self): + def test_dt64_series_with_timedelta(self): # scalar timedeltas/np.timedelta64 objects # operate with np.timedelta64 correctly s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) @@ -1400,25 +1386,51 @@ def test_datetime_series_with_timedelta(self): assert_series_equal(result, expected) assert_series_equal(result2, expected) - def test_datetime_series_with_DateOffset(self): + def test_dt64_series_add_tick_DateOffset(self): # GH 4532 # operate with pd.offsets - s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) - - result = s + pd.offsets.Second(5) - result2 = pd.offsets.Second(5) + s + ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) expected = Series([Timestamp('20130101 9:01:05'), Timestamp('20130101 9:02:05')]) + + result = ser + pd.offsets.Second(5) assert_series_equal(result, expected) + + result2 = pd.offsets.Second(5) + ser assert_series_equal(result2, expected) - result = s - pd.offsets.Second(5) - result2 = -pd.offsets.Second(5) + s + def test_dt64_series_sub_tick_DateOffset(self): + # GH 4532 + # operate with pd.offsets + ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) expected = Series([Timestamp('20130101 9:00:55'), 
Timestamp('20130101 9:01:55')]) + + result = ser - pd.offsets.Second(5) assert_series_equal(result, expected) + + result2 = -pd.offsets.Second(5) + ser assert_series_equal(result2, expected) + with pytest.raises(TypeError): + pd.offsets.Second(5) - ser + + @pytest.mark.parametrize('cls_name', ['Day', 'Hour', 'Minute', 'Second', + 'Milli', 'Micro', 'Nano']) + def test_dt64_series_with_tick_DateOffset_smoke(self, cls_name): + # GH 4532 + # smoke tests for valid DateOffsets + ser = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) + + offset_cls = getattr(pd.offsets, cls_name) + ser + offset_cls(5) + offset_cls(5) + ser + + def test_dt64_series_add_mixed_tick_DateOffset(self): + # GH 4532 + # operate with pd.offsets + s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')]) + result = s + pd.offsets.Milli(5) result2 = pd.offsets.Milli(5) + s expected = Series([Timestamp('20130101 9:01:00.005'), @@ -1431,14 +1443,7 @@ def test_datetime_series_with_DateOffset(self): Timestamp('20130101 9:07:00.005')]) assert_series_equal(result, expected) - # valid DateOffsets - for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli', - 'Nano']: - op = getattr(pd.offsets, do) - s + op(5) - op(5) + s - - def test_dt64_sub_NaT(self): + def test_dt64_series_sub_NaT(self): # GH#18808 dti = pd.DatetimeIndex([pd.NaT, pd.Timestamp('19900315')]) ser = pd.Series(dti) @@ -1497,7 +1502,7 @@ def test_dt64_mul_div_numeric_invalid(self, one, dt64_series): with pytest.raises(TypeError): one / dt64_series - def test_dt64series_arith_overflow(self): + def test_dt64_series_arith_overflow(self): # GH#12534, fixed by #19024 dt = pd.Timestamp('1700-01-31') td = pd.Timedelta('20000 Days') @@ -1670,16 +1675,6 @@ def test_timedelta64_conversions(self, m, unit): result = np.timedelta64(m, unit) / s1 assert_series_equal(result, expected) - # astype - s = Series(date_range('20130101', periods=3)) - result = s.astype(object) - assert isinstance(result.iloc[0], datetime) - assert 
result.dtype == np.object_ - - result = s1.astype(object) - assert isinstance(result.iloc[0], timedelta) - assert result.dtype == np.object_ - @pytest.mark.parametrize('op', [operator.add, operator.sub]) def test_timedelta64_equal_timedelta_supported_ops(self, op): ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'), @@ -1705,13 +1700,7 @@ def timedelta64(*args): lhs = op(ser, nptd) rhs = op(ser, pytd) - try: - assert_series_equal(lhs, rhs) - except: - raise AssertionError( - "invalid comparison [op->{0},d->{1},h->{2},m->{3}," - "s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, - us, lhs, rhs)) + assert_series_equal(lhs, rhs) def test_ops_nat_mixed_datetime64_timedelta64(self): # GH 11349 @@ -2091,25 +2080,39 @@ def test_series_radd_str(self): assert_series_equal(ser + 'a', pd.Series(['xa', np.nan, 'xa'])) @pytest.mark.parametrize('dtype', [None, object]) - def test_series_radd_more(self, dtype): - res = 1 + pd.Series([1, 2, 3], dtype=dtype) - exp = pd.Series([2, 3, 4], dtype=dtype) - assert_series_equal(res, exp) - res = pd.Series([1, 2, 3], dtype=dtype) + 1 - assert_series_equal(res, exp) - - res = np.nan + pd.Series([1, 2, 3], dtype=dtype) - exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) - assert_series_equal(res, exp) - res = pd.Series([1, 2, 3], dtype=dtype) + np.nan - assert_series_equal(res, exp) - - s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), - pd.Timedelta('3 days')], dtype=dtype) - exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), - pd.Timedelta('6 days')]) - assert_series_equal(pd.Timedelta('3 days') + s, exp) - assert_series_equal(s + pd.Timedelta('3 days'), exp) + def test_series_with_dtype_radd_timedelta(self, dtype): + ser = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), + pd.Timedelta('3 days')], dtype=dtype) + expected = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), + pd.Timedelta('6 days')]) + + result = pd.Timedelta('3 days') + ser + assert_series_equal(result, 
expected) + + result = ser + pd.Timedelta('3 days') + assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_with_dtype_radd_int(self, dtype): + ser = pd.Series([1, 2, 3], dtype=dtype) + expected = pd.Series([2, 3, 4], dtype=dtype) + + result = 1 + ser + assert_series_equal(result, expected) + + result = ser + 1 + assert_series_equal(result, expected) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_with_dtype_radd_nan(self, dtype): + ser = pd.Series([1, 2, 3], dtype=dtype) + expected = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) + + result = np.nan + ser + assert_series_equal(result, expected) + + result = ser + np.nan + assert_series_equal(result, expected) @pytest.mark.parametrize('data', [ [1, 2, 3], @@ -2122,36 +2125,6 @@ def test_series_radd_str_invalid(self, dtype, data): with pytest.raises(TypeError): 'foo_' + ser - @pytest.mark.parametrize('data', [ - [1, 2, 3], - [1.1, 2.2, 3.3], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT], - ['x', 'y', 1]]) - @pytest.mark.parametrize('dtype', [None, object]) - def test_frame_radd_str_invalid(self, dtype, data): - df = DataFrame(data, dtype=dtype) - with pytest.raises(TypeError): - 'foo_' + df - - @pytest.mark.parametrize('dtype', [None, object]) - def test_frame_radd_more(self, dtype): - res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype) - exp = pd.DataFrame([2, 3, 4], dtype=dtype) - assert_frame_equal(res, exp) - res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1 - assert_frame_equal(res, exp) - - res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype) - exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype) - assert_frame_equal(res, exp) - res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan - assert_frame_equal(res, exp) - - def test_frame_radd_str(self): - df = pd.DataFrame(['x', np.nan, 'x']) - assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax'])) - assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 
'xa'])) - def test_operators_frame(self): # rpow does not work with DataFrame df = DataFrame({'A': self.ts}) @@ -2245,24 +2218,23 @@ def test_operators_na_handling(self): assert_series_equal(result, expected) def test_datetime64_with_index(self): - # arithmetic integer ops with an index - s = Series(np.random.randn(5)) - expected = s - s.index.to_series() - result = s - s.index + ser = Series(np.random.randn(5)) + expected = ser - ser.index.to_series() + result = ser - ser.index assert_series_equal(result, expected) # GH 4629 # arithmetic datetime64 ops with an index - s = Series(date_range('20130101', periods=5), - index=date_range('20130101', periods=5)) - expected = s - s.index.to_series() - result = s - s.index + ser = Series(date_range('20130101', periods=5), + index=date_range('20130101', periods=5)) + expected = ser - ser.index.to_series() + result = ser - ser.index assert_series_equal(result, expected) with pytest.raises(TypeError): # GH#18850 - result = s - s.index.to_period() + result = ser - ser.index.to_period() df = DataFrame(np.random.randn(5, 2), index=date_range('20130101', periods=5))
Orthogonal to #19166 since this does not touch `TestTimedeltaSeriesArithmetic`. Fix (some) flake8 complaints about 1-letter variable names.
https://api.github.com/repos/pandas-dev/pandas/pulls/19173
2018-01-10T19:23:33Z
2018-01-15T14:52:33Z
2018-01-15T14:52:33Z
2018-01-15T19:54:27Z
CI: pin 2.7 build to numpy 1.13 for the moment
diff --git a/ci/requirements-2.7.build b/ci/requirements-2.7.build index e24baa98d956e..17d34f3895c64 100644 --- a/ci/requirements-2.7.build +++ b/ci/requirements-2.7.build @@ -2,5 +2,5 @@ python=2.7* python-dateutil=2.5.0 pytz=2013b nomkl -numpy +numpy=1.13* cython=0.24 diff --git a/ci/requirements-3.6_DOC.build b/ci/requirements-3.6_DOC.build index bdcfe28105866..bc72eed2a0d4e 100644 --- a/ci/requirements-3.6_DOC.build +++ b/ci/requirements-3.6_DOC.build @@ -1,5 +1,5 @@ python=3.6* python-dateutil pytz -numpy +numpy=1.13* cython diff --git a/conda.recipe/meta.yaml b/conda.recipe/meta.yaml index 3510496f0b519..87a79f7e5a987 100644 --- a/conda.recipe/meta.yaml +++ b/conda.recipe/meta.yaml @@ -14,14 +14,14 @@ requirements: build: - python - cython - - {{ pin_compatible('numpy') }} + - {{ pin_compatible('numpy', upper_bound='1.14') }} - setuptools >=3.3 - python-dateutil >=2.5.0 - pytz run: - python - - {{ pin_compatible('numpy') }} + - {{ pin_compatible('numpy', upper_bound='1.14') }} - python-dateutil >=2.5.0 - pytz
xref #19138
https://api.github.com/repos/pandas-dev/pandas/pulls/19169
2018-01-10T10:53:32Z
2018-01-10T12:23:38Z
2018-01-10T12:23:38Z
2018-01-10T12:23:38Z
BUG: Creating Index with the `names` argument
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index a5949c62ad913..907c91a742573 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -113,14 +113,18 @@ class Index(IndexOpsMixin, PandasObject): dtype : NumPy dtype (default: object) copy : bool Make a copy of input ndarray - name : object + name : object, optional Name to be stored in the index + names : sequence of objects, optional + Names for the index levels tupleize_cols : bool (default: True) When True, attempt to create a MultiIndex if possible Notes ----- - An Index instance can **only** contain hashable objects + An Index instance can **only** contain hashable objects. + + Only one of `name` and `names` can be specified at the same time. Examples -------- @@ -176,10 +180,25 @@ class Index(IndexOpsMixin, PandasObject): str = CachedAccessor("str", StringMethods) def __new__(cls, data=None, dtype=None, copy=False, name=None, - fastpath=False, tupleize_cols=True, **kwargs): + fastpath=False, tupleize_cols=True, names=None, + **kwargs): + + # The main purpose of `names` is to use it with a `MultiIndex`. + # Although for consistency it's also used to retrieve `name` for a + # one-level indices if `name` is not provided (see GH 19082). 
+ + if names is not None and name is not None: + raise TypeError("Can provide only one of names and name arguments") + + if names is not None and not is_list_like(names): + raise TypeError("names must be list-like") - if name is None and hasattr(data, 'name'): - name = data.name + if name is None: + if hasattr(data, 'name'): + name = data.name + # extract `name` from `names` in case MultiIndex cannot be created + elif names: + name = names[0] if fastpath: return cls._simple_new(data, name) @@ -358,8 +377,7 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, # 10697 if all(isinstance(e, tuple) for e in data): from .multi import MultiIndex - return MultiIndex.from_tuples( - data, names=name or kwargs.get('names')) + return MultiIndex.from_tuples(data, names=names or name) # other iterable of some kind subarr = _asarray_tuplesafe(data, dtype=object) return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs) diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py index c4e8682934369..d7247669ea43b 100644 --- a/pandas/tests/indexes/test_base.py +++ b/pandas/tests/indexes/test_base.py @@ -305,6 +305,30 @@ def test_constructor_simple_new(self): result = idx._simple_new(idx, 'obj') tm.assert_index_equal(result, idx) + def test_constructor_names(self): + # test both `name` and `names` provided + with pytest.raises(TypeError): + idx = Index([1, 2, 3], name='a', names=('a',)) + + # test non-list-like `names` + with pytest.raises(TypeError): + idx = Index([1, 2, 3], names='a') + + # test using `name` for a flat `Index` + idx = Index([1, 2, 3], name='a') + assert idx.name == 'a' + assert idx.names == ('a',) + + # test using `names` for a flat `Index` + idx = Index([1, 2, 3], names=('a',)) + assert idx.name == 'a' + assert idx.names == ('a',) + + # test using `names` for `MultiIndex` creation + idx = Index([('A', 1), ('A', 2)], names=('a', 'b')) + midx = MultiIndex.from_tuples([('A', 1), ('A', 2)], names=('a', 'b')) + 
tm.assert_index_equal(idx, midx, check_names=True) + def test_constructor_dtypes(self): for idx in [Index(np.array([1, 2, 3], dtype=int)),
- [x] closes #19082 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/19168
2018-01-10T10:11:51Z
2018-02-24T17:30:42Z
null
2018-02-24T17:30:43Z
Parametrize tests for Series[timedelta64] integer arithmetic
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index dda2918bf7615..783fcddac1280 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -28,6 +28,11 @@ from .common import TestData +@pytest.fixture +def tdser(): + return Series(['59 Days', '59 Days', 'NaT'], dtype='timedelta64[ns]') + + class TestSeriesComparisons(object): def test_series_comparison_scalars(self): series = Series(date_range('1/1/2000', periods=10)) @@ -667,83 +672,178 @@ def test_div(self): assert_series_equal(result, expected) -class TestTimedeltaSeriesArithmetic(object): - def test_timedelta_series_ops(self): - # GH11925 - s = Series(timedelta_range('1 day', periods=3)) - ts = Timestamp('2012-01-01') - expected = Series(date_range('2012-01-02', periods=3)) - assert_series_equal(ts + s, expected) - assert_series_equal(s + ts, expected) +class TestTimedeltaSeriesArithmeticWithIntegers(object): + # Tests for Series with dtype 'timedelta64[ns]' arithmetic operations + # with integer and int-like others - expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D')) - assert_series_equal(ts - s, expected2) - assert_series_equal(ts + (-s), expected2) + # ------------------------------------------------------------------ + # Addition and Subtraction + + def test_td64series_add_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + tdser + Series([2, 3, 4]) + + @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') + def test_td64series_radd_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + Series([2, 3, 4]) + tdser + + def test_td64series_sub_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + tdser - Series([2, 3, 4]) + + @pytest.mark.xfail(reason='GH#19123 integer interpreted as nanoseconds') + def test_td64series_rsub_int_series_invalid(self, tdser): + with pytest.raises(TypeError): + Series([2, 3, 4]) - tdser - def 
test_timedelta64_operations_with_integers(self): + @pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)]) + def test_td64series_add_sub_numeric_scalar_invalid(self, scalar, tdser): + with pytest.raises(TypeError): + tdser + scalar + with pytest.raises(TypeError): + scalar + tdser + with pytest.raises(TypeError): + tdser - scalar + with pytest.raises(TypeError): + scalar - tdser + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [ + np.array([1, 2, 3]), + pd.Index([1, 2, 3]), + pytest.param(Series([1, 2, 3]), + marks=pytest.mark.xfail(reason='GH#19123 integer ' + 'interpreted as nanos')) + ]) + def test_td64series_add_sub_numeric_array_invalid(self, vector, + dtype, tdser): + vector = vector.astype(dtype) + with pytest.raises(TypeError): + tdser + vector + with pytest.raises(TypeError): + vector + tdser + with pytest.raises(TypeError): + tdser - vector + with pytest.raises(TypeError): + vector - tdser + + # ------------------------------------------------------------------ + # Multiplicaton and Division + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), + pd.Index([20, 30, 40]), + Series([20, 30, 40])]) + def test_td64series_div_numeric_array(self, vector, dtype, tdser): # GH 4521 # divide/multiply by integers - startdate = Series(date_range('2013-01-01', '2013-01-03')) - enddate = Series(date_range('2013-03-01', '2013-03-03')) + vector = vector.astype(dtype) + expected = Series(['2.95D', '1D 23H 12m', 'NaT'], + dtype='timedelta64[ns]') - s1 = enddate - startdate - s1[2] = np.nan - s2 = Series([2, 3, 4]) - expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]') - expected[2] = np.nan - result = s1 / s2 + result = tdser / vector assert_series_equal(result, expected) - s2 = 
Series([20, 30, 40]) - expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]') - expected[2] = np.nan - result = s1 / s2 + with pytest.raises(TypeError): + vector / tdser + + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [np.array([20, 30, 40]), + pd.Index([20, 30, 40]), + Series([20, 30, 40])]) + def test_td64series_mul_numeric_array(self, vector, dtype, tdser): + # GH 4521 + # divide/multiply by integers + vector = vector.astype(dtype) + + expected = Series(['1180 Days', '1770 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser * vector assert_series_equal(result, expected) - result = s1 / 2 - expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]') - expected[2] = np.nan + @pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16', + 'uint64', 'uint32', 'uint16', 'uint8', + 'float64', 'float32', 'float16']) + @pytest.mark.parametrize('vector', [ + np.array([20, 30, 40]), + pytest.param(pd.Index([20, 30, 40]), + marks=pytest.mark.xfail(reason='__mul__ raises ' + 'instead of returning ' + 'NotImplemented')), + Series([20, 30, 40]) + ]) + def test_td64series_rmul_numeric_array(self, vector, dtype, tdser): + # GH 4521 + # divide/multiply by integers + vector = vector.astype(dtype) + + expected = Series(['1180 Days', '1770 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = vector * tdser assert_series_equal(result, expected) - s2 = Series([20, 30, 40]) - expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]') - expected[2] = np.nan - result = s1 * s2 + @pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)]) + def test_td64series_mul_numeric_scalar(self, one, tdser): + # GH 4521 + # divide/multiply by integers + expected = Series(['-59 Days', '-59 Days', 'NaT'], + dtype='timedelta64[ns]') + + result = tdser * (-one) + assert_series_equal(result, expected) + result = (-one) * tdser 
assert_series_equal(result, expected) - for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16', - 'uint8']: - s2 = Series([20, 30, 40], dtype=dtype) - expected = Series( - s1.values.astype(np.int64) * s2.astype(np.int64), - dtype='m8[ns]') - expected[2] = np.nan - result = s1 * s2 - assert_series_equal(result, expected) + expected = Series(['118 Days', '118 Days', 'NaT'], + dtype='timedelta64[ns]') - result = s1 * 2 - expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]') - expected[2] = np.nan + result = tdser * (2 * one) assert_series_equal(result, expected) + result = (2 * one) * tdser + assert_series_equal(result, expected) + + @pytest.mark.parametrize('two', [ + 2, 2.0, + pytest.param(np.array(2), + marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' + 'incorrectly True.')), + pytest.param(np.array(2.0), + marks=pytest.mark.xfail(reason='GH#19011 is_list_like ' + 'incorrectly True.')), + ]) + def test_td64series_div_numeric_scalar(self, two, tdser): + # GH 4521 + # divide/multiply by integers + expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]') - result = s1 * -1 - expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]') - expected[2] = np.nan + result = tdser / two assert_series_equal(result, expected) - # invalid ops - assert_series_equal(s1 / s2.astype(float), - Series([Timedelta('2 days 22:48:00'), Timedelta( - '1 days 23:12:00'), Timedelta('NaT')])) - assert_series_equal(s1 / 2.0, - Series([Timedelta('29 days 12:00:00'), Timedelta( - '29 days 12:00:00'), Timedelta('NaT')])) - - for op in ['__add__', '__sub__']: - sop = getattr(s1, op, None) - if sop is not None: - pytest.raises(TypeError, sop, 1) - pytest.raises(TypeError, sop, s2.values) + +class TestTimedeltaSeriesArithmetic(object): + def test_td64series_add_sub_timestamp(self): + # GH11925 + tdser = Series(timedelta_range('1 day', periods=3)) + ts = Timestamp('2012-01-01') + expected = Series(date_range('2012-01-02', periods=3)) + 
assert_series_equal(ts + tdser, expected) + assert_series_equal(tdser + ts, expected) + + expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D')) + assert_series_equal(ts - tdser, expected2) + assert_series_equal(ts + (-tdser), expected2) + + with pytest.raises(TypeError): + tdser - ts def test_timedelta64_operations_with_DateOffset(self): # GH 10699
Breaks up a few big tests, parametrizes, covers cases much more thoroughly including identifying a few new xfails. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19166
2018-01-10T06:44:58Z
2018-01-12T11:42:18Z
2018-01-12T11:42:18Z
2018-02-11T21:59:16Z
CLN: ASV Timestamp
diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py index 62abaca17d22f..8c435c435aed3 100644 --- a/asv_bench/benchmarks/timestamp.py +++ b/asv_bench/benchmarks/timestamp.py @@ -1,10 +1,10 @@ +import datetime + from pandas import Timestamp import pytz -import datetime class TimestampConstruction(object): - # TODO: classmethod constructors: fromordinal, fromtimestamp... def time_parse_iso8601_no_tz(self): Timestamp('2017-08-25 08:16:14') @@ -21,6 +21,12 @@ def time_parse_today(self): def time_parse_now(self): Timestamp('now') + def time_fromordinal(self): + Timestamp.fromordinal(730120) + + def time_fromtimestamp(self): + Timestamp.fromtimestamp(1515448538) + class TimestampProperties(object): goal_time = 0.2
Just rearranged the import order and added some contructor benchmarks in a TODO comment: ``` [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 3.57%] ··· Running ....TimestampAcrossDst.time_replace_across_dst 37.5μs [ 7.14%] ··· Running ...tamp.TimestampConstruction.time_fromordinal 15.5μs [ 10.71%] ··· Running ...mp.TimestampConstruction.time_fromtimestamp 21.4μs [ 14.29%] ··· Running ...p.TimestampConstruction.time_parse_dateutil 428μs [ 17.86%] ··· Running ...estampConstruction.time_parse_iso8601_no_tz 14.4μs [ 21.43%] ··· Running ...TimestampConstruction.time_parse_iso8601_tz 51.2μs [ 25.00%] ··· Running timestamp.TimestampConstruction.time_parse_now 22.3μs [ 28.57%] ··· Running ...tamp.TimestampConstruction.time_parse_today 22.4μs [ 32.14%] ··· Running timestamp.TimestampOps.time_replace_None ok [ 32.14%] ···· ============ ======== tz ------------ -------- None 18.1μs US/Eastern 37.9μs ============ ======== [ 35.71%] ··· Running timestamp.TimestampOps.time_replace_tz ok [ 35.71%] ···· ============ ======== tz ------------ -------- None 45.6μs US/Eastern 70.2μs ============ ======== [ 39.29%] ··· Running timestamp.TimestampOps.time_to_pydatetime ok [ 39.29%] ···· ============ ======== tz ------------ -------- None 12.2μs US/Eastern 13.7μs ============ ======== [ 42.86%] ··· Running timestamp.TimestampProperties.time_dayofweek ok [ 42.86%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 9.95μs None B 11.6μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 9.91μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 11.3μs ================================================ ====== ======== [ 46.43%] ··· Running timestamp.TimestampProperties.time_dayofyear ok [ 46.43%] ···· ================================================ ====== ======== tz freq 
------------------------------------------------ ------ -------- None None 40.3μs None B 45.4μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 40.3μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 44.1μs ================================================ ====== ======== [ 50.00%] ··· Running ...tamp.TimestampProperties.time_days_in_month ok [ 50.00%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 40.3μs None B 44.1μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 40.8μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 44.5μs ================================================ ====== ======== [ 53.57%] ··· Running timestamp.TimestampProperties.time_freqstr ok [ 53.57%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 11.0μs None B 13.3μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 11.5μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 13.0μs ================================================ ====== ======== [ 57.14%] ··· Running ...stamp.TimestampProperties.time_is_leap_year ok [ 57.14%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 42.7μs None B 52.1μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 43.0μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 50.8μs ================================================ ====== ======== [ 60.71%] ··· Running ...stamp.TimestampProperties.time_is_month_end ok [ 60.71%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 42.1μs None B 51.6μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 43.1μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 49.7μs ================================================ 
====== ======== [ 64.29%] ··· Running ...amp.TimestampProperties.time_is_month_start ok [ 64.29%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 42.5μs None B 51.6μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 42.4μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 50.4μs ================================================ ====== ======== [ 67.86%] ··· Running ...amp.TimestampProperties.time_is_quarter_end ok [ 67.86%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 43.1μs None B 49.0μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 42.9μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 50.8μs ================================================ ====== ======== [ 71.43%] ··· Running ...p.TimestampProperties.time_is_quarter_start ok [ 71.43%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 42.5μs None B 50.5μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 43.1μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 50.0μs ================================================ ====== ======== [ 75.00%] ··· Running timestamp.TimestampProperties.time_is_year_end ok [ 75.00%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 43.1μs None B 50.8μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 42.2μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 51.9μs ================================================ ====== ======== [ 78.57%] ··· Running ...tamp.TimestampProperties.time_is_year_start ok [ 78.57%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 
43.0μs None B 51.0μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 42.5μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 51.6μs ================================================ ====== ======== [ 82.14%] ··· Running timestamp.TimestampProperties.time_microsecond ok [ 82.14%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 8.91μs None B 9.93μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 8.64μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 9.49μs ================================================ ====== ======== [ 85.71%] ··· Running timestamp.TimestampProperties.time_offset ok [ 85.71%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 14.9μs None B 15.8μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 14.5μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 16.2μs ================================================ ====== ======== [ 85.71%] ····· For parameters: None, None /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timestamp.py:46: FutureWarning: .offset is deprecated. Use .freq instead self.ts.offset For parameters: None, 'B' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timestamp.py:46: FutureWarning: .offset is deprecated. Use .freq instead self.ts.offset For parameters: <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>, None /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timestamp.py:46: FutureWarning: .offset is deprecated. Use .freq instead self.ts.offset For parameters: <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD>, 'B' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timestamp.py:46: FutureWarning: .offset is deprecated. 
Use .freq instead self.ts.offset [ 89.29%] ··· Running timestamp.TimestampProperties.time_quarter ok [ 89.29%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 40.5μs None B 44.8μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 40.5μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 52.4μs ================================================ ====== ======== [ 92.86%] ··· Running timestamp.TimestampProperties.time_tz ok [ 92.86%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 9.82μs None B 10.5μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 9.78μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 10.5μs ================================================ ====== ======== [ 96.43%] ··· Running timestamp.TimestampProperties.time_week ok [ 96.43%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 40.9μs None B 44.0μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 39.4μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 45.1μs ================================================ ====== ======== [100.00%] ··· Running ...stamp.TimestampProperties.time_weekday_name ok [100.00%] ···· ================================================ ====== ======== tz freq ------------------------------------------------ ------ -------- None None 11.7μs None B 13.8μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> None 11.9μs <DstTzInfo 'Europe/Amsterdam' LMT+0:20:00 STD> B 13.5μs ================================================ ====== ======== ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19164
2018-01-10T05:55:54Z
2018-01-10T13:00:50Z
2018-01-10T13:00:50Z
2018-01-10T17:44:25Z
Doc: Add example of merging a Series with a Dataframe
diff --git a/doc/source/merging.rst b/doc/source/merging.rst index ebade853313ab..6c8061f5cbdfd 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -714,6 +714,31 @@ either the left or right tables, the values in the joined table will be labels=['left', 'right'], vertical=False); plt.close('all'); +To join a Series and a DataFrame, the Series has to be transformed into a DataFrame first: + +.. ipython:: python + + df = pd.DataFrame({"Let": ["A", "B", "C"], "Num": [1, 2, 3]}) + df + + # The series has a multi-index with levels corresponding to columns in the DataFrame we want to merge with + ser = pd.Series( + ['a', 'b', 'c', 'd', 'e', 'f'], + index=pd.MultiIndex.from_arrays([["A", "B", "C"]*2, [1, 2, 3, 4, 5, 6]]) + ) + ser + + # Name the row index levels + ser.index.names=['Let','Num'] + ser + + # reset_index turns the multi-level row index into columns, which requires a DataFrame + df2 = ser.reset_index() + type(df2) + + # Now we merge the DataFrames + pd.merge(df, df2, on=['Let','Num']) + Here is another example with duplicate join keys in ``DataFrame``s: .. ipython:: python
- [X] closes #12550
https://api.github.com/repos/pandas-dev/pandas/pulls/19160
2018-01-09T20:46:12Z
2018-01-14T03:39:36Z
null
2018-01-14T03:40:46Z
CI: fix botocore to last known good version
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index 272e7f2e05d14..dfe83aaaf657f 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -113,7 +113,7 @@ if [ -e ${REQ} ]; then fi time conda install -n pandas pytest>=3.1.0 -time pip install pytest-xdist moto +time pip install pytest-xdist moto botocore==1.18.23 if [ "$LINT" ]; then conda install flake8=3.4.1
https://api.github.com/repos/pandas-dev/pandas/pulls/19154
2018-01-09T12:28:24Z
2018-01-09T12:53:10Z
null
2018-01-09T12:53:10Z
No SettingWithCopyWarning on groupby results
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index d7a3f0d077302..79479d081e72f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -425,6 +425,7 @@ Groupby/Resample/Rolling - Bug when grouping by a single column and aggregating with a class like ``list`` or ``tuple`` (:issue:`18079`) - Fixed regression in :func:`DataFrame.groupby` which would not emit an error when called with a tuple key not in the index (:issue:`18798`) +- Modifying a group dataframe while generating ``groupby`` results does not trigger a ``SettingWithCopyWarning`` (:issue:`19151`) - - diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py index 082b6e2a8b1a0..e08df6877a177 100644 --- a/pandas/core/groupby.py +++ b/pandas/core/groupby.py @@ -4722,7 +4722,9 @@ def __iter__(self): # if start >= end: # raise AssertionError('Start %s must be less than end %s' # % (str(start), str(end))) - yield i, self._chop(sdata, slice(start, end)) + group_data = self._chop(sdata, slice(start, end)) + group_data._is_copy = None + yield i, group_data def _get_sorted_data(self): return self.data._take(self.sort_idx, axis=self.axis, convert=False) diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py index 5172efe25d697..0e756d331a301 100644 --- a/pandas/tests/groupby/test_groupby.py +++ b/pandas/tests/groupby/test_groupby.py @@ -2758,6 +2758,12 @@ def test_tuple_correct_keyerror(self): with tm.assert_raises_regex(KeyError, "(7, 8)"): df.groupby((7, 8)).mean() + def test_no_setting_with_copy_warning(self): + df = pd.DataFrame({'x': range(4), 'c': list('aabb')}) + for _, gdf in df.groupby('c'): + with tm.assert_produces_warning(None): + gdf['x'] = 1 + def _check_groupby(df, result, keys, field, f=lambda x: x.sum()): tups = lmap(tuple, df[keys].values)
The dataframe referred to in the warning is a result of an internal slicing operation and should not lead to a warning in userspace. - [x] closes #19151 - [ ] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19152
2018-01-09T07:10:22Z
2018-01-09T12:05:20Z
null
2023-05-11T01:17:07Z
CLN: ASV timedelta
diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py index 1897b0287ed19..3fe75b3c34299 100644 --- a/asv_bench/benchmarks/timedelta.py +++ b/asv_bench/benchmarks/timedelta.py @@ -1,12 +1,11 @@ import datetime import numpy as np -import pandas as pd - -from pandas import to_timedelta, Timestamp, Timedelta +from pandas import Series, timedelta_range, to_timedelta, Timestamp, Timedelta class TimedeltaConstructor(object): + goal_time = 0.2 def time_from_int(self): @@ -36,35 +35,44 @@ def time_from_missing(self): class ToTimedelta(object): + goal_time = 0.2 def setup(self): - self.arr = np.random.randint(0, 1000, size=10000) - self.arr2 = ['{0} days'.format(i) for i in self.arr] - - self.arr3 = np.random.randint(0, 60, size=10000) - self.arr3 = ['00:00:{0:02d}'.format(i) for i in self.arr3] - - self.arr4 = list(self.arr2) - self.arr4[-1] = 'apple' + self.ints = np.random.randint(0, 60, size=10000) + self.str_days = [] + self.str_seconds = [] + for i in self.ints: + self.str_days.append('{0} days'.format(i)) + self.str_seconds.append('00:00:{0:02d}'.format(i)) def time_convert_int(self): - to_timedelta(self.arr, unit='s') + to_timedelta(self.ints, unit='s') - def time_convert_string(self): - to_timedelta(self.arr2) + def time_convert_string_days(self): + to_timedelta(self.str_days) def time_convert_string_seconds(self): - to_timedelta(self.arr3) + to_timedelta(self.str_seconds) + + +class ToTimedeltaErrors(object): - def time_convert_coerce(self): - to_timedelta(self.arr4, errors='coerce') + goal_time = 0.2 + params = ['coerce', 'ignore'] + param_names = ['errors'] + + def setup(self, errors): + ints = np.random.randint(0, 60, size=10000) + self.arr = ['{0} days'.format(i) for i in ints] + self.arr[-1] = 'apple' - def time_convert_ignore(self): - to_timedelta(self.arr4, errors='ignore') + def time_convert(self, errors): + to_timedelta(self.arr, errors=errors) class TimedeltaOps(object): + goal_time = 0.2 def setup(self): @@ -76,43 +84,46 @@ 
def time_add_td_ts(self): class TimedeltaProperties(object): + goal_time = 0.2 - def setup(self): - self.td = Timedelta(days=365, minutes=35, seconds=25, milliseconds=35) + def setup_cache(self): + td = Timedelta(days=365, minutes=35, seconds=25, milliseconds=35) + return td - def time_timedelta_days(self): - self.td.days + def time_timedelta_days(self, td): + td.days - def time_timedelta_seconds(self): - self.td.seconds + def time_timedelta_seconds(self, td): + td.seconds - def time_timedelta_microseconds(self): - self.td.microseconds + def time_timedelta_microseconds(self, td): + td.microseconds - def time_timedelta_nanoseconds(self): - self.td.nanoseconds + def time_timedelta_nanoseconds(self, td): + td.nanoseconds class DatetimeAccessor(object): + goal_time = 0.2 - def setup(self): - self.N = 100000 - self.series = pd.Series( - pd.timedelta_range('1 days', periods=self.N, freq='h')) + def setup_cache(self): + N = 100000 + series = Series(timedelta_range('1 days', periods=N, freq='h')) + return series - def time_dt_accessor(self): - self.series.dt + def time_dt_accessor(self, series): + series.dt - def time_timedelta_dt_accessor_days(self): - self.series.dt.days + def time_timedelta_days(self, series): + series.dt.days - def time_timedelta_dt_accessor_seconds(self): - self.series.dt.seconds + def time_timedelta_seconds(self, series): + series.dt.seconds - def time_timedelta_dt_accessor_microseconds(self): - self.series.dt.microseconds + def time_timedelta_microseconds(self, series): + series.dt.microseconds - def time_timedelta_dt_accessor_nanoseconds(self): - self.series.dt.nanoseconds + def time_timedelta_nanoseconds(self, series): + series.dt.nanoseconds
Cleaned up some setups and used `setup_cache` where applicable. Although the `time_from_iso_format` benchmark failed here, it looks like it passed in #19065. ``` [ 0.00%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timedelta.py:111 [ 4.55%] ··· Running timedelta.DatetimeAccessor.time_dt_accessor 99.1μs [ 9.09%] ··· Running timedelta.DatetimeAccessor.time_timedelta_days 2.17s [ 13.64%] ··· Running ...atetimeAccessor.time_timedelta_microseconds 2.15s [ 18.18%] ··· Running ...DatetimeAccessor.time_timedelta_nanoseconds 2.14s [ 22.73%] ··· Running ...lta.DatetimeAccessor.time_timedelta_seconds 2.16s [ 22.73%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timedelta.py:90 [ 27.27%] ··· Running ...lta.TimedeltaProperties.time_timedelta_days 10.2μs [ 31.82%] ··· Running ...deltaProperties.time_timedelta_microseconds 10.2μs [ 36.36%] ··· Running ...edeltaProperties.time_timedelta_nanoseconds 9.99μs [ 40.91%] ··· Running ....TimedeltaProperties.time_timedelta_seconds 11.3μs [ 45.45%] ··· Running ...a.TimedeltaConstructor.time_from_components 63.7μs [ 50.00%] ··· Running ...ltaConstructor.time_from_datetime_timedelta 34.9μs [ 54.55%] ··· Running timedelta.TimedeltaConstructor.time_from_int 32.7μs [ 59.09%] ··· Running ...a.TimedeltaConstructor.time_from_iso_format failed [ 59.09%] ····· Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) 
File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timedelta.py", line 31, in time_from_iso_format Timedelta('P4DT12H30M5S') File "pandas/_libs/tslib.pyx", line 2588, in pandas._libs.tslib.Timedelta.__new__ File "pandas/_libs/tslibs/timedeltas.pyx", line 159, in pandas._libs.tslibs.timedeltas.parse_timedelta_string File "pandas/_libs/tslibs/timedeltas.pyx", line 296, in pandas._libs.tslibs.timedeltas.timedelta_from_spec ValueError: invalid abbreviation: P [ 63.64%] ··· Running ...elta.TimedeltaConstructor.time_from_missing 19.2μs [ 68.18%] ··· Running ...TimedeltaConstructor.time_from_np_timedelta 31.3μs [ 72.73%] ··· Running ...delta.TimedeltaConstructor.time_from_string 42.1μs [ 77.27%] ··· Running timedelta.TimedeltaConstructor.time_from_unit 40.0μs [ 81.82%] ··· Running timedelta.TimedeltaOps.time_add_td_ts 26.3ms [ 86.36%] ··· Running timedelta.ToTimedelta.time_convert_int 545μs [ 90.91%] ··· Running timedelta.ToTimedelta.time_convert_string_days 152ms [ 95.45%] ··· Running ...lta.ToTimedelta.time_convert_string_seconds 146ms [100.00%] ··· Running timedelta.ToTimedeltaErrors.time_convert ok [100.00%] ···· ======== ======= errors -------- ------- coerce 401ms ignore 394ms ======== ======= ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19150
2018-01-09T05:45:00Z
2018-01-09T12:54:03Z
2018-01-09T12:54:03Z
2018-01-09T17:45:27Z
Followup Cleanup DTI test_arithmetic, ASV
diff --git a/asv_bench/benchmarks/timestamp.py b/asv_bench/benchmarks/timestamp.py index 62abaca17d22f..8e05df4a05453 100644 --- a/asv_bench/benchmarks/timestamp.py +++ b/asv_bench/benchmarks/timestamp.py @@ -36,9 +36,6 @@ def setup(self, tz, freq): def time_tz(self, tz, freq): self.ts.tz - def time_offset(self, tz, freq): - self.ts.offset - def time_dayofweek(self, tz, freq): self.ts.dayofweek diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index 381e2ef3041e7..fb804266259dc 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -447,6 +447,111 @@ def test_dti_with_offset_series(self, tz, names): tm.assert_series_equal(res3, expected_sub) +@pytest.mark.parametrize('klass,assert_func', [ + (Series, tm.assert_series_equal), + (DatetimeIndex, tm.assert_index_equal)]) +def test_dt64_with_offset_array(klass, assert_func): + # GH#10699 + # array of offsets + box = Series if klass is Series else pd.Index + with tm.assert_produces_warning(PerformanceWarning): + s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')]) + result = s + box([pd.offsets.DateOffset(years=1), + pd.offsets.MonthEnd()]) + exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')]) + assert_func(result, exp) + + # same offset + result = s + box([pd.offsets.DateOffset(years=1), + pd.offsets.DateOffset(years=1)]) + exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')]) + assert_func(result, exp) + + +@pytest.mark.parametrize('klass,assert_func', [ + (Series, tm.assert_series_equal), + (DatetimeIndex, tm.assert_index_equal)]) +def test_dt64_with_DateOffsets_relativedelta(klass, assert_func): + # GH#10699 + vec = klass([Timestamp('2000-01-05 00:15:00'), + Timestamp('2000-01-31 00:23:00'), + Timestamp('2000-01-01'), + Timestamp('2000-03-31'), + Timestamp('2000-02-29'), + Timestamp('2000-12-31'), + Timestamp('2000-05-15'), + Timestamp('2001-06-15')]) + + # DateOffset 
relativedelta fastpath + relative_kwargs = [('years', 2), ('months', 5), ('days', 3), + ('hours', 5), ('minutes', 10), ('seconds', 2), + ('microseconds', 5)] + for i, kwd in enumerate(relative_kwargs): + op = pd.DateOffset(**dict([kwd])) + assert_func(klass([x + op for x in vec]), vec + op) + assert_func(klass([x - op for x in vec]), vec - op) + op = pd.DateOffset(**dict(relative_kwargs[:i + 1])) + assert_func(klass([x + op for x in vec]), vec + op) + assert_func(klass([x - op for x in vec]), vec - op) + + +@pytest.mark.parametrize('cls_name', [ + 'YearBegin', ('YearBegin', {'month': 5}), + 'YearEnd', ('YearEnd', {'month': 5}), + 'MonthBegin', 'MonthEnd', + 'SemiMonthEnd', 'SemiMonthBegin', + 'Week', ('Week', {'weekday': 3}), + 'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin', + 'CustomBusinessDay', 'CDay', 'CBMonthEnd', + 'CBMonthBegin', 'BMonthBegin', 'BMonthEnd', + 'BusinessHour', 'BYearBegin', 'BYearEnd', + 'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}), + ('FY5253Quarter', {'qtr_with_extra_week': 1, + 'startingMonth': 1, + 'weekday': 2, + 'variation': 'nearest'}), + ('FY5253', {'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}), + ('WeekOfMonth', {'weekday': 2, 'week': 2}), + 'Easter', ('DateOffset', {'day': 4}), + ('DateOffset', {'month': 5})]) +@pytest.mark.parametrize('normalize', [True, False]) +@pytest.mark.parametrize('klass,assert_func', [ + (Series, tm.assert_series_equal), + (DatetimeIndex, tm.assert_index_equal)]) +def test_dt64_with_DateOffsets(klass, assert_func, normalize, cls_name): + # GH#10699 + # assert these are equal on a piecewise basis + vec = klass([Timestamp('2000-01-05 00:15:00'), + Timestamp('2000-01-31 00:23:00'), + Timestamp('2000-01-01'), + Timestamp('2000-03-31'), + Timestamp('2000-02-29'), + Timestamp('2000-12-31'), + Timestamp('2000-05-15'), + Timestamp('2001-06-15')]) + + if isinstance(cls_name, tuple): + # If cls_name param is a tuple, then 2nd entry is kwargs for + # the offset constructor + cls_name, kwargs = 
cls_name + else: + kwargs = {} + + offset_cls = getattr(pd.offsets, cls_name) + + with warnings.catch_warnings(record=True): + for n in [0, 5]: + if (cls_name in ['WeekOfMonth', 'LastWeekOfMonth', + 'FY5253Quarter', 'FY5253'] and n == 0): + # passing n = 0 is invalid for these offset classes + continue + + offset = offset_cls(n, normalize=normalize, **kwargs) + assert_func(klass([x + offset for x in vec]), vec + offset) + assert_func(klass([x - offset for x in vec]), vec - offset) + assert_func(klass([offset + x for x in vec]), offset + vec) + + # GH 10699 @pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex], [tm.assert_series_equal, @@ -480,84 +585,3 @@ def test_datetime64_with_DateOffset(klass, assert_func): Timestamp('2000-02-29', tz='US/Central')], name='a') assert_func(result, exp) assert_func(result2, exp) - - # array of offsets - valid for Series only - if klass is Series: - with tm.assert_produces_warning(PerformanceWarning): - s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')]) - result = s + Series([pd.offsets.DateOffset(years=1), - pd.offsets.MonthEnd()]) - exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29') - ]) - assert_func(result, exp) - - # same offset - result = s + Series([pd.offsets.DateOffset(years=1), - pd.offsets.DateOffset(years=1)]) - exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')]) - assert_func(result, exp) - - s = klass([Timestamp('2000-01-05 00:15:00'), - Timestamp('2000-01-31 00:23:00'), - Timestamp('2000-01-01'), - Timestamp('2000-03-31'), - Timestamp('2000-02-29'), - Timestamp('2000-12-31'), - Timestamp('2000-05-15'), - Timestamp('2001-06-15')]) - - # DateOffset relativedelta fastpath - relative_kwargs = [('years', 2), ('months', 5), ('days', 3), - ('hours', 5), ('minutes', 10), ('seconds', 2), - ('microseconds', 5)] - for i, kwd in enumerate(relative_kwargs): - op = pd.DateOffset(**dict([kwd])) - assert_func(klass([x + op for x in s]), s + op) - assert_func(klass([x - op for x in s]), s - 
op) - op = pd.DateOffset(**dict(relative_kwargs[:i + 1])) - assert_func(klass([x + op for x in s]), s + op) - assert_func(klass([x - op for x in s]), s - op) - - # assert these are equal on a piecewise basis - offsets = ['YearBegin', ('YearBegin', {'month': 5}), - 'YearEnd', ('YearEnd', {'month': 5}), - 'MonthBegin', 'MonthEnd', - 'SemiMonthEnd', 'SemiMonthBegin', - 'Week', ('Week', {'weekday': 3}), - 'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin', - 'CustomBusinessDay', 'CDay', 'CBMonthEnd', - 'CBMonthBegin', 'BMonthBegin', 'BMonthEnd', - 'BusinessHour', 'BYearBegin', 'BYearEnd', - 'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}), - ('FY5253Quarter', {'qtr_with_extra_week': 1, - 'startingMonth': 1, - 'weekday': 2, - 'variation': 'nearest'}), - ('FY5253', {'weekday': 0, - 'startingMonth': 2, - 'variation': - 'nearest'}), - ('WeekOfMonth', {'weekday': 2, - 'week': 2}), - 'Easter', ('DateOffset', {'day': 4}), - ('DateOffset', {'month': 5})] - - with warnings.catch_warnings(record=True): - for normalize in (True, False): - for do in offsets: - if isinstance(do, tuple): - do, kwargs = do - else: - do = do - kwargs = {} - - for n in [0, 5]: - if (do in ['WeekOfMonth', 'LastWeekOfMonth', - 'FY5253Quarter', 'FY5253'] and n == 0): - continue - op = getattr(pd.offsets, do)(n, - normalize=normalize, - **kwargs) - assert_func(klass([x + op for x in s]), s + op) - assert_func(klass([x - op for x in s]), s - op) - assert_func(klass([op + x for x in s]), op + s) diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index dbfeb9715c59e..dda2918bf7615 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1005,9 +1005,7 @@ def test_operators_timedelta64_with_timedelta_invalid(self, scalar_td): @pytest.mark.parametrize('scalar_td', [ timedelta(minutes=5, seconds=4), - pytest.param(Timedelta('5m4s'), - marks=pytest.mark.xfail(reason="Timedelta.__floordiv__ " - "bug GH#18846")), + 
Timedelta('5m4s'), Timedelta('5m4s').to_timedelta64()]) def test_timedelta_rfloordiv(self, scalar_td): # GH#18831 @@ -1381,21 +1379,23 @@ def test_datetime64_ops_nat(self): assert_series_equal(NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp) + @pytest.mark.parametrize('dt64_series', [ + Series([Timestamp('19900315'), Timestamp('19900315')]), + Series([NaT, Timestamp('19900315')]), + Series([NaT, NaT], dtype='datetime64[ns]')]) + @pytest.mark.parametrize('one', [1, 1.0, np.array(1)]) + def test_dt64_mul_div_numeric_invalid(self, one, dt64_series): # multiplication with pytest.raises(TypeError): - datetime_series * 1 - with pytest.raises(TypeError): - nat_series_dtype_timestamp * 1 + dt64_series * one with pytest.raises(TypeError): - datetime_series * 1.0 - with pytest.raises(TypeError): - nat_series_dtype_timestamp * 1.0 + one * dt64_series # division with pytest.raises(TypeError): - nat_series_dtype_timestamp / 1.0 + dt64_series / one with pytest.raises(TypeError): - nat_series_dtype_timestamp / 1 + one / dt64_series def test_dt64series_arith_overflow(self): # GH#12534, fixed by #19024 @@ -1574,6 +1574,7 @@ def test_timedelta64_conversions(self): expected = s1.apply( lambda x: Timedelta(np.timedelta64(m, unit)) / x) result = np.timedelta64(m, unit) / s1 + assert_series_equal(result, expected) # astype s = Series(date_range('20130101', periods=3)) @@ -1990,69 +1991,69 @@ def test_series_frame_radd_bug(self): with pytest.raises(TypeError): self.ts + datetime.now() - def test_series_radd_more(self): - data = [[1, 2, 3], - [1.1, 2.2, 3.3], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), - pd.NaT], - ['x', 'y', 1]] - - for d in data: - for dtype in [None, object]: - s = Series(d, dtype=dtype) - with pytest.raises(TypeError): - 'foo_' + s - - for dtype in [None, object]: - res = 1 + pd.Series([1, 2, 3], dtype=dtype) - exp = pd.Series([2, 3, 4], dtype=dtype) - assert_series_equal(res, exp) - res = pd.Series([1, 2, 3], dtype=dtype) + 1 - 
assert_series_equal(res, exp) - - res = np.nan + pd.Series([1, 2, 3], dtype=dtype) - exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) - assert_series_equal(res, exp) - res = pd.Series([1, 2, 3], dtype=dtype) + np.nan - assert_series_equal(res, exp) - - s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), - pd.Timedelta('3 days')], dtype=dtype) - exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), - pd.Timedelta('6 days')]) - assert_series_equal(pd.Timedelta('3 days') + s, exp) - assert_series_equal(s + pd.Timedelta('3 days'), exp) - - s = pd.Series(['x', np.nan, 'x']) - assert_series_equal('a' + s, pd.Series(['ax', np.nan, 'ax'])) - assert_series_equal(s + 'a', pd.Series(['xa', np.nan, 'xa'])) - - def test_frame_radd_more(self): - data = [[1, 2, 3], - [1.1, 2.2, 3.3], - [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), - pd.NaT], - ['x', 'y', 1]] - - for d in data: - for dtype in [None, object]: - s = DataFrame(d, dtype=dtype) - with pytest.raises(TypeError): - 'foo_' + s - - for dtype in [None, object]: - res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype) - exp = pd.DataFrame([2, 3, 4], dtype=dtype) - assert_frame_equal(res, exp) - res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1 - assert_frame_equal(res, exp) - - res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype) - exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype) - assert_frame_equal(res, exp) - res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan - assert_frame_equal(res, exp) - + def test_series_radd_str(self): + ser = pd.Series(['x', np.nan, 'x']) + assert_series_equal('a' + ser, pd.Series(['ax', np.nan, 'ax'])) + assert_series_equal(ser + 'a', pd.Series(['xa', np.nan, 'xa'])) + + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_radd_more(self, dtype): + res = 1 + pd.Series([1, 2, 3], dtype=dtype) + exp = pd.Series([2, 3, 4], dtype=dtype) + assert_series_equal(res, exp) + res = pd.Series([1, 2, 3], dtype=dtype) + 1 + assert_series_equal(res, exp) + + 
res = np.nan + pd.Series([1, 2, 3], dtype=dtype) + exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype) + assert_series_equal(res, exp) + res = pd.Series([1, 2, 3], dtype=dtype) + np.nan + assert_series_equal(res, exp) + + s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'), + pd.Timedelta('3 days')], dtype=dtype) + exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'), + pd.Timedelta('6 days')]) + assert_series_equal(pd.Timedelta('3 days') + s, exp) + assert_series_equal(s + pd.Timedelta('3 days'), exp) + + @pytest.mark.parametrize('data', [ + [1, 2, 3], + [1.1, 2.2, 3.3], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT], + ['x', 'y', 1]]) + @pytest.mark.parametrize('dtype', [None, object]) + def test_series_radd_str_invalid(self, dtype, data): + ser = Series(data, dtype=dtype) + with pytest.raises(TypeError): + 'foo_' + ser + + @pytest.mark.parametrize('data', [ + [1, 2, 3], + [1.1, 2.2, 3.3], + [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'), pd.NaT], + ['x', 'y', 1]]) + @pytest.mark.parametrize('dtype', [None, object]) + def test_frame_radd_str_invalid(self, dtype, data): + df = DataFrame(data, dtype=dtype) + with pytest.raises(TypeError): + 'foo_' + df + + @pytest.mark.parametrize('dtype', [None, object]) + def test_frame_radd_more(self, dtype): + res = 1 + pd.DataFrame([1, 2, 3], dtype=dtype) + exp = pd.DataFrame([2, 3, 4], dtype=dtype) + assert_frame_equal(res, exp) + res = pd.DataFrame([1, 2, 3], dtype=dtype) + 1 + assert_frame_equal(res, exp) + + res = np.nan + pd.DataFrame([1, 2, 3], dtype=dtype) + exp = pd.DataFrame([np.nan, np.nan, np.nan], dtype=dtype) + assert_frame_equal(res, exp) + res = pd.DataFrame([1, 2, 3], dtype=dtype) + np.nan + assert_frame_equal(res, exp) + + def test_frame_radd_str(self): df = pd.DataFrame(['x', np.nan, 'x']) assert_frame_equal('a' + df, pd.DataFrame(['ax', np.nan, 'ax'])) assert_frame_equal(df + 'a', pd.DataFrame(['xa', np.nan, 'xa']))
Remove ASV for Timestamp.offset since that attribute has been removed. tests.indexes.datetimes.test_arithmetic has a giant offsets test that this splits up into reasonably sized pieces. - one piece of that test currently only runs if `klass is Series`, but that sub-test is now valid for DatetimeIndex too, so this PR removes that condition. - parametrize tests that currently iterate over a dictionary of inputs. - A mistaken-looking indentation in that iteration ATM prevents a bunch of cases from running. This PR fixes that. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19149
2018-01-09T05:14:39Z
2018-01-10T13:02:11Z
2018-01-10T13:02:11Z
2018-01-23T04:40:34Z
CLN: ASV timeseries
diff --git a/asv_bench/benchmarks/timeseries.py b/asv_bench/benchmarks/timeseries.py index fe282df25e9c5..ea2f077f980d0 100644 --- a/asv_bench/benchmarks/timeseries.py +++ b/asv_bench/benchmarks/timeseries.py @@ -1,358 +1,330 @@ +from datetime import timedelta + +import numpy as np +from pandas import to_datetime, date_range, Series, DataFrame, period_range +from pandas.tseries.frequencies import infer_freq try: from pandas.plotting._converter import DatetimeConverter except ImportError: from pandas.tseries.converter import DatetimeConverter -import pandas as pd -from pandas import to_datetime, date_range, Series, DataFrame, period_range - -import datetime as dt -from pandas.tseries.frequencies import infer_freq -import numpy as np - -if hasattr(Series, 'convert'): - Series.resample = Series.convert +from .pandas_vb_common import setup # noqa class DatetimeIndex(object): + goal_time = 0.2 + params = ['dst', 'repeated', 'tz_aware', 'tz_naive'] + param_names = ['index_type'] - def setup(self): - self.N = 100000 - self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') + def setup(self, index_type): + N = 100000 + dtidxes = {'dst': date_range(start='10/29/2000 1:00:00', + end='10/29/2000 1:59:59', freq='S'), + 'repeated': date_range(start='2000', + periods=N / 10, + freq='s').repeat(10), + 'tz_aware': date_range(start='2000', + periods=N, + freq='s', + tz='US/Eastern'), + 'tz_naive': date_range(start='2000', + periods=N, + freq='s')} + self.index = dtidxes[index_type] - self.rng2 = date_range(start='1/1/2000 9:30', periods=10000, - freq='S', tz='US/Eastern') + def time_add_timedelta(self, index_type): + self.index + timedelta(minutes=2) - self.index_repeated = date_range(start='1/1/2000', - periods=1000, freq='T').repeat(10) + def time_normalize(self, index_type): + self.index.normalize() - self.rng3 = date_range(start='1/1/2000', periods=1000, freq='H') - self.df = DataFrame(np.random.randn(len(self.rng3), 2), self.rng3) + def time_unique(self, index_type): 
+ self.index.unique() - self.rng4 = date_range(start='1/1/2000', periods=1000, - freq='H', tz='US/Eastern') - self.df2 = DataFrame(np.random.randn(len(self.rng4), 2), - index=self.rng4) + def time_to_time(self, index_type): + self.index.time - N = 100000 - self.dti = pd.date_range('2011-01-01', freq='H', periods=N).repeat(5) - self.dti_tz = pd.date_range('2011-01-01', freq='H', periods=N, - tz='Asia/Tokyo').repeat(5) + def time_get(self, index_type): + self.index[0] - self.rng5 = date_range(start='1/1/2000', - end='3/1/2000', tz='US/Eastern') + def time_timeseries_is_month_start(self, index_type): + self.index.is_month_start - self.dst_rng = date_range(start='10/29/2000 1:00:00', - end='10/29/2000 1:59:59', freq='S') + def time_to_date(self, index_type): + self.index.date + + def time_to_pydatetime(self, index_type): + self.index.to_pydatetime() + + +class TzLocalize(object): + + goal_time = 0.2 + + def setup(self): + dst_rng = date_range(start='10/29/2000 1:00:00', + end='10/29/2000 1:59:59', freq='S') self.index = date_range(start='10/29/2000', end='10/29/2000 00:59:59', freq='S') - self.index = self.index.append(self.dst_rng) - self.index = self.index.append(self.dst_rng) + self.index = self.index.append(dst_rng) + self.index = self.index.append(dst_rng) self.index = self.index.append(date_range(start='10/29/2000 2:00:00', end='10/29/2000 3:00:00', freq='S')) - self.N = 10000 - self.rng6 = date_range(start='1/1/1', periods=self.N, freq='B') - - self.rng7 = date_range(start='1/1/1700', freq='D', periods=100000) - self.no_freq = self.rng7[:50000].append(self.rng7[50002:]) - self.d_freq = self.rng7[:50000].append(self.rng7[50000:]) + def time_infer_dst(self): + self.index.tz_localize('US/Eastern', infer_dst=True) - self.rng8 = date_range(start='1/1/1700', freq='B', periods=75000) - self.b_freq = self.rng8[:50000].append(self.rng8[50000:]) - def time_add_timedelta(self): - (self.rng + dt.timedelta(minutes=2)) +class ResetIndex(object): - def time_normalize(self): - 
self.rng2.normalize() + goal_time = 0.2 + params = [None, 'US/Eastern'] + param_names = 'tz' - def time_unique(self): - self.index_repeated.unique() + def setup(self, tz): + idx = date_range(start='1/1/2000', periods=1000, freq='H', tz=tz) + self.df = DataFrame(np.random.randn(1000, 2), index=idx) - def time_reset_index(self): + def time_reest_datetimeindex(self, tz): self.df.reset_index() - def time_reset_index_tz(self): - self.df2.reset_index() - - def time_dti_factorize(self): - self.dti.factorize() - def time_dti_tz_factorize(self): - self.dti_tz.factorize() +class Factorize(object): - def time_dti_time(self): - self.dst_rng.time - - def time_timestamp_tzinfo_cons(self): - self.rng5[0] + goal_time = 0.2 + params = [None, 'Asia/Tokyo'] + param_names = 'tz' - def time_infer_dst(self): - self.index.tz_localize('US/Eastern', infer_dst=True) + def setup(self, tz): + N = 100000 + self.dti = date_range('2011-01-01', freq='H', periods=N, tz=tz) + self.dti = self.dti.repeat(5) - def time_timeseries_is_month_start(self): - self.rng6.is_month_start + def time_factorize(self, tz): + self.dti.factorize() - def time_infer_freq_none(self): - infer_freq(self.no_freq) - def time_infer_freq_daily(self): - infer_freq(self.d_freq) +class InferFreq(object): - def time_infer_freq_business(self): - infer_freq(self.b_freq) + goal_time = 0.2 + params = [None, 'D', 'B'] + param_names = ['freq'] - def time_to_date(self): - self.rng.date + def setup(self, freq): + if freq is None: + self.idx = date_range(start='1/1/1700', freq='D', periods=10000) + self.idx.freq = None + else: + self.idx = date_range(start='1/1/1700', freq=freq, periods=10000) - def time_to_pydatetime(self): - self.rng.to_pydatetime() + def time_infer_freq(self, freq): + infer_freq(self.idx) class TimeDatetimeConverter(object): + goal_time = 0.2 def setup(self): - self.N = 100000 - self.rng = date_range(start='1/1/2000', periods=self.N, freq='T') + N = 100000 + self.rng = date_range(start='1/1/2000', periods=N, freq='T') 
def time_convert(self): DatetimeConverter.convert(self.rng, None, None) class Iteration(object): - goal_time = 0.2 - - def setup(self): - self.N = 1000000 - self.M = 10000 - self.idx1 = date_range(start='20140101', freq='T', periods=self.N) - self.idx2 = period_range(start='20140101', freq='T', periods=self.N) - - def iter_n(self, iterable, n=None): - self.i = 0 - for _ in iterable: - self.i += 1 - if ((n is not None) and (self.i > n)): - break - def time_iter_datetimeindex(self): - self.iter_n(self.idx1) - - def time_iter_datetimeindex_preexit(self): - self.iter_n(self.idx1, self.M) + goal_time = 0.2 + params = [date_range, period_range] + param_names = ['time_index'] - def time_iter_periodindex(self): - self.iter_n(self.idx2) + def setup(self, time_index): + N = 10**6 + self.idx = time_index(start='20140101', freq='T', periods=N) + self.exit = 10000 - def time_iter_periodindex_preexit(self): - self.iter_n(self.idx2, self.M) + def time_iter(self, time_index): + for _ in self.idx: + pass + def time_iter_preexit(self, time_index): + for i, _ in enumerate(self.idx): + if i > self.exit: + break -# ---------------------------------------------------------------------- -# Resampling class ResampleDataFrame(object): - goal_time = 0.2 - - def setup(self): - self.rng = date_range(start='20130101', periods=100000, freq='50L') - self.df = DataFrame(np.random.randn(100000, 2), index=self.rng) - def time_max_numpy(self): - self.df.resample('1s', how=np.max) - - def time_max_string(self): - self.df.resample('1s', how='max') - - def time_mean_numpy(self): - self.df.resample('1s', how=np.mean) - - def time_mean_string(self): - self.df.resample('1s', how='mean') + goal_time = 0.2 + params = ['max', 'mean', 'min'] + param_names = ['method'] - def time_min_numpy(self): - self.df.resample('1s', how=np.min) + def setup(self, method): + rng = date_range(start='20130101', periods=100000, freq='50L') + df = DataFrame(np.random.randn(100000, 2), index=rng) + self.resample = 
getattr(df.resample('1s'), method) - def time_min_string(self): - self.df.resample('1s', how='min') + def time_method(self, method): + self.resample() class ResampleSeries(object): + + goal_time = 0.2 + params = (['period', 'datetime'], ['5min', '1D'], ['mean', 'ohlc']) + param_names = ['index', 'freq', 'method'] + + def setup(self, index, freq, method): + indexes = {'period': period_range(start='1/1/2000', + end='1/1/2001', + freq='T'), + 'datetime': date_range(start='1/1/2000', + end='1/1/2001', + freq='T')} + idx = indexes[index] + ts = Series(np.random.randn(len(idx)), index=idx) + self.resample = getattr(ts.resample(freq), method) + + def time_resample(self, index, freq, method): + self.resample() + + +class ResampleDatetetime64(object): + # GH 7754 goal_time = 0.2 def setup(self): - self.rng1 = period_range(start='1/1/2000', end='1/1/2001', freq='T') - self.ts1 = Series(np.random.randn(len(self.rng1)), index=self.rng1) + rng3 = date_range(start='2000-01-01 00:00:00', + end='2000-01-01 10:00:00', freq='555000U') + self.dt_ts = Series(5, rng3, dtype='datetime64[ns]') - self.rng2 = date_range(start='1/1/2000', end='1/1/2001', freq='T') - self.ts2 = Series(np.random.randn(len(self.rng2)), index=self.rng2) - - self.rng3 = date_range(start='2000-01-01 00:00:00', - end='2000-01-01 10:00:00', freq='555000U') - self.int_ts = Series(5, self.rng3, dtype='int64') - self.dt_ts = self.int_ts.astype('datetime64[ns]') - - def time_period_downsample_mean(self): - self.ts1.resample('D', how='mean') - - def time_timestamp_downsample_mean(self): - self.ts2.resample('D', how='mean') - - def time_resample_datetime64(self): - # GH 7754 - self.dt_ts.resample('1S', how='last') - - def time_1min_5min_mean(self): - self.ts2[:10000].resample('5min', how='mean') - - def time_1min_5min_ohlc(self): - self.ts2[:10000].resample('5min', how='ohlc') + def time_resample(self): + self.dt_ts.resample('1S').last() class AsOf(object): - goal_time = 0.2 - def setup(self): - self.N = 10000 - self.rng 
= date_range(start='1/1/1990', periods=self.N, freq='53s') - self.ts = Series(np.random.randn(self.N), index=self.rng) - self.dates = date_range(start='1/1/1990', - periods=(self.N * 10), freq='5s') + goal_time = 0.2 + params = ['DataFrame', 'Series'] + param_names = ['constructor'] + + def setup(self, constructor): + N = 10000 + M = 10 + rng = date_range(start='1/1/1990', periods=N, freq='53s') + data = {'DataFrame': DataFrame(np.random.randn(N, M)), + 'Series': Series(np.random.randn(N))} + self.ts = data[constructor] + self.ts.index = rng self.ts2 = self.ts.copy() - self.ts2[250:5000] = np.nan + self.ts2.iloc[250:5000] = np.nan self.ts3 = self.ts.copy() - self.ts3[-5000:] = np.nan + self.ts3.iloc[-5000:] = np.nan + self.dates = date_range(start='1/1/1990', periods=N * 10, freq='5s') + self.date = self.dates[0] + self.date_last = self.dates[-1] + self.date_early = self.date - timedelta(10) # test speed of pre-computing NAs. - def time_asof(self): + def time_asof(self, constructor): self.ts.asof(self.dates) # should be roughly the same as above. - def time_asof_nan(self): + def time_asof_nan(self, constructor): self.ts2.asof(self.dates) # test speed of the code path for a scalar index # without *while* loop - def time_asof_single(self): - self.ts.asof(self.dates[0]) + def time_asof_single(self, constructor): + self.ts.asof(self.date) # test speed of the code path for a scalar index # before the start. should be the same as above. - def time_asof_single_early(self): - self.ts.asof(self.dates[0] - dt.timedelta(10)) + def time_asof_single_early(self, constructor): + self.ts.asof(self.date_early) # test the speed of the code path for a scalar index # with a long *while* loop. should still be much # faster than pre-computing all the NAs. 
- def time_asof_nan_single(self): - self.ts3.asof(self.dates[-1]) + def time_asof_nan_single(self, constructor): + self.ts3.asof(self.date_last) -class AsOfDataFrame(object): - goal_time = 0.2 +class SortIndex(object): - def setup(self): - self.N = 10000 - self.M = 100 - self.rng = date_range(start='1/1/1990', periods=self.N, freq='53s') - self.dates = date_range(start='1/1/1990', - periods=(self.N * 10), freq='5s') - self.ts = DataFrame(np.random.randn(self.N, self.M), index=self.rng) - self.ts2 = self.ts.copy() - self.ts2.iloc[250:5000] = np.nan - self.ts3 = self.ts.copy() - self.ts3.iloc[-5000:] = np.nan - - # test speed of pre-computing NAs. - def time_asof(self): - self.ts.asof(self.dates) + goal_time = 0.2 + params = [True, False] + param_names = ['monotonic'] - # should be roughly the same as above. - def time_asof_nan(self): - self.ts2.asof(self.dates) + def setup(self, monotonic): + N = 10**5 + idx = date_range(start='1/1/2000', periods=N, freq='s') + self.s = Series(np.random.randn(N), index=idx) + if not monotonic: + self.s = self.s.sample(frac=1) - # test speed of the code path for a scalar index - # with pre-computing all NAs. - def time_asof_single(self): - self.ts.asof(self.dates[0]) + def time_sort_index(self, monotonic): + self.s.sort_index() - # should be roughly the same as above. - def time_asof_nan_single(self): - self.ts3.asof(self.dates[-1]) + def time_get_slice(self, monotonic): + self.s[:10000] - # test speed of the code path for a scalar index - # before the start. should be without the cost of - # pre-computing all the NAs. 
- def time_asof_single_early(self): - self.ts.asof(self.dates[0] - dt.timedelta(10)) +class IrregularOps(object): -class TimeSeries(object): goal_time = 0.2 def setup(self): - self.N = 100000 - self.rng = date_range(start='1/1/2000', periods=self.N, freq='s') - self.rng = self.rng.take(np.random.permutation(self.N)) - self.ts = Series(np.random.randn(self.N), index=self.rng) - - self.rng2 = date_range(start='1/1/2000', periods=self.N, freq='T') - self.ts2 = Series(np.random.randn(self.N), index=self.rng2) + N = 10**5 + idx = date_range(start='1/1/2000', periods=N, freq='s') + s = Series(np.random.randn(N), index=idx) + self.left = s.sample(frac=1) + self.right = s.sample(frac=1) - self.lindex = np.random.permutation(self.N)[:(self.N // 2)] - self.rindex = np.random.permutation(self.N)[:(self.N // 2)] - self.left = Series(self.ts2.values.take(self.lindex), - index=self.ts2.index.take(self.lindex)) - self.right = Series(self.ts2.values.take(self.rindex), - index=self.ts2.index.take(self.rindex)) + def time_add(self): + self.left + self.right - self.rng3 = date_range(start='1/1/2000', periods=1500000, freq='S') - self.ts3 = Series(1, index=self.rng3) - def time_sort_index_monotonic(self): - self.ts2.sort_index() +class Lookup(object): - def time_sort_index_non_monotonic(self): - self.ts.sort_index() + goal_time = 0.2 - def time_timeseries_slice_minutely(self): - self.ts2[:10000] + def setup(self): + N = 1500000 + rng = date_range(start='1/1/2000', periods=N, freq='S') + self.ts = Series(1, index=rng) + self.lookup_val = rng[N // 2] - def time_add_irregular(self): - (self.left + self.right) + def time_lookup_and_cleanup(self): + self.ts[self.lookup_val] + self.ts.index._cleanup() - def time_large_lookup_value(self): - self.ts3[self.ts3.index[(len(self.ts3) // 2)]] - self.ts3.index._cleanup() +class ToDatetimeYYYYMMDD(object): -class ToDatetime(object): goal_time = 0.2 def setup(self): - self.rng = date_range(start='1/1/2000', periods=10000, freq='D') - self.stringsD = 
Series(self.rng.strftime('%Y%m%d')) + rng = date_range(start='1/1/2000', periods=10000, freq='D') + self.stringsD = Series(rng.strftime('%Y%m%d')) - self.rng = date_range(start='1/1/2000', periods=20000, freq='H') - self.strings = self.rng.strftime('%Y-%m-%d %H:%M:%S').tolist() - self.strings_nosep = self.rng.strftime('%Y%m%d %H:%M:%S').tolist() - self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800' - for x in self.rng] + def time_format_YYYYMMDD(self): + to_datetime(self.stringsD, format='%Y%m%d') - self.s = Series((['19MAY11', '19MAY11:00:00:00'] * 100000)) - self.s2 = self.s.str.replace(':\\S+$', '') - self.unique_numeric_seconds = range(10000) - self.dup_numeric_seconds = [1000] * 10000 - self.dup_string_dates = ['2000-02-11'] * 10000 - self.dup_string_with_tz = ['2000-02-11 15:00:00-0800'] * 10000 +class ToDatetimeISO8601(object): - def time_format_YYYYMMDD(self): - to_datetime(self.stringsD, format='%Y%m%d') + goal_time = 0.2 + + def setup(self): + rng = date_range(start='1/1/2000', periods=20000, freq='H') + self.strings = rng.strftime('%Y-%m-%d %H:%M:%S').tolist() + self.strings_nosep = rng.strftime('%Y%m%d %H:%M:%S').tolist() + self.strings_tz_space = [x.strftime('%Y-%m-%d %H:%M:%S') + ' -0800' + for x in rng] def time_iso8601(self): to_datetime(self.strings) @@ -369,49 +341,56 @@ def time_iso8601_format_no_sep(self): def time_iso8601_tz_spaceformat(self): to_datetime(self.strings_tz_space) - def time_format_exact(self): + +class ToDatetimeFormat(object): + + goal_time = 0.2 + + def setup(self): + self.s = Series(['19MAY11', '19MAY11:00:00:00'] * 100000) + self.s2 = self.s.str.replace(':\\S+$', '') + + def time_exact(self): to_datetime(self.s2, format='%d%b%y') - def time_format_no_exact(self): + def time_no_exact(self): to_datetime(self.s, format='%d%b%y', exact=False) - def time_cache_true_with_unique_seconds_and_unit(self): - to_datetime(self.unique_numeric_seconds, unit='s', cache=True) - - def 
time_cache_false_with_unique_seconds_and_unit(self): - to_datetime(self.unique_numeric_seconds, unit='s', cache=False) - def time_cache_true_with_dup_seconds_and_unit(self): - to_datetime(self.dup_numeric_seconds, unit='s', cache=True) +class ToDatetimeCache(object): - def time_cache_false_with_dup_seconds_and_unit(self): - to_datetime(self.dup_numeric_seconds, unit='s', cache=False) + goal_time = 0.2 + params = [True, False] + param_names = ['cache'] - def time_cache_true_with_dup_string_dates(self): - to_datetime(self.dup_string_dates, cache=True) + def setup(self, cache): + N = 10000 + self.unique_numeric_seconds = range(N) + self.dup_numeric_seconds = [1000] * N + self.dup_string_dates = ['2000-02-11'] * N + self.dup_string_with_tz = ['2000-02-11 15:00:00-0800'] * N - def time_cache_false_with_dup_string_dates(self): - to_datetime(self.dup_string_dates, cache=False) + def time_unique_seconds_and_unit(self, cache): + to_datetime(self.unique_numeric_seconds, unit='s', cache=cache) - def time_cache_true_with_dup_string_dates_and_format(self): - to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=True) + def time_dup_seconds_and_unit(self, cache): + to_datetime(self.dup_numeric_seconds, unit='s', cache=cache) - def time_cache_false_with_dup_string_dates_and_format(self): - to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=False) + def time_dup_string_dates(self, cache): + to_datetime(self.dup_string_dates, cache=cache) - def time_cache_true_with_dup_string_tzoffset_dates(self): - to_datetime(self.dup_string_with_tz, cache=True) + def time_dup_string_dates_and_format(self, cache): + to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=cache) - def time_cache_false_with_dup_string_tzoffset_dates(self): - to_datetime(self.dup_string_with_tz, cache=False) + def time_dup_string_tzoffset_dates(self, cache): + to_datetime(self.dup_string_with_tz, cache=cache) class DatetimeAccessor(object): + def setup(self): - self.N = 100000 - self.series = 
pd.Series( - pd.date_range(start='1/1/2000', periods=self.N, freq='T') - ) + N = 100000 + self.series = Series(date_range(start='1/1/2000', periods=N, freq='T')) def time_dt_accessor(self): self.series.dt
- [x] closes #8144 (benchmarks now have random seed where appropriate) Split up the some benchmarks and utilized `param`s where available. I think `to_datetime(...,cache=True)` is tagged for v0.23 so that's probably why the `ToDatetimeCache` benchmarks don't work yet? ``` [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 2.38%] ··· Running timeseries.AsOf.time_asof ok [ 2.38%] ···· ============= ======== constructor ------------- -------- DataFrame 25.9ms Series 13.8ms ============= ======== [ 4.76%] ··· Running timeseries.AsOf.time_asof_nan ok [ 4.76%] ···· ============= ======== constructor ------------- -------- DataFrame 27.8ms Series 13.4ms ============= ======== [ 7.14%] ··· Running timeseries.AsOf.time_asof_nan_single ok [ 7.14%] ···· ============= ======== constructor ------------- -------- DataFrame 7.96ms Series 6.70ms ============= ======== [ 9.52%] ··· Running timeseries.AsOf.time_asof_single ok [ 9.52%] ···· ============= ======== constructor ------------- -------- DataFrame 7.98ms Series 225μs ============= ======== [ 11.90%] ··· Running timeseries.AsOf.time_asof_single_early ok [ 11.90%] ···· ============= ======= constructor ------------- ------- DataFrame 406μs Series 168μs ============= ======= [ 14.29%] ··· Running timeseries.DatetimeAccessor.time_dt_accessor 141μs [ 16.67%] ··· Running ...DatetimeAccessor.time_dt_accessor_normalize 15.5ms [ 19.05%] ··· Running timeseries.DatetimeIndex.time_add_timedelta ok [ 19.05%] ···· ============ ======== index_type ------------ -------- dst 1.09ms repeated 6.01ms tz_aware 5.13ms tz_naive 5.09ms ============ ======== [ 21.43%] ··· Running timeseries.DatetimeIndex.time_get ok [ 21.43%] ···· ============ ======== index_type ------------ -------- dst 66.7μs repeated 58.7μs tz_aware 91.8μs tz_naive 65.2μs ============ ======== [ 23.81%] ··· Running timeseries.DatetimeIndex.time_normalize ok [ 
23.81%] ···· ============ ======== index_type ------------ -------- dst 743μs repeated 11.7ms tz_aware 52.3ms tz_naive 11.6ms ============ ======== [ 26.19%] ··· Running ...atetimeIndex.time_timeseries_is_month_start ok [ 26.19%] ···· ============ ======== index_type ------------ -------- dst 364μs repeated 6.08ms tz_aware 11.4ms tz_naive 6.24ms ============ ======== [ 28.57%] ··· Running timeseries.DatetimeIndex.time_to_date ok [ 28.57%] ···· ============ ======== index_type ------------ -------- dst 18.1ms repeated 498ms tz_aware 1.98s tz_naive 503ms ============ ======== [ 30.95%] ··· Running timeseries.DatetimeIndex.time_to_pydatetime ok [ 30.95%] ···· ============ ======== index_type ------------ -------- dst 2.41ms repeated 64.7ms tz_aware 602ms tz_naive 67.2ms ============ ======== [ 33.33%] ··· Running timeseries.DatetimeIndex.time_to_time ok [ 33.33%] ···· ============ ======== index_type ------------ -------- dst 19.0ms repeated 506ms tz_aware 2.00s tz_naive 509ms ============ ======== [ 35.71%] ··· Running timeseries.DatetimeIndex.time_unique ok [ 35.71%] ···· ============ ======== index_type ------------ -------- dst 592μs repeated 2.21ms tz_aware 7.59ms tz_naive 8.03ms ============ ======== [ 38.10%] ··· Running timeseries.Factorize.time_factorize ok [ 38.10%] ···· ============ ======== t ------------ -------- None 27.6ms Asia/Tokyo 28.8ms ============ ======== [ 40.48%] ··· Running timeseries.InferFreq.time_infer_freq ok [ 40.48%] ···· ====== ======== freq ------ -------- None 1.84ms D 1.84ms B 2.81ms ====== ======== [ 42.86%] ··· Running timeseries.IrregularOps.time_add 477ms [ 45.24%] ··· Running timeseries.Iteration.time_iter ok [ 45.24%] ···· =========================================== ======= time_index ------------------------------------------- ------- <function date_range at 0x7f428622a050> 1.06s <function period_range at 0x7f4285d5c2a8> 5.69s =========================================== ======= [ 47.62%] ··· Running 
timeseries.Iteration.time_iter_preexit ok [ 47.62%] ···· =========================================== ======== time_index ------------------------------------------- -------- <function date_range at 0x7f428622a050> 23.4ms <function period_range at 0x7f4285d5c2a8> 61.1ms =========================================== ======== [ 50.00%] ··· Running timeseries.Lookup.time_lookup_and_cleanup 6.01ms [ 52.38%] ··· Running timeseries.ResampleDataFrame.time_method ok [ 52.38%] ···· ======== ======== method -------- -------- max 6.67ms mean 5.88ms min 6.60ms ======== ======== [ 54.76%] ··· Running timeseries.ResampleDatetetime64.time_resample 7.28ms [ 57.14%] ··· Running timeseries.ResampleSeries.time_resample ok [ 57.14%] ···· ========== ====== ======== ======== -- method ----------------- ----------------- index freq mean ohlc ========== ====== ======== ======== period 5min 31.0ms 34.2ms period 1D 25.2ms 26.4ms datetime 5min 18.0ms 21.6ms datetime 1D 12.2ms 13.5ms ========== ====== ======== ======== [ 59.52%] ··· Running timeseries.ResetIndex.time_reest_datetimeindex ok [ 59.52%] ···· ============ ======== t ------------ -------- None 1.15ms US/Eastern 1.34ms ============ ======== [ 61.90%] ··· Running timeseries.SortIndex.time_get_slice ok [ 61.90%] ···· =========== ======= monotonic ----------- ------- True 322μs False 283μs =========== ======= [ 64.29%] ··· Running timeseries.SortIndex.time_sort_index ok [ 64.29%] ···· =========== ======== monotonic ----------- -------- True 1.72ms False 17.8ms =========== ======== [ 66.67%] ··· Running timeseries.TimeDatetimeConverter.time_convert 12.5ms [ 69.05%] ··· Running ...s.ToDatetimeCache.time_dup_seconds_and_unit failed [ 69.05%] ···· ======= ======== cache ------- -------- True failed False failed ======= ======== [ 69.05%] ····· For parameters: True Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File 
"/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 377, in time_dup_seconds_and_unit to_datetime(self.dup_numeric_seconds, unit='s', cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' For parameters: False Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in 
benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 377, in time_dup_seconds_and_unit to_datetime(self.dup_numeric_seconds, unit='s', cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' [ 71.43%] ··· Running ...eries.ToDatetimeCache.time_dup_string_dates failed [ 71.43%] ···· ======= ======== cache ------- -------- True failed False failed ======= ======== [ 71.43%] ····· For parameters: True Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = 
lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 380, in time_dup_string_dates to_datetime(self.dup_string_dates, cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' For parameters: False Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 380, in time_dup_string_dates to_datetime(self.dup_string_dates, cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' [ 73.81%] ··· Running ...etimeCache.time_dup_string_dates_and_format failed [ 73.81%] ···· ======= ======== cache ------- -------- True failed False failed ======= ======== [ 73.81%] ····· For parameters: True Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", 
line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 383, in time_dup_string_dates_and_format to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' For parameters: False Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File 
"/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 383, in time_dup_string_dates_and_format to_datetime(self.dup_string_dates, format='%Y-%m-%d', cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' [ 76.19%] ··· Running ...atetimeCache.time_dup_string_tzoffset_dates failed [ 76.19%] ···· ======= ======== cache ------- -------- True failed False failed ======= ======== [ 76.19%] ····· For parameters: True Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File 
"/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 386, in time_dup_string_tzoffset_dates to_datetime(self.dup_string_with_tz, cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' For parameters: False Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 386, in time_dup_string_tzoffset_dates to_datetime(self.dup_string_with_tz, cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' [ 78.57%] ··· Running ...oDatetimeCache.time_unique_seconds_and_unit failed [ 78.57%] ···· ======= ======== cache ------- -------- True failed False failed ======= ======== [ 78.57%] ····· For 
parameters: True Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 374, in time_unique_seconds_and_unit to_datetime(self.unique_numeric_seconds, unit='s', cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' For parameters: False Traceback (most recent call last): File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 818, in <module> commands[mode](args) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 795, in main_run result = benchmark.do_run() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 349, in do_run return self.run(*self._current_params) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 
424, in run samples, number = self.benchmark_timing(timer, repeat, warmup_time, number=number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 471, in benchmark_timing timing = timer.timeit(number) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 202, in timeit timing = self.inner(it, self.timer) File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/timeit.py", line 100, in inner _func() File "/home/matt/anaconda/envs/pandas_dev/lib/python2.7/site-packages/asv/benchmark.py", line 415, in <lambda> func = lambda: self.func(*param) File "/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py", line 374, in time_unique_seconds_and_unit to_datetime(self.unique_numeric_seconds, unit='s', cache=cache) TypeError: to_datetime() got an unexpected keyword argument 'cache' [ 80.95%] ··· Running timeseries.ToDatetimeFormat.time_exact 2.02s [ 83.33%] ··· Running timeseries.ToDatetimeFormat.time_no_exact 1.93s [ 85.71%] ··· Running timeseries.ToDatetimeISO8601.time_iso8601 9.93ms [ 88.10%] ··· Running ...eries.ToDatetimeISO8601.time_iso8601_format 10.2ms [ 90.48%] ··· Running ...oDatetimeISO8601.time_iso8601_format_no_sep 9.67ms [ 92.86%] ··· Running ...series.ToDatetimeISO8601.time_iso8601_nosep 9.84ms [ 95.24%] ··· Running ...DatetimeISO8601.time_iso8601_tz_spaceformat 619ms [ 97.62%] ··· Running ...ies.ToDatetimeYYYYMMDD.time_format_YYYYMMDD 15.1ms [100.00%] ··· Running timeseries.TzLocalize.time_infer_dst 4.84ms [100.00%] ····· /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/timeseries.py:77: FutureWarning: the infer_dst=True keyword is deprecated, use ambiguous='infer' instead self.index.tz_localize('US/Eastern', infer_dst=True) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19148
2018-01-09T04:55:16Z
2018-01-09T12:53:43Z
2018-01-09T12:53:43Z
2018-01-09T17:45:10Z
Fix incorrect exception raised by Series[datetime64] + int
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index e40318bd65db2..880f9b73d6bc8 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -309,6 +309,7 @@ Other API Changes - ``IntervalDtype`` now returns ``True`` when compared against ``'interval'`` regardless of subtype, and ``IntervalDtype.name`` now returns ``'interval'`` regardless of subtype (:issue:`18980`) - :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`) - Addition or subtraction of ``NaT`` from :class:`TimedeltaIndex` will return ``TimedeltaIndex`` instead of ``DatetimeIndex`` (:issue:`19124`) +- :func:`DatetimeIndex.shift` and :func:`TimedeltaIndex.shift` will now raise ``NullFrequencyError`` (which subclasses ``ValueError``, which was raised in older versions) when the index object frequency is ``None`` (:issue:`19147`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index 0ee2f8ebce011..7bb6708e03421 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -32,6 +32,7 @@ from pandas.core import common as com, algorithms from pandas.core.algorithms import checked_add_with_arr from pandas.core.common import AbstractMethodError +from pandas.errors import NullFrequencyError import pandas.io.formats.printing as printing from pandas._libs import lib, iNaT, NaT @@ -692,6 +693,9 @@ def __add__(self, other): return self._add_datelike(other) elif isinstance(other, Index): return self._add_datelike(other) + elif is_integer_dtype(other) and self.freq is None: + # GH#19123 + raise NullFrequencyError("Cannot shift with no freq") else: # pragma: no cover return NotImplemented @@ -731,7 +735,9 @@ def __sub__(self, other): raise TypeError("cannot subtract {typ1} and {typ2}" .format(typ1=type(self).__name__, typ2=type(other).__name__)) - 
+ elif is_integer_dtype(other) and self.freq is None: + # GH#19123 + raise NullFrequencyError("Cannot shift with no freq") else: # pragma: no cover return NotImplemented @@ -831,7 +837,7 @@ def shift(self, n, freq=None): return self if self.freq is None: - raise ValueError("Cannot shift with no freq") + raise NullFrequencyError("Cannot shift with no freq") start = self[0] + n * self.freq end = self[-1] + n * self.freq diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 99f7e7309d463..fc3ea106252db 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -20,7 +20,7 @@ from pandas.compat import bind_method import pandas.core.missing as missing -from pandas.errors import PerformanceWarning +from pandas.errors import PerformanceWarning, NullFrequencyError from pandas.core.common import _values_from_object, _maybe_match_name from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.common import ( @@ -672,9 +672,8 @@ def wrapper(left, right, name=name, na_op=na_op): left, right = _align_method_SERIES(left, right) if is_datetime64_dtype(left) or is_datetime64tz_dtype(left): - result = op(pd.DatetimeIndex(left), right) + result = dispatch_to_index_op(op, left, right, pd.DatetimeIndex) res_name = _get_series_op_result_name(left, right) - result.name = res_name # needs to be overriden if None return construct_result(left, result, index=left.index, name=res_name, dtype=result.dtype) @@ -703,6 +702,40 @@ def wrapper(left, right, name=name, na_op=na_op): return wrapper +def dispatch_to_index_op(op, left, right, index_class): + """ + Wrap Series left in the given index_class to delegate the operation op + to the index implementation. DatetimeIndex and TimedeltaIndex perform + type checking, timezone handling, overflow checks, etc. + + Parameters + ---------- + op : binary operator (operator.add, operator.sub, ...) 
+ left : Series + right : object + index_class : DatetimeIndex or TimedeltaIndex + + Returns + ------- + result : object, usually DatetimeIndex, TimedeltaIndex, or Series + """ + left_idx = index_class(left) + + # avoid accidentally allowing integer add/sub. For datetime64[tz] dtypes, + # left_idx may inherit a freq from a cached DatetimeIndex. + # See discussion in GH#19147. + if left_idx.freq is not None: + left_idx = left_idx._shallow_copy(freq=None) + try: + result = op(left_idx, right) + except NullFrequencyError: + # DatetimeIndex and TimedeltaIndex with freq == None raise ValueError + # on add/sub of integers (or int-like). We re-raise as a TypeError. + raise TypeError('incompatible type for a datetime/timedelta ' + 'operation [{name}]'.format(name=op.__name__)) + return result + + def _get_series_op_result_name(left, right): # `left` is always a pd.Series if isinstance(right, (ABCSeries, pd.Index)): diff --git a/pandas/errors/__init__.py b/pandas/errors/__init__.py index d843126c60144..22b6d33be9d38 100644 --- a/pandas/errors/__init__.py +++ b/pandas/errors/__init__.py @@ -67,5 +67,13 @@ class MergeError(ValueError): """ +class NullFrequencyError(ValueError): + """ + Error raised when a null `freq` attribute is used in an operation + that needs a non-null frequency, particularly `DatetimeIndex.shift`, + `TimedeltaIndex.shift`, `PeriodIndex.shift`. 
+ """ + + class AccessorRegistrationWarning(Warning): """Warning for attribute conflicts in accessor registration.""" diff --git a/pandas/tests/indexes/datetimes/test_ops.py b/pandas/tests/indexes/datetimes/test_ops.py index a7a6e3caab727..a2a84adbf46c1 100644 --- a/pandas/tests/indexes/datetimes/test_ops.py +++ b/pandas/tests/indexes/datetimes/test_ops.py @@ -7,6 +7,7 @@ from itertools import product import pandas as pd +from pandas.errors import NullFrequencyError import pandas._libs.tslib as tslib from pandas._libs.tslibs.offsets import shift_months import pandas.util.testing as tm @@ -593,6 +594,12 @@ def test_nat_new(self): exp = np.array([tslib.iNaT] * 5, dtype=np.int64) tm.assert_numpy_array_equal(result, exp) + def test_shift_no_freq(self): + # GH#19147 + dti = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01'], freq=None) + with pytest.raises(NullFrequencyError): + dti.shift(2) + def test_shift(self): # GH 9903 for tz in self.tz: diff --git a/pandas/tests/indexes/timedeltas/test_timedelta.py b/pandas/tests/indexes/timedeltas/test_timedelta.py index e25384ebf7d62..5a4d6dabbde3e 100644 --- a/pandas/tests/indexes/timedeltas/test_timedelta.py +++ b/pandas/tests/indexes/timedeltas/test_timedelta.py @@ -4,6 +4,7 @@ from datetime import timedelta import pandas as pd +from pandas.errors import NullFrequencyError import pandas.util.testing as tm from pandas import (timedelta_range, date_range, Series, Timedelta, TimedeltaIndex, Index, DataFrame, @@ -50,6 +51,12 @@ def test_shift(self): '10 days 01:00:03'], freq='D') tm.assert_index_equal(result, expected) + def test_shift_no_freq(self): + # GH#19147 + tdi = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00'], freq=None) + with pytest.raises(NullFrequencyError): + tdi.shift(2) + def test_pickle_compat_construction(self): pass diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 1797dbcc15872..c06435d4b8c42 100644 --- a/pandas/tests/series/test_operators.py +++ 
b/pandas/tests/series/test_operators.py @@ -693,6 +693,25 @@ def test_td64series_rsub_int_series_invalid(self, tdser): with pytest.raises(TypeError): Series([2, 3, 4]) - tdser + def test_td64_series_add_intlike(self): + # GH#19123 + tdi = pd.TimedeltaIndex(['59 days', '59 days', 'NaT']) + ser = Series(tdi) + + other = Series([20, 30, 40], dtype='uint8') + + pytest.raises(TypeError, ser.__add__, 1) + pytest.raises(TypeError, ser.__sub__, 1) + + pytest.raises(TypeError, ser.__add__, other) + pytest.raises(TypeError, ser.__sub__, other) + + pytest.raises(TypeError, ser.__add__, other.values) + pytest.raises(TypeError, ser.__sub__, other.values) + + pytest.raises(TypeError, ser.__add__, pd.Index(other)) + pytest.raises(TypeError, ser.__sub__, pd.Index(other)) + @pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)]) def test_td64series_add_sub_numeric_scalar_invalid(self, scalar, tdser): with pytest.raises(TypeError): @@ -1533,6 +1552,26 @@ def test_dt64_series_arith_overflow(self): res = dt - ser tm.assert_series_equal(res, -expected) + @pytest.mark.parametrize('tz', [None, 'Asia/Tokyo']) + def test_dt64_series_add_intlike(self, tz): + # GH#19123 + dti = pd.DatetimeIndex(['2016-01-02', '2016-02-03', 'NaT'], tz=tz) + ser = Series(dti) + + other = Series([20, 30, 40], dtype='uint8') + + pytest.raises(TypeError, ser.__add__, 1) + pytest.raises(TypeError, ser.__sub__, 1) + + pytest.raises(TypeError, ser.__add__, other) + pytest.raises(TypeError, ser.__sub__, other) + + pytest.raises(TypeError, ser.__add__, other.values) + pytest.raises(TypeError, ser.__sub__, other.values) + + pytest.raises(TypeError, ser.__add__, pd.Index(other)) + pytest.raises(TypeError, ser.__sub__, pd.Index(other)) + class TestSeriesOperators(TestData): def test_op_method(self): diff --git a/pandas/tests/series/test_timeseries.py b/pandas/tests/series/test_timeseries.py index 6e711abf4491b..b9c95c372ab9e 100644 --- a/pandas/tests/series/test_timeseries.py +++ 
b/pandas/tests/series/test_timeseries.py @@ -11,6 +11,8 @@ import pandas.util._test_decorators as td from pandas._libs.tslib import iNaT from pandas.compat import lrange, StringIO, product +from pandas.errors import NullFrequencyError + from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.indexes.datetimes import DatetimeIndex from pandas.tseries.offsets import BDay, BMonthEnd @@ -123,7 +125,7 @@ def test_shift2(self): tm.assert_index_equal(result.index, exp_index) idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04']) - pytest.raises(ValueError, idx.shift, 1) + pytest.raises(NullFrequencyError, idx.shift, 1) def test_shift_dst(self): # GH 13926
Series[datetime64] +/- int is currently raising a ValueError when it should be raising a TypeError. This was introduced in #19024. In the process of fixing this bug, this also goes most of the way towards fixing #19123 by raising on add/sub with integer arrays. Setup: ``` dti = pd.date_range('2016-01-01', periods=2) ser = pd.Series(dti) ``` ATM: ``` >>> ser + 1 [...] ValueError: Cannot shift with no freq ``` After this PR (and also in 0.22.0 and anything before #19024): ``` >>> ser + 1 [...] TypeError: incompatible type for a datetime/timedelta operation [__add__] ``` - [ ] closes #xxxx - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19147
2018-01-09T04:32:23Z
2018-01-17T00:14:02Z
2018-01-17T00:14:02Z
2018-02-11T21:59:09Z
simplify CBMonth.apply to remove roll_monthday
diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 585c904a601ed..1dea41801003d 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -847,29 +847,6 @@ cpdef int roll_convention(int other, int n, int compare): return n -cpdef int roll_monthday(datetime other, int n, datetime compare): - """ - Possibly increment or decrement the number of periods to shift - based on rollforward/rollbackward conventions. - - Parameters - ---------- - other : datetime - n : number of periods to increment, before adjusting for rolling - compare : datetime - - Returns - ------- - n : int number of periods to increment - """ - if n > 0 and other < compare: - n -= 1 - elif n <= 0 and other > compare: - # as if rolled forward already - n += 1 - return n - - cpdef int roll_qtrday(datetime other, int n, int month, object day_opt, int modby=3) except? -1: """ diff --git a/pandas/tests/tseries/offsets/test_liboffsets.py b/pandas/tests/tseries/offsets/test_liboffsets.py index 1e0ecc39084eb..a31a79d2f68ed 100644 --- a/pandas/tests/tseries/offsets/test_liboffsets.py +++ b/pandas/tests/tseries/offsets/test_liboffsets.py @@ -156,22 +156,6 @@ def test_roll_qtrday(): assert roll_qtrday(other, n, month, 'business_end', modby=3) == n -def test_roll_monthday(): - other = Timestamp('2017-12-29', tz='US/Pacific') - before = Timestamp('2017-12-01', tz='US/Pacific') - after = Timestamp('2017-12-31', tz='US/Pacific') - - n = 42 - assert liboffsets.roll_monthday(other, n, other) == n - assert liboffsets.roll_monthday(other, n, before) == n - assert liboffsets.roll_monthday(other, n, after) == n - 1 - - n = -4 - assert liboffsets.roll_monthday(other, n, other) == n - assert liboffsets.roll_monthday(other, n, before) == n + 1 - assert liboffsets.roll_monthday(other, n, after) == n - - def test_roll_convention(): other = 29 before = 1 diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 7c5fe2f0314e4..3c842affd44b7 100644 --- 
a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1039,55 +1039,62 @@ def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri', _CustomMixin.__init__(self, weekmask, holidays, calendar) @cache_readonly - def cbday(self): - kwds = self.kwds - return CustomBusinessDay(n=self.n, normalize=self.normalize, **kwds) + def cbday_roll(self): + """Define default roll function to be called in apply method""" + cbday = CustomBusinessDay(n=self.n, normalize=False, **self.kwds) + + if self._prefix.endswith('S'): + # MonthBegin + roll_func = cbday.rollforward + else: + # MonthEnd + roll_func = cbday.rollback + return roll_func @cache_readonly def m_offset(self): if self._prefix.endswith('S'): - # MonthBegin: - return MonthBegin(n=1, normalize=self.normalize) + # MonthBegin + moff = MonthBegin(n=1, normalize=False) else: # MonthEnd - return MonthEnd(n=1, normalize=self.normalize) + moff = MonthEnd(n=1, normalize=False) + return moff - -class CustomBusinessMonthEnd(_CustomBusinessMonth): - __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end') - _prefix = 'CBM' + @cache_readonly + def month_roll(self): + """Define default roll function to be called in apply method""" + if self._prefix.endswith('S'): + # MonthBegin + roll_func = self.m_offset.rollback + else: + # MonthEnd + roll_func = self.m_offset.rollforward + return roll_func @apply_wraps def apply(self, other): # First move to month offset - cur_mend = self.m_offset.rollforward(other) + cur_month_offset_date = self.month_roll(other) # Find this custom month offset - compare_date = self.cbday.rollback(cur_mend) - n = liboffsets.roll_monthday(other, self.n, compare_date) + compare_date = self.cbday_roll(cur_month_offset_date) + n = liboffsets.roll_convention(other.day, self.n, compare_date.day) - new = cur_mend + n * self.m_offset - result = self.cbday.rollback(new) + new = cur_month_offset_date + n * self.m_offset + result = self.cbday_roll(new) return result +class 
CustomBusinessMonthEnd(_CustomBusinessMonth): + __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'end') + _prefix = 'CBM' + + class CustomBusinessMonthBegin(_CustomBusinessMonth): __doc__ = _CustomBusinessMonth.__doc__.replace('[BEGIN/END]', 'beginning') _prefix = 'CBMS' - @apply_wraps - def apply(self, other): - # First move to month offset - cur_mbegin = self.m_offset.rollback(other) - - # Find this custom month offset - compare_date = self.cbday.rollforward(cur_mbegin) - n = liboffsets.roll_monthday(other, self.n, compare_date) - - new = cur_mbegin + n * self.m_offset - result = self.cbday.rollforward(new) - return result - # --------------------------------------------------------------------- # Semi-Month Based Offset Classes
The implementations of CustomBusinessMonthBegin.apply and CustomBusinessMonthEnd.apply are merged. Simplify CustomBusinessMonth.apply so that it uses liboffsets.roll_convention instead of liboffsets.roll_monthday (the latter being much more of a footgun). This is the last place where roll_monthday was used, so this removes it. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19146
2018-01-09T02:12:42Z
2018-01-10T12:58:24Z
2018-01-10T12:58:24Z
2018-01-23T04:40:34Z
Fix URL in FutureWarning for .loc[list-of-labels] with missing labels
diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 4ebc8b82aaa47..750b260c7f228 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -640,6 +640,7 @@ For getting *multiple* indexers, using ``.get_indexer``: dfd.iloc[[0, 2], dfd.columns.get_indexer(['A', 'B'])] +.. _deprecate_loc_reindex_listlike: .. _indexing.deprecate_loc_reindex_listlike: Indexing with list with missing labels is Deprecated diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py index fa6614d27cd19..e2c4043f0508d 100755 --- a/pandas/core/indexing.py +++ b/pandas/core/indexing.py @@ -1479,7 +1479,7 @@ def _has_valid_type(self, key, axis): KeyError in the future, you can use .reindex() as an alternative. See the documentation here: - http://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""") # noqa + https://pandas.pydata.org/pandas-docs/stable/indexing.html#deprecate-loc-reindex-listlike""") # noqa if not (ax.is_categorical() or ax.is_interval()): warnings.warn(_missing_key_warning,
- [X] closes #19143 - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
https://api.github.com/repos/pandas-dev/pandas/pulls/19145
2018-01-09T01:40:11Z
2018-01-09T12:54:27Z
2018-01-09T12:54:27Z
2018-01-09T19:23:16Z
Fix offset __inits__, apply_index dtypes
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 8a760c5e5b0de..c903c3bfd122a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -381,7 +381,9 @@ Conversion - Fixed bug where comparing :class:`DatetimeIndex` failed to raise ``TypeError`` when attempting to compare timezone-aware and timezone-naive datetimelike objects (:issue:`18162`) - Bug in :class:`DatetimeIndex` where the repr was not showing high-precision time values at the end of a day (e.g., 23:59:59.999999999) (:issue:`19030`) - Bug where dividing a scalar timedelta-like object with :class:`TimedeltaIndex` performed the reciprocal operation (:issue:`19125`) +- Bug in :class:`WeekOfMonth` and :class:`LastWeekOfMonth` where default keyword arguments for constructor raised ``ValueError`` (:issue:`19142`) - Bug in localization of a naive, datetime string in a ``Series`` constructor with a ``datetime64[ns, tz]`` dtype (:issue:`174151`) +- :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`) Indexing ^^^^^^^^ @@ -473,4 +475,3 @@ Other ^^^^^ - Improved error message when attempting to use a Python keyword as an identifier in a ``numexpr`` backed query (:issue:`18221`) -- :func:`Timestamp.replace` will now handle Daylight Savings transitions gracefully (:issue:`18319`) diff --git a/pandas/_libs/tslibs/offsets.pyx b/pandas/_libs/tslibs/offsets.pyx index 1dea41801003d..700ba5b6e48f7 100644 --- a/pandas/_libs/tslibs/offsets.pyx +++ b/pandas/_libs/tslibs/offsets.pyx @@ -290,27 +290,6 @@ class CacheableOffset(object): _cacheable = True -class EndMixin(object): - # helper for vectorized offsets - - def _end_apply_index(self, i, freq): - """Offsets index to end of Period frequency""" - - off = i.to_perioddelta('D') - - base, mult = get_freq_code(freq) - base_period = i.to_period(base) - if self.n > 0: - # when adding, dates on end roll to next - roll = np.where(base_period.to_timestamp(how='end') == i - 
off, - self.n, self.n - 1) - else: - roll = self.n - - base = (base_period + roll).to_timestamp(how='end') - return base + off - - # --------------------------------------------------------------------- # Base Classes @@ -675,11 +654,8 @@ def shift_months(int64_t[:] dtindex, int months, object day=None): months_to_roll = months compare_day = get_firstbday(dts.year, dts.month) - if months_to_roll > 0 and dts.day < compare_day: - months_to_roll -= 1 - elif months_to_roll <= 0 and dts.day > compare_day: - # as if rolled forward already - months_to_roll += 1 + months_to_roll = roll_convention(dts.day, months_to_roll, + compare_day) dts.year = year_add_months(dts, months_to_roll) dts.month = month_add_months(dts, months_to_roll) @@ -698,11 +674,8 @@ def shift_months(int64_t[:] dtindex, int months, object day=None): months_to_roll = months compare_day = get_lastbday(dts.year, dts.month) - if months_to_roll > 0 and dts.day < compare_day: - months_to_roll -= 1 - elif months_to_roll <= 0 and dts.day > compare_day: - # as if rolled forward already - months_to_roll += 1 + months_to_roll = roll_convention(dts.day, months_to_roll, + compare_day) dts.year = year_add_months(dts, months_to_roll) dts.month = month_add_months(dts, months_to_roll) @@ -823,7 +796,7 @@ cpdef int get_day_of_month(datetime other, day_opt) except? -1: raise ValueError(day_opt) -cpdef int roll_convention(int other, int n, int compare): +cpdef int roll_convention(int other, int n, int compare) nogil: """ Possibly increment or decrement the number of periods to shift based on rollforward/rollbackward conventions. 
diff --git a/pandas/tests/indexes/datetimes/test_arithmetic.py b/pandas/tests/indexes/datetimes/test_arithmetic.py index fb804266259dc..011b33a4d6f35 100644 --- a/pandas/tests/indexes/datetimes/test_arithmetic.py +++ b/pandas/tests/indexes/datetimes/test_arithmetic.py @@ -495,7 +495,7 @@ def test_dt64_with_DateOffsets_relativedelta(klass, assert_func): assert_func(klass([x - op for x in vec]), vec - op) -@pytest.mark.parametrize('cls_name', [ +@pytest.mark.parametrize('cls_and_kwargs', [ 'YearBegin', ('YearBegin', {'month': 5}), 'YearEnd', ('YearEnd', {'month': 5}), 'MonthBegin', 'MonthEnd', @@ -518,7 +518,7 @@ def test_dt64_with_DateOffsets_relativedelta(klass, assert_func): @pytest.mark.parametrize('klass,assert_func', [ (Series, tm.assert_series_equal), (DatetimeIndex, tm.assert_index_equal)]) -def test_dt64_with_DateOffsets(klass, assert_func, normalize, cls_name): +def test_dt64_with_DateOffsets(klass, assert_func, normalize, cls_and_kwargs): # GH#10699 # assert these are equal on a piecewise basis vec = klass([Timestamp('2000-01-05 00:15:00'), @@ -530,11 +530,12 @@ def test_dt64_with_DateOffsets(klass, assert_func, normalize, cls_name): Timestamp('2000-05-15'), Timestamp('2001-06-15')]) - if isinstance(cls_name, tuple): + if isinstance(cls_and_kwargs, tuple): # If cls_name param is a tuple, then 2nd entry is kwargs for # the offset constructor - cls_name, kwargs = cls_name + cls_name, kwargs = cls_and_kwargs else: + cls_name = cls_and_kwargs kwargs = {} offset_cls = getattr(pd.offsets, cls_name) diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 23e627aeba017..b086884ecd250 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3087,6 +3087,13 @@ def test_get_offset_day_error(): DateOffset()._get_offset_day(datetime.now()) +def test_valid_default_arguments(offset_types): + # GH#19142 check that the calling the constructors without passing + # 
any keyword arguments produce valid offsets + cls = offset_types + cls() + + @pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds))) def test_valid_month_attributes(kwd, month_classes): # GH#18226 diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 3c842affd44b7..e6b9f66c094c1 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -16,7 +16,7 @@ from pandas._libs import tslib, Timestamp, OutOfBoundsDatetime, Timedelta from pandas.util._decorators import cache_readonly -from pandas._libs.tslibs import ccalendar +from pandas._libs.tslibs import ccalendar, frequencies as libfrequencies from pandas._libs.tslibs.timedeltas import delta_to_nanoseconds import pandas._libs.tslibs.offsets as liboffsets from pandas._libs.tslibs.offsets import ( @@ -27,7 +27,6 @@ apply_index_wraps, roll_yearday, shift_month, - EndMixin, BaseOffset) @@ -1233,7 +1232,19 @@ def _get_roll(self, i, before_day_of_month, after_day_of_month): return roll def _apply_index_days(self, i, roll): - i += (roll % 2) * Timedelta(days=self.day_of_month).value + """Add days portion of offset to DatetimeIndex i + + Parameters + ---------- + i : DatetimeIndex + roll : ndarray[int64_t] + + Returns + ------- + result : DatetimeIndex + """ + nanos = (roll % 2) * Timedelta(days=self.day_of_month).value + i += nanos.astype('timedelta64[ns]') return i + Timedelta(days=-1) @@ -1278,13 +1289,25 @@ def _get_roll(self, i, before_day_of_month, after_day_of_month): return roll def _apply_index_days(self, i, roll): - return i + (roll % 2) * Timedelta(days=self.day_of_month - 1).value + """Add days portion of offset to DatetimeIndex i + + Parameters + ---------- + i : DatetimeIndex + roll : ndarray[int64_t] + + Returns + ------- + result : DatetimeIndex + """ + nanos = (roll % 2) * Timedelta(days=self.day_of_month - 1).value + return i + nanos.astype('timedelta64[ns]') # --------------------------------------------------------------------- # Week-Based Offset 
Classes -class Week(EndMixin, DateOffset): +class Week(DateOffset): """ Weekly offset @@ -1332,7 +1355,34 @@ def apply_index(self, i): return ((i.to_period('W') + self.n).to_timestamp() + i.to_perioddelta('W')) else: - return self._end_apply_index(i, self.freqstr) + return self._end_apply_index(i) + + def _end_apply_index(self, dtindex): + """Add self to the given DatetimeIndex, specialized for case where + self.weekday is non-null. + + Parameters + ---------- + dtindex : DatetimeIndex + + Returns + ------- + result : DatetimeIndex + """ + off = dtindex.to_perioddelta('D') + + base, mult = libfrequencies.get_freq_code(self.freqstr) + base_period = dtindex.to_period(base) + if self.n > 0: + # when adding, dates on end roll to next + normed = dtindex - off + roll = np.where(base_period.to_timestamp(how='end') == normed, + self.n, self.n - 1) + else: + roll = self.n + + base = (base_period + roll).to_timestamp(how='end') + return base + off def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -1387,9 +1437,9 @@ class WeekOfMonth(_WeekOfMonthMixin, DateOffset): Parameters ---------- n : int - week : {0, 1, 2, 3, ...}, default None + week : {0, 1, 2, 3, ...}, default 0 0 is 1st week of month, 1 2nd week, etc. 
- weekday : {0, 1, ..., 6}, default None + weekday : {0, 1, ..., 6}, default 0 0: Mondays 1: Tuesdays 2: Wednesdays @@ -1401,7 +1451,7 @@ class WeekOfMonth(_WeekOfMonthMixin, DateOffset): _prefix = 'WOM' _adjust_dst = True - def __init__(self, n=1, normalize=False, week=None, weekday=None): + def __init__(self, n=1, normalize=False, week=0, weekday=0): self.n = self._validate_n(n) self.normalize = normalize self.weekday = weekday @@ -1464,7 +1514,7 @@ class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset): Parameters ---------- n : int, default 1 - weekday : {0, 1, ..., 6}, default None + weekday : {0, 1, ..., 6}, default 0 0: Mondays 1: Tuesdays 2: Wednesdays @@ -1477,7 +1527,7 @@ class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset): _prefix = 'LWOM' _adjust_dst = True - def __init__(self, n=1, normalize=False, weekday=None): + def __init__(self, n=1, normalize=False, weekday=0): self.n = self._validate_n(n) self.normalize = normalize self.weekday = weekday
This PR does two main things: 1) Change the default kwargs for offset `__init__` methods so that the defaults are valid; ATM `WeekOfMonth()` and `LastWeekOfMonth()` will both raise `ValueError`. 2) ATM `Week.apply_index` adds an int64 array to a `DatetimeIndex`. This happens to have the desired output because the int64 array represents nanoseconds. This explicitly casts the array to `timedelta64[ns]` to make this explicit. <b>This is a necessary step towards fixing #19123.</b> In addition there are two small pieces of refactoring: 3) Remove `liboffsets.EndMixin` since it is only mixed in to `Week`. 4) Use `liboffsets.roll_convention` to de-duplicate some code in `liboffsets.shift_months`. - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19142
2018-01-08T22:22:25Z
2018-01-12T11:45:21Z
2018-01-12T11:45:21Z
2018-02-11T22:01:15Z
Doc: Example of merging Dataframe and Series
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index 5da0f4fd07819..7ed0b1c800183 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -332,3 +332,97 @@ using something similar to the following: See `the NumPy documentation on byte order <https://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more details. + + +Alternative to storing lists in Pandas DataFrame Cells +------------------------------------------------------ +Storing nested lists/arrays inside a pandas object should be avoided for performance and memory use reasons. Instead they should be "exploded" into a flat DataFrame structure. + +Example of exploding nested lists into a DataFrame: + +.. ipython:: python + + from collections import OrderedDict + df = (pd.DataFrame(OrderedDict([('name', ['A.J. Price']*3), + ('opponent', ['76ers', 'blazers', 'bobcats']), + ('attribute x', ['A','B','C']) + ]) + )) + df + + nn = [['Zach LaVine', 'Jeremy Lin', 'Nate Robinson', 'Isaia']]*3 + nn + + # Step 1: Create an index with the "parent" columns to be included in the final Dataframe + df2 = pd.concat([df[['name','opponent']], pd.DataFrame(nn)], axis=1) + df2 + + # Step 2: Transform the column with lists into series, which become columns in a new Dataframe. + # Note that only the index from the original df is retained - + # any other columns in the original df are not part of the new df + df3 = df2.set_index(['name', 'opponent']) + df3 + + # Step 3: Stack the new columns as rows; this creates a new index level we'll want to drop in the next step. 
+ # Note that at this point we have a Series, not a Dataframe + ser = df3.stack() + ser + + # Step 4: Drop the extraneous index level created by the stack + ser.reset_index(level=2, drop=True, inplace=True) + ser + + # Step 5: Create a Dataframe from the Series + df4 = ser.to_frame('nearest_neighbors') + df4 + + # All steps in one stack + df4 = (df2.set_index(['name', 'opponent']) + .stack() + .reset_index(level=2, drop=True) + .to_frame('nearest_neighbors')) + df4 + +Example of exploding a list embedded in a dataframe: + +.. ipython:: python + + df = (pd.DataFrame(OrderedDict([('name', ['A.J. Price']*3), + ('opponent', ['76ers', 'blazers', 'bobcats']), + ('attribute x', ['A','B','C']), + ('nearest_neighbors', [['Zach LaVine', 'Jeremy Lin', 'Nate Robinson', 'Isaia']]*3) + ]) + )) + + df + + # Step 1: Create an index with the "parent" columns to be included in the final Dataframe + df2 = df.set_index(['name', 'opponent']) + df2 + + # Step 2: Transform the column with lists into series, which become columns in a new Dataframe. + # Note that only the index from the original df is retained - + # any other columns in the original df are not part of the new df + df3 = df2.nearest_neighbors.apply(pd.Series) + df3 + + # Step 3: Stack the new columns as rows; this creates a new index level we'll want to drop in the next step. 
+ # Note that at this point we have a Series, not a Dataframe + ser = df3.stack() + ser + + # Step 4: Drop the extraneous index level created by the stack + ser.reset_index(level=2, drop=True, inplace=True) + ser + + # Step 5: Create a Dataframe from the Series + df4 = ser.to_frame('nearest_neighbors') + df4 + + # All steps in one stack + df4 = (df.set_index(['name', 'opponent']) + .nearest_neighbors.apply(pd.Series) + .stack() + .reset_index(level=2, drop=True) + .to_frame('nearest_neighbors')) + df4 diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 552ddabb7359a..1f9befb4bd59c 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -915,13 +915,17 @@ The dimension of the returned result can also change: So depending on the path taken, and exactly what you are grouping. Thus the grouped columns(s) may be included in the output as well as set the indices. -.. warning:: +.. warnings:: - In the current implementation apply calls func twice on the + * In the current implementation apply calls func twice on the first group to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if func has side-effects, as they will take effect twice for the first group. + + * Apply should not perform in-place operations on the group chunk. + Group chunks should be treated as immutable, and changes to a + group chunk may produce unexpected results. .. ipython:: python @@ -955,6 +959,42 @@ will be (silently) dropped. Thus, this does not pose any problems: df.groupby('A').std() +.. note:: + Decimal columns are also "nuisance" columns. They are excluded from aggregate functions automatically in groupby. + + If you do wish to include decimal columns in the aggregation, you must do so explicitly: + +.. 
ipython:: python + + from decimal import Decimal + dec = pd.DataFrame( + {'name': ['foo', 'bar', 'foo', 'bar'], + 'title': ['boo', 'far', 'boo', 'far'], + 'id': [123, 456, 123, 456], + 'int_column': [1, 2, 3, 4], + 'dec_column1': [Decimal('0.50'), Decimal('0.15'), Decimal('0.25'), Decimal('0.40')], + 'dec_column2': [Decimal('0.20'), Decimal('0.30'), Decimal('0.55'), Decimal('0.60')] + }, + columns=['name','title','id','int_column','dec_column1','dec_column2'] + ) + + dec.head() + + dec.dtypes + + # Decimal columns excluded from sum by default + dec.groupby(['name', 'title', 'id'], as_index=False).sum() + + # Decimal columns can be sum'd explicitly by themselves... + dec.groupby(['name', 'title', 'id'], as_index=False)['dec_column1','dec_column2'].sum() + + # ...but cannot be combined with standard data types or they will be excluded + dec.groupby(['name', 'title', 'id'], as_index=False)['int_column','dec_column1','dec_column2'].sum() + + # Use .agg function to aggregate over standard and "nuisance" data types at the same time + dec.groupby(['name', 'title', 'id'], as_index=False).agg({'int_column': 'sum', 'dec_column1': 'sum', 'dec_column2': 'sum'}) + + .. _groupby.missing: NA and NaT group handling diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 86d2ec2254057..9aad7e5bf079f 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -712,6 +712,32 @@ either the left or right tables, the values in the joined table will be labels=['left', 'right'], vertical=False); plt.close('all'); +To join a Series and a DataFrame, the Series has to be transformed into a DataFrame first: + +.. 
ipython:: python + + df = pd.DataFrame({"Let": ["A", "B", "C"], "Num": [1, 2, 3]}) + df + + # The series has a multi-index with levels corresponding to columns in the DataFrame we want to merge with + ser = pd.Series( + ['a', 'b', 'c', 'd', 'e', 'f'], + index=pd.MultiIndex.from_arrays([["A", "B", "C"]*2, [1, 2, 3, 4, 5, 6]]) + ) + ser + + # Name the row index levels + ser.index.names=['Let','Num'] + ser + + # reset_index turns the multi-level row index into columns, which requires a DataFrame + df2 = ser.reset_index() + type(df2) + + # Now we merge the DataFrames + pd.merge(df, df2, on=['Let','Num']) + + Here is another example with duplicate join keys in DataFrames: .. ipython:: python
- [ ] closes #12550
https://api.github.com/repos/pandas-dev/pandas/pulls/19140
2018-01-08T20:55:10Z
2018-01-12T22:18:26Z
null
2023-05-11T01:17:06Z
Make TimedeltaIndex +/- pd.NaT return TimedeltaIndex
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 14949267fc37d..9df7563d3b869 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -273,6 +273,7 @@ Other API Changes - The default ``Timedelta`` constructor now accepts an ``ISO 8601 Duration`` string as an argument (:issue:`19040`) - ``IntervalDtype`` now returns ``True`` when compared against ``'interval'`` regardless of subtype, and ``IntervalDtype.name`` now returns ``'interval'`` regardless of subtype (:issue:`18980`) - :func:`Series.to_csv` now accepts a ``compression`` argument that works in the same way as the ``compression`` argument in :func:`DataFrame.to_csv` (:issue:`18958`) +- Addition or subtraction of ``NaT`` from :class:`TimedeltaIndex` will return ``TimedeltaIndex`` instead of ``DatetimeIndex`` (:issue:`19124`) .. _whatsnew_0230.deprecations: diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 12ca26cfe0266..866329b16c830 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -403,7 +403,8 @@ def _add_datelike(self, other): # adding a timedeltaindex to a datetimelike from pandas import Timestamp, DatetimeIndex if other is NaT: - result = self._nat_new(box=False) + # GH#19124 pd.NaT is treated like a timedelta + return self._nat_new() else: other = Timestamp(other) i8 = self.asi8 @@ -413,12 +414,13 @@ def _add_datelike(self, other): return DatetimeIndex(result, name=self.name, copy=False) def _sub_datelike(self, other): - from pandas import DatetimeIndex + # GH#19124 Timedelta - datetime is not in general well-defined. + # We make an exception for pd.NaT, which in this case quacks + # like a timedelta. 
if other is NaT: - result = self._nat_new(box=False) + return self._nat_new() else: raise TypeError("cannot subtract a datelike from a TimedeltaIndex") - return DatetimeIndex(result, name=self.name, copy=False) def _add_offset_array(self, other): # Array/Index of DateOffset objects diff --git a/pandas/tests/scalar/test_nat.py b/pandas/tests/scalar/test_nat.py index d0d204253e3f1..73e8c783ba882 100644 --- a/pandas/tests/scalar/test_nat.py +++ b/pandas/tests/scalar/test_nat.py @@ -302,14 +302,30 @@ def test_nat_arithmetic_index(): tm.assert_index_equal(left - right, exp) tm.assert_index_equal(right - left, exp) - # timedelta + # timedelta # GH#19124 tdi = TimedeltaIndex(['1 day', '2 day'], name='x') - exp = DatetimeIndex([NaT, NaT], name='x') - for (left, right) in [(NaT, tdi)]: - tm.assert_index_equal(left + right, exp) - tm.assert_index_equal(right + left, exp) - tm.assert_index_equal(left - right, exp) - tm.assert_index_equal(right - left, exp) + tdi_nat = TimedeltaIndex([NaT, NaT], name='x') + + tm.assert_index_equal(tdi + NaT, tdi_nat) + tm.assert_index_equal(NaT + tdi, tdi_nat) + tm.assert_index_equal(tdi - NaT, tdi_nat) + tm.assert_index_equal(NaT - tdi, tdi_nat) + + +@pytest.mark.parametrize('box, assert_func', [ + (TimedeltaIndex, tm.assert_index_equal), + pytest.param(Series, tm.assert_series_equal, + marks=pytest.mark.xfail(reason='NaT - Series returns NaT')) +]) +def test_nat_arithmetic_td64_vector(box, assert_func): + # GH#19124 + vec = box(['1 day', '2 day'], dtype='timedelta64[ns]') + box_nat = box([NaT, NaT], dtype='timedelta64[ns]') + + assert_func(vec + NaT, box_nat) + assert_func(NaT + vec, box_nat) + assert_func(vec - NaT, box_nat) + assert_func(NaT - vec, box_nat) def test_nat_pinned_docstrings():
- [x] closes #19124 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19139
2018-01-08T17:45:00Z
2018-01-16T00:02:43Z
2018-01-16T00:02:43Z
2018-01-23T04:40:29Z
CI: Pin NumPy in 3.6 build
diff --git a/ci/requirements-3.6.build b/ci/requirements-3.6.build index 1c4b46aea3865..94e1152450d87 100644 --- a/ci/requirements-3.6.build +++ b/ci/requirements-3.6.build @@ -2,5 +2,5 @@ python=3.6* python-dateutil pytz nomkl -numpy +numpy=1.13.* cython
Closes #19137
https://api.github.com/repos/pandas-dev/pandas/pulls/19138
2018-01-08T15:15:55Z
2018-01-08T16:41:39Z
2018-01-08T16:41:39Z
2018-01-09T11:32:05Z
BUG: read_parquet, to_parquet for s3 destinations
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index dc305f36f32ec..a0ce50b881915 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -415,6 +415,7 @@ I/O - Bug in :func:`read_sas` where a file with 0 variables gave an ``AttributeError`` incorrectly. Now it gives an ``EmptyDataError`` (:issue:`18184`) - Bug in :func:`DataFrame.to_latex()` where pairs of braces meant to serve as invisible placeholders were escaped (:issue:`18667`) - Bug in :func:`read_json` where large numeric values were causing an ``OverflowError`` (:issue:`18842`) +- Bug in :func:`DataFrame.to_parquet` where an exception was raised if the write destination is S3 (:issue:`19134`) - Plotting diff --git a/pandas/io/common.py b/pandas/io/common.py index da60698fe529f..c2d1da5a1035d 100644 --- a/pandas/io/common.py +++ b/pandas/io/common.py @@ -91,14 +91,6 @@ def _is_url(url): return False -def _is_s3_url(url): - """Check for an s3, s3n, or s3a url""" - try: - return parse_url(url).scheme in ['s3', 's3n', 's3a'] - except: - return False - - def _expand_user(filepath_or_buffer): """Return the argument with an initial component of ~ or ~user replaced by that user's home directory. @@ -168,8 +160,16 @@ def _stringify_path(filepath_or_buffer): return filepath_or_buffer +def is_s3_url(url): + """Check for an s3, s3n, or s3a url""" + try: + return parse_url(url).scheme in ['s3', 's3n', 's3a'] + except: # noqa + return False + + def get_filepath_or_buffer(filepath_or_buffer, encoding=None, - compression=None): + compression=None, mode=None): """ If the filepath_or_buffer is a url, translate and return the buffer. Otherwise passthrough. 
@@ -179,10 +179,11 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None, filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path), or buffer encoding : the encoding to use to decode py3 bytes, default is 'utf-8' + mode : str, optional Returns ------- - a filepath_or_buffer, the encoding, the compression + a filepath_ or buffer or S3File instance, the encoding, the compression """ filepath_or_buffer = _stringify_path(filepath_or_buffer) @@ -195,11 +196,12 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None, reader = BytesIO(req.read()) return reader, encoding, compression - if _is_s3_url(filepath_or_buffer): + if is_s3_url(filepath_or_buffer): from pandas.io import s3 return s3.get_filepath_or_buffer(filepath_or_buffer, encoding=encoding, - compression=compression) + compression=compression, + mode=mode) if isinstance(filepath_or_buffer, (compat.string_types, compat.binary_type, diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index 0c88706a3bec2..e28e53a840e3b 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -5,7 +5,7 @@ from pandas import DataFrame, RangeIndex, Int64Index, get_option from pandas.compat import string_types from pandas.core.common import AbstractMethodError -from pandas.io.common import get_filepath_or_buffer +from pandas.io.common import get_filepath_or_buffer, is_s3_url def get_engine(engine): @@ -107,7 +107,7 @@ def write(self, df, path, compression='snappy', self.validate_dataframe(df) if self._pyarrow_lt_070: self._validate_write_lt_070(df) - path, _, _ = get_filepath_or_buffer(path) + path, _, _ = get_filepath_or_buffer(path, mode='wb') if self._pyarrow_lt_060: table = self.api.Table.from_pandas(df, timestamps_to_ms=True) @@ -194,14 +194,32 @@ def write(self, df, path, compression='snappy', **kwargs): # thriftpy/protocol/compact.py:339: # DeprecationWarning: tostring() is deprecated. # Use tobytes() instead. 
- path, _, _ = get_filepath_or_buffer(path) + + if is_s3_url(path): + # path is s3:// so we need to open the s3file in 'wb' mode. + # TODO: Support 'ab' + + path, _, _ = get_filepath_or_buffer(path, mode='wb') + # And pass the opened s3file to the fastparquet internal impl. + kwargs['open_with'] = lambda path, _: path + else: + path, _, _ = get_filepath_or_buffer(path) + with catch_warnings(record=True): self.api.write(path, df, compression=compression, **kwargs) def read(self, path, columns=None, **kwargs): - path, _, _ = get_filepath_or_buffer(path) - parquet_file = self.api.ParquetFile(path) + if is_s3_url(path): + # When path is s3:// an S3File is returned. + # We need to retain the original path(str) while also + # pass the S3File().open function to fsatparquet impl. + s3, _, _ = get_filepath_or_buffer(path) + parquet_file = self.api.ParquetFile(path, open_with=s3.s3.open) + else: + path, _, _ = get_filepath_or_buffer(path) + parquet_file = self.api.ParquetFile(path) + return parquet_file.to_pandas(columns=columns, **kwargs) diff --git a/pandas/io/s3.py b/pandas/io/s3.py index 5e48de757d00e..e2650e29c0db3 100644 --- a/pandas/io/s3.py +++ b/pandas/io/s3.py @@ -19,10 +19,14 @@ def _strip_schema(url): def get_filepath_or_buffer(filepath_or_buffer, encoding=None, - compression=None): + compression=None, mode=None): + + if mode is None: + mode = 'rb' + fs = s3fs.S3FileSystem(anon=False) try: - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer)) + filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) except (OSError, NoCredentialsError): # boto3 has troubles when trying to access a public file # when credentialed... @@ -31,5 +35,5 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None, # A NoCredentialsError is raised if you don't have creds # for that bucket. 
fs = s3fs.S3FileSystem(anon=True) - filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer)) + filepath_or_buffer = fs.open(_strip_schema(filepath_or_buffer), mode) return filepath_or_buffer, None, compression diff --git a/pandas/tests/io/test_parquet.py b/pandas/tests/io/test_parquet.py index 31c2ded49b7a0..d472a5ed23c75 100644 --- a/pandas/tests/io/test_parquet.py +++ b/pandas/tests/io/test_parquet.py @@ -204,6 +204,22 @@ def test_cross_engine_fp_pa(df_cross_compat, pa, fp): tm.assert_frame_equal(result, df[['a', 'd']]) +def check_round_trip_equals(df, path, engine, + write_kwargs, read_kwargs, + expected, check_names): + + df.to_parquet(path, engine, **write_kwargs) + actual = read_parquet(path, engine, **read_kwargs) + tm.assert_frame_equal(expected, actual, + check_names=check_names) + + # repeat + df.to_parquet(path, engine, **write_kwargs) + actual = read_parquet(path, engine, **read_kwargs) + tm.assert_frame_equal(expected, actual, + check_names=check_names) + + class Base(object): def check_error_on_write(self, df, engine, exc): @@ -212,28 +228,32 @@ def check_error_on_write(self, df, engine, exc): with tm.ensure_clean() as path: to_parquet(df, path, engine, compression=None) - def check_round_trip(self, df, engine, expected=None, + def check_round_trip(self, df, engine, expected=None, path=None, write_kwargs=None, read_kwargs=None, check_names=True): + if write_kwargs is None: - write_kwargs = {} + write_kwargs = {'compression': None} + if read_kwargs is None: read_kwargs = {} - with tm.ensure_clean() as path: - df.to_parquet(path, engine, **write_kwargs) - result = read_parquet(path, engine, **read_kwargs) - if expected is None: - expected = df - tm.assert_frame_equal(result, expected, check_names=check_names) - - # repeat - to_parquet(df, path, engine, **write_kwargs) - result = pd.read_parquet(path, engine, **read_kwargs) + if expected is None: + expected = df - if expected is None: - expected = df - tm.assert_frame_equal(result, expected, 
check_names=check_names) + if path is None: + with tm.ensure_clean() as path: + check_round_trip_equals(df, path, engine, + write_kwargs=write_kwargs, + read_kwargs=read_kwargs, + expected=expected, + check_names=check_names) + else: + check_round_trip_equals(df, path, engine, + write_kwargs=write_kwargs, + read_kwargs=read_kwargs, + expected=expected, + check_names=check_names) class TestBasic(Base): @@ -251,7 +271,7 @@ def test_columns_dtypes(self, engine): # unicode df.columns = [u'foo', u'bar'] - self.check_round_trip(df, engine, write_kwargs={'compression': None}) + self.check_round_trip(df, engine) def test_columns_dtypes_invalid(self, engine): @@ -292,7 +312,6 @@ def test_read_columns(self, engine): expected = pd.DataFrame({'string': list('abc')}) self.check_round_trip(df, engine, expected=expected, - write_kwargs={'compression': None}, read_kwargs={'columns': ['string']}) def test_write_index(self, engine): @@ -304,7 +323,7 @@ def test_write_index(self, engine): pytest.skip("pyarrow is < 0.7.0") df = pd.DataFrame({'A': [1, 2, 3]}) - self.check_round_trip(df, engine, write_kwargs={'compression': None}) + self.check_round_trip(df, engine) indexes = [ [2, 3, 4], @@ -315,15 +334,12 @@ def test_write_index(self, engine): # non-default index for index in indexes: df.index = index - self.check_round_trip( - df, engine, - write_kwargs={'compression': None}, - check_names=check_names) + self.check_round_trip(df, engine, check_names=check_names) # index with meta-data df.index = [0, 1, 2] df.index.name = 'foo' - self.check_round_trip(df, engine, write_kwargs={'compression': None}) + self.check_round_trip(df, engine) def test_write_multiindex(self, pa_ge_070): # Not suppoprted in fastparquet as of 0.1.3 or older pyarrow version @@ -332,7 +348,7 @@ def test_write_multiindex(self, pa_ge_070): df = pd.DataFrame({'A': [1, 2, 3]}) index = pd.MultiIndex.from_tuples([('a', 1), ('a', 2), ('b', 1)]) df.index = index - self.check_round_trip(df, engine, 
write_kwargs={'compression': None}) + self.check_round_trip(df, engine) def test_write_column_multiindex(self, engine): # column multi-index @@ -426,6 +442,11 @@ def test_categorical_unsupported(self, pa_lt_070): df = pd.DataFrame({'a': pd.Categorical(list('abc'))}) self.check_error_on_write(df, pa, NotImplementedError) + def test_s3_roundtrip(self, df_compat, s3_resource, pa): + # GH #19134 + self.check_round_trip(df_compat, pa, + path='s3://pandas-test/pyarrow.parquet') + class TestParquetFastParquet(Base): @@ -436,7 +457,7 @@ def test_basic(self, fp, df_full): # additional supported types for fastparquet df['timedelta'] = pd.timedelta_range('1 day', periods=3) - self.check_round_trip(df, fp, write_kwargs={'compression': None}) + self.check_round_trip(df, fp) @pytest.mark.skip(reason="not supported") def test_duplicate_columns(self, fp): @@ -449,8 +470,7 @@ def test_duplicate_columns(self, fp): def test_bool_with_none(self, fp): df = pd.DataFrame({'a': [True, None, False]}) expected = pd.DataFrame({'a': [1.0, np.nan, 0.0]}, dtype='float16') - self.check_round_trip(df, fp, expected=expected, - write_kwargs={'compression': None}) + self.check_round_trip(df, fp, expected=expected) def test_unsupported(self, fp): @@ -466,7 +486,7 @@ def test_categorical(self, fp): if LooseVersion(fastparquet.__version__) < LooseVersion("0.1.3"): pytest.skip("CategoricalDtype not supported for older fp") df = pd.DataFrame({'a': pd.Categorical(list('abc'))}) - self.check_round_trip(df, fp, write_kwargs={'compression': None}) + self.check_round_trip(df, fp) def test_datetime_tz(self, fp): # doesn't preserve tz @@ -475,8 +495,7 @@ def test_datetime_tz(self, fp): # warns on the coercion with catch_warnings(record=True): - self.check_round_trip(df, fp, df.astype('datetime64[ns]'), - write_kwargs={'compression': None}) + self.check_round_trip(df, fp, df.astype('datetime64[ns]')) def test_filter_row_groups(self, fp): d = {'a': list(range(0, 3))} @@ -486,3 +505,8 @@ def 
test_filter_row_groups(self, fp): row_group_offsets=1) result = read_parquet(path, fp, filters=[('a', '==', 0)]) assert len(result) == 1 + + def test_s3_roundtrip(self, df_compat, s3_resource, fp): + # GH #19134 + self.check_round_trip(df_compat, fp, + path='s3://pandas-test/fastparquet.parquet') diff --git a/pandas/tests/io/test_s3.py b/pandas/tests/io/test_s3.py index 8c2a32af33765..7a3062f470ce8 100644 --- a/pandas/tests/io/test_s3.py +++ b/pandas/tests/io/test_s3.py @@ -1,8 +1,8 @@ -from pandas.io.common import _is_s3_url +from pandas.io.common import is_s3_url class TestS3URL(object): def test_is_s3_url(self): - assert _is_s3_url("s3://pandas/somethingelse.com") - assert not _is_s3_url("s4://pandas/somethingelse.com") + assert is_s3_url("s3://pandas/somethingelse.com") + assert not is_s3_url("s4://pandas/somethingelse.com")
- [x] closes #19134 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` closes #19134
https://api.github.com/repos/pandas-dev/pandas/pulls/19135
2018-01-08T12:51:32Z
2018-01-18T00:49:18Z
2018-01-18T00:49:18Z
2018-01-18T09:38:20Z
DOC: Add documentation regarding no compression in to_parquet
diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 62993a3d168db..a8c4053850548 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -1670,8 +1670,8 @@ def to_parquet(self, fname, engine='auto', compression='snappy', Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first library to be installed is used. - compression : str, optional, default 'snappy' - compression method, includes {'gzip', 'snappy', 'brotli'} + compression : {'snappy', 'gzip', 'brotli', 'None'} + Name of the compression to use. Use ``None`` for no compression kwargs Additional keyword arguments passed to the engine """ diff --git a/pandas/io/parquet.py b/pandas/io/parquet.py index e431c9447e8f8..0c88706a3bec2 100644 --- a/pandas/io/parquet.py +++ b/pandas/io/parquet.py @@ -218,8 +218,8 @@ def to_parquet(df, path, engine='auto', compression='snappy', **kwargs): Parquet reader library to use. If 'auto', then the option 'io.parquet.engine' is used. If 'auto', then the first library to be installed is used. - compression : str, optional, default 'snappy' - compression method, includes {'gzip', 'snappy', 'brotli'} + compression : {'snappy', 'gzip', 'brotli', 'None'} + Name of the compression to use. Use ``None`` for no compression kwargs Additional keyword arguments passed to the engine """
https://api.github.com/repos/pandas-dev/pandas/pulls/19131
2018-01-08T07:12:25Z
2018-01-08T19:32:10Z
2018-01-08T19:32:10Z
2018-01-09T04:35:25Z
Parametrized test_json_table_schema module
diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index 76748f30e639b..fc3790287d7da 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -88,82 +88,82 @@ def test_multiindex(self): class TestTableSchemaType(object): - def test_as_json_table_type_int_data(self): + @pytest.mark.parametrize('int_type', [ + np.int, np.int16, np.int32, np.int64]) + def test_as_json_table_type_int_data(self, int_type): int_data = [1, 2, 3] - int_types = [np.int, np.int16, np.int32, np.int64] - for t in int_types: - assert as_json_table_type(np.array( - int_data, dtype=t)) == 'integer' + assert as_json_table_type(np.array( + int_data, dtype=int_type)) == 'integer' - def test_as_json_table_type_float_data(self): + @pytest.mark.parametrize('float_type', [ + np.float, np.float16, np.float32, np.float64]) + def test_as_json_table_type_float_data(self, float_type): float_data = [1., 2., 3.] - float_types = [np.float, np.float16, np.float32, np.float64] - for t in float_types: - assert as_json_table_type(np.array( - float_data, dtype=t)) == 'number' + assert as_json_table_type(np.array( + float_data, dtype=float_type)) == 'number' - def test_as_json_table_type_bool_data(self): + @pytest.mark.parametrize('bool_type', [bool, np.bool]) + def test_as_json_table_type_bool_data(self, bool_type): bool_data = [True, False] - bool_types = [bool, np.bool] - for t in bool_types: - assert as_json_table_type(np.array( - bool_data, dtype=t)) == 'boolean' - - def test_as_json_table_type_date_data(self): - date_data = [pd.to_datetime(['2016']), - pd.to_datetime(['2016'], utc=True), - pd.Series(pd.to_datetime(['2016'])), - pd.Series(pd.to_datetime(['2016'], utc=True)), - pd.period_range('2016', freq='A', periods=3)] - for arr in date_data: - assert as_json_table_type(arr) == 'datetime' - - def test_as_json_table_type_string_data(self): - strings = [pd.Series(['a', 'b']), pd.Index(['a', 
'b'])] - for t in strings: - assert as_json_table_type(t) == 'string' - - def test_as_json_table_type_categorical_data(self): - assert as_json_table_type(pd.Categorical(['a'])) == 'any' - assert as_json_table_type(pd.Categorical([1])) == 'any' - assert as_json_table_type(pd.Series(pd.Categorical([1]))) == 'any' - assert as_json_table_type(pd.CategoricalIndex([1])) == 'any' - assert as_json_table_type(pd.Categorical([1])) == 'any' + assert as_json_table_type(np.array( + bool_data, dtype=bool_type)) == 'boolean' + + @pytest.mark.parametrize('date_data', [ + pd.to_datetime(['2016']), + pd.to_datetime(['2016'], utc=True), + pd.Series(pd.to_datetime(['2016'])), + pd.Series(pd.to_datetime(['2016'], utc=True)), + pd.period_range('2016', freq='A', periods=3) + ]) + def test_as_json_table_type_date_data(self, date_data): + assert as_json_table_type(date_data) == 'datetime' + + @pytest.mark.parametrize('str_data', [ + pd.Series(['a', 'b']), pd.Index(['a', 'b'])]) + def test_as_json_table_type_string_data(self, str_data): + assert as_json_table_type(str_data) == 'string' + + @pytest.mark.parametrize('cat_data', [ + pd.Categorical(['a']), + pd.Categorical([1]), + pd.Series(pd.Categorical([1])), + pd.CategoricalIndex([1]), + pd.Categorical([1])]) + def test_as_json_table_type_categorical_data(self, cat_data): + assert as_json_table_type(cat_data) == 'any' # ------ # dtypes # ------ - def test_as_json_table_type_int_dtypes(self): - integers = [np.int, np.int16, np.int32, np.int64] - for t in integers: - assert as_json_table_type(t) == 'integer' - - def test_as_json_table_type_float_dtypes(self): - floats = [np.float, np.float16, np.float32, np.float64] - for t in floats: - assert as_json_table_type(t) == 'number' - - def test_as_json_table_type_bool_dtypes(self): - bools = [bool, np.bool] - for t in bools: - assert as_json_table_type(t) == 'boolean' - - def test_as_json_table_type_date_dtypes(self): + @pytest.mark.parametrize('int_dtype', [ + np.int, np.int16, np.int32, 
np.int64]) + def test_as_json_table_type_int_dtypes(self, int_dtype): + assert as_json_table_type(int_dtype) == 'integer' + + @pytest.mark.parametrize('float_dtype', [ + np.float, np.float16, np.float32, np.float64]) + def test_as_json_table_type_float_dtypes(self, float_dtype): + assert as_json_table_type(float_dtype) == 'number' + + @pytest.mark.parametrize('bool_dtype', [bool, np.bool]) + def test_as_json_table_type_bool_dtypes(self, bool_dtype): + assert as_json_table_type(bool_dtype) == 'boolean' + + @pytest.mark.parametrize('date_dtype', [ + np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(), + DatetimeTZDtype('ns', 'US/Central')]) + def test_as_json_table_type_date_dtypes(self, date_dtype): # TODO: datedate.date? datetime.time? - dates = [np.datetime64, np.dtype("<M8[ns]"), PeriodDtype(), - DatetimeTZDtype('ns', 'US/Central')] - for t in dates: - assert as_json_table_type(t) == 'datetime' + assert as_json_table_type(date_dtype) == 'datetime' - def test_as_json_table_type_timedelta_dtypes(self): - durations = [np.timedelta64, np.dtype("<m8[ns]")] - for t in durations: - assert as_json_table_type(t) == 'duration' + @pytest.mark.parametrize('td_dtype', [ + np.timedelta64, np.dtype("<m8[ns]")]) + def test_as_json_table_type_timedelta_dtypes(self, td_dtype): + assert as_json_table_type(td_dtype) == 'duration' - def test_as_json_table_type_string_dtypes(self): - strings = [object] # TODO - for t in strings: - assert as_json_table_type(t) == 'string' + @pytest.mark.parametrize('str_dtype', [object]) # TODO + def test_as_json_table_type_string_dtypes(self, str_dtype): + assert as_json_table_type(str_dtype) == 'string' def test_as_json_table_type_categorical_dtypes(self): # TODO: I think before is_categorical_dtype(Categorical) @@ -336,61 +336,55 @@ def test_date_format_raises(self): self.df.to_json(orient='table', date_format='iso') self.df.to_json(orient='table') - def test_convert_pandas_type_to_json_field_int(self): + @pytest.mark.parametrize('kind', [pd.Series, 
pd.Index]) + def test_convert_pandas_type_to_json_field_int(self, kind): data = [1, 2, 3] - kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')] - for kind in kinds: - result = convert_pandas_type_to_json_field(kind) - expected = {"name": "name", "type": 'integer'} - assert result == expected + result = convert_pandas_type_to_json_field(kind(data, name='name')) + expected = {"name": "name", "type": "integer"} + assert result == expected - def test_convert_pandas_type_to_json_field_float(self): + @pytest.mark.parametrize('kind', [pd.Series, pd.Index]) + def test_convert_pandas_type_to_json_field_float(self, kind): data = [1., 2., 3.] - kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')] - for kind in kinds: - result = convert_pandas_type_to_json_field(kind) - expected = {"name": "name", "type": 'number'} - assert result == expected + result = convert_pandas_type_to_json_field(kind(data, name='name')) + expected = {"name": "name", "type": "number"} + assert result == expected - def test_convert_pandas_type_to_json_field_datetime(self): + @pytest.mark.parametrize('dt_args,extra_exp', [ + ({}, {}), ({'utc': True}, {'tz': 'UTC'})]) + @pytest.mark.parametrize('wrapper', [None, pd.Series]) + def test_convert_pandas_type_to_json_field_datetime(self, dt_args, + extra_exp, wrapper): data = [1., 2., 3.] 
- kinds = [pd.Series(pd.to_datetime(data), name='values'), - pd.to_datetime(data)] - for kind in kinds: - result = convert_pandas_type_to_json_field(kind) - expected = {"name": "values", "type": 'datetime'} - assert result == expected - - kinds = [pd.Series(pd.to_datetime(data, utc=True), name='values'), - pd.to_datetime(data, utc=True)] - for kind in kinds: - result = convert_pandas_type_to_json_field(kind) - expected = {"name": "values", "type": 'datetime', "tz": "UTC"} - assert result == expected + data = pd.to_datetime(data, **dt_args) + if wrapper is pd.Series: + data = pd.Series(data, name='values') + result = convert_pandas_type_to_json_field(data) + expected = {"name": "values", "type": 'datetime'} + expected.update(extra_exp) + assert result == expected + def test_convert_pandas_type_to_json_period_range(self): arr = pd.period_range('2016', freq='A-DEC', periods=4) result = convert_pandas_type_to_json_field(arr) expected = {"name": "values", "type": 'datetime', "freq": "A-DEC"} assert result == expected - def test_convert_pandas_type_to_json_field_categorical(self): + @pytest.mark.parametrize('kind', [pd.Categorical, pd.CategoricalIndex]) + @pytest.mark.parametrize('ordered', [True, False]) + def test_convert_pandas_type_to_json_field_categorical(self, kind, + ordered): data = ['a', 'b', 'c'] - ordereds = [True, False] - - for ordered in ordereds: - arr = pd.Series(pd.Categorical(data, ordered=ordered), name='cats') - result = convert_pandas_type_to_json_field(arr) - expected = {"name": "cats", "type": "any", - "constraints": {"enum": data}, - "ordered": ordered} - assert result == expected - - arr = pd.CategoricalIndex(data, ordered=ordered, name='cats') - result = convert_pandas_type_to_json_field(arr) - expected = {"name": "cats", "type": "any", - "constraints": {"enum": data}, - "ordered": ordered} - assert result == expected + if kind is pd.Categorical: + arr = pd.Series(kind(data, ordered=ordered), name='cats') + elif kind is pd.CategoricalIndex: + 
arr = kind(data, ordered=ordered, name='cats') + + result = convert_pandas_type_to_json_field(arr) + expected = {"name": "cats", "type": "any", + "constraints": {"enum": data}, + "ordered": ordered} + assert result == expected @pytest.mark.parametrize("inp,exp", [ ({'type': 'integer'}, 'int64'), @@ -440,35 +434,22 @@ def test_categorical(self): OrderedDict([('idx', 2), ('values', 'a')])])]) assert result == expected - def test_set_default_names_unset(self): - data = pd.Series(1, pd.Index([1])) - result = set_default_names(data) - assert result.index.name == 'index' - - def test_set_default_names_set(self): - data = pd.Series(1, pd.Index([1], name='myname')) - result = set_default_names(data) - assert result.index.name == 'myname' - - def test_set_default_names_mi_unset(self): - data = pd.Series( - 1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')])) - result = set_default_names(data) - assert result.index.names == ['level_0', 'level_1'] - - def test_set_default_names_mi_set(self): - data = pd.Series( - 1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')], - names=['n1', 'n2'])) - result = set_default_names(data) - assert result.index.names == ['n1', 'n2'] - - def test_set_default_names_mi_partion(self): - data = pd.Series( - 1, pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')], - names=['n1', None])) + @pytest.mark.parametrize('idx,nm,prop', [ + (pd.Index([1]), 'index', 'name'), + (pd.Index([1], name='myname'), 'myname', 'name'), + (pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')]), + ['level_0', 'level_1'], 'names'), + (pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')], + names=['n1', 'n2']), + ['n1', 'n2'], 'names'), + (pd.MultiIndex.from_product([('a', 'b'), ('c', 'd')], + names=['n1', None]), + ['n1', 'level_1'], 'names') + ]) + def test_set_names_unset(self, idx, nm, prop): + data = pd.Series(1, idx) result = set_default_names(data) - assert result.index.names == ['n1', 'level_1'] + assert getattr(result.index, prop) == nm def 
test_timestamp_in_columns(self): df = pd.DataFrame([[1, 2]], columns=[pd.Timestamp('2016'), @@ -478,20 +459,15 @@ def test_timestamp_in_columns(self): assert js['schema']['fields'][1]['name'] == 1451606400000 assert js['schema']['fields'][2]['name'] == 10000 - def test_overlapping_names(self): - cases = [ - pd.Series([1], index=pd.Index([1], name='a'), name='a'), - pd.DataFrame({"A": [1]}, index=pd.Index([1], name="A")), - pd.DataFrame({"A": [1]}, index=pd.MultiIndex.from_arrays([ - ['a'], [1] - ], names=["A", "a"])), - ] - - for data in cases: - with pytest.raises(ValueError) as excinfo: - data.to_json(orient='table') - - assert 'Overlapping' in str(excinfo.value) + @pytest.mark.parametrize('case', [ + pd.Series([1], index=pd.Index([1], name='a'), name='a'), + pd.DataFrame({"A": [1]}, index=pd.Index([1], name="A")), + pd.DataFrame({"A": [1]}, index=pd.MultiIndex.from_arrays([ + ['a'], [1]], names=["A", "a"])) + ]) + def test_overlapping_names(self, case): + with tm.assert_raises_regex(ValueError, 'Overlapping'): + case.to_json(orient='table') def test_mi_falsey_name(self): # GH 16203
- [X] closes #19070 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Could potentially consolidate all of the ``dtype`` tests further but didn't want to rock the boat too much
https://api.github.com/repos/pandas-dev/pandas/pulls/19128
2018-01-08T04:09:37Z
2018-01-08T19:30:13Z
2018-01-08T19:30:13Z
2018-02-27T01:32:15Z
TST: Add test for setting with loc on DataFrame with one row
diff --git a/pandas/tests/indexing/test_loc.py b/pandas/tests/indexing/test_loc.py index fb5f094f9462b..433b0d87ac005 100644 --- a/pandas/tests/indexing/test_loc.py +++ b/pandas/tests/indexing/test_loc.py @@ -420,6 +420,13 @@ def test_loc_setitem_consistency(self): df.loc[:, 'date'] = 1.0 tm.assert_frame_equal(df, expected) + # GH 15494 + # setting on frame with single row + df = DataFrame({'date': Series([Timestamp('20180101')])}) + df.loc[:, 'date'] = 'string' + expected = DataFrame({'date': Series(['string'])}) + tm.assert_frame_equal(df, expected) + def test_loc_setitem_consistency_empty(self): # empty (essentially noops) expected = DataFrame(columns=['x', 'y'])
- [X] closes #15494 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` Issue appears to be have been fixed for a while, since 0.21.x at least, just added a test to ensure there's no regression.
https://api.github.com/repos/pandas-dev/pandas/pulls/19127
2018-01-08T02:57:48Z
2018-01-10T00:17:29Z
2018-01-10T00:17:29Z
2018-01-15T18:26:24Z
Fix and test TimedeltaIndex.__rfloordiv__ bug
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 07e633ded942a..d7a3f0d077302 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -376,7 +376,8 @@ Conversion - Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (issue:`19043`) - Fixed bug where comparing :class:`DatetimeIndex` failed to raise ``TypeError`` when attempting to compare timezone-aware and timezone-naive datetimelike objects (:issue:`18162`) - Bug in :class:`DatetimeIndex` where the repr was not showing high-precision time values at the end of a day (e.g., 23:59:59.999999999) (:issue:`19030`) - +- Bug where dividing a scalar timedelta-like object with :class:`TimedeltaIndex` performed the reciprocal operation (:issue:`19125`) +- Indexing ^^^^^^^^ diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 55a26d57fa1d6..f634d809560ee 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -3861,7 +3861,7 @@ def dropna(self, how='any'): return self._shallow_copy(self.values[~self._isnan]) return self._shallow_copy() - def _evaluate_with_timedelta_like(self, other, op, opstr): + def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): raise TypeError("can only perform ops with timedelta like values") def _evaluate_with_datetime_like(self, other, op, opstr): @@ -4025,7 +4025,8 @@ def _evaluate_numeric_binop(self, other): # handle time-based others if isinstance(other, (ABCDateOffset, np.timedelta64, Timedelta, datetime.timedelta)): - return self._evaluate_with_timedelta_like(other, op, opstr) + return self._evaluate_with_timedelta_like(other, op, opstr, + reversed) elif isinstance(other, (Timestamp, np.datetime64)): return self._evaluate_with_datetime_like(other, op, opstr) diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index 
ccedaa9cf71ee..12ca26cfe0266 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -372,7 +372,7 @@ def _add_delta(self, delta): result = TimedeltaIndex(new_values, freq='infer', name=name) return result - def _evaluate_with_timedelta_like(self, other, op, opstr): + def _evaluate_with_timedelta_like(self, other, op, opstr, reversed=False): if isinstance(other, ABCSeries): # GH#19042 return NotImplemented @@ -386,10 +386,14 @@ def _evaluate_with_timedelta_like(self, other, op, opstr): "division by pd.NaT not implemented") i8 = self.asi8 + left, right = i8, other.value + if reversed: + left, right = right, left + if opstr in ['__floordiv__']: - result = i8 // other.value + result = left // right else: - result = op(i8, float(other.value)) + result = op(left, float(right)) result = self._maybe_mask_results(result, convert='float64') return Index(result, name=self.name, copy=False) @@ -972,6 +976,7 @@ def _is_convertible_to_index(other): def _is_convertible_to_td(key): + # TODO: Not all DateOffset objects are convertible to Timedelta return isinstance(key, (DateOffset, timedelta, Timedelta, np.timedelta64, compat.string_types)) diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 2581a8fad078a..3ec918e391860 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -286,6 +286,23 @@ def test_tdi_radd_timestamp(self): # ------------------------------------------------------------- + @pytest.mark.parametrize('scalar_td', [ + timedelta(minutes=10, seconds=7), + Timedelta('10m7s'), + Timedelta('10m7s').to_timedelta64()]) + def test_tdi_floordiv_timedelta_scalar(self, scalar_td): + # GH#19125 + tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None) + expected = pd.Index([2.0, 2.0, np.nan]) + + res = tdi.__rfloordiv__(scalar_td) + tm.assert_index_equal(res, expected) + + expected = 
pd.Index([0.0, 0.0, np.nan]) + + res = tdi // (scalar_td) + tm.assert_index_equal(res, expected) + # TODO: Split by operation, better name def test_ops_compat(self):
I thought we were ready to get rid of _TimeOp, but doing so turned up a couple of new bugs. Setup: ``` scalar_td = pd.Timedelta('10m7s') tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None) ``` Before: ``` >>> tdi.__rfloordiv__(scalar_td) Float64Index([0.0, 0.0, nan], dtype='float64') ``` After: ``` >>> tdi.__rfloordiv__(scalar_td) Float64Index([2.0, 2.0, nan], dtype='float64') ``` - [ ] closes #xxxx - [x] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19125
2018-01-07T20:24:11Z
2018-01-07T22:22:33Z
2018-01-07T22:22:33Z
2018-01-07T22:26:39Z
BUG: Avoid using realpath to make list_future_warnings.sh compatible on macosx
diff --git a/scripts/list_future_warnings.sh b/scripts/list_future_warnings.sh index e100f305267c1..0c4046bbb5f49 100755 --- a/scripts/list_future_warnings.sh +++ b/scripts/list_future_warnings.sh @@ -28,7 +28,7 @@ EXCLUDE+="^pandas/util/_depr_module.py$|" # generic deprecate module that raise EXCLUDE+="^pandas/util/testing.py$|" # contains function to evaluate if warning is raised EXCLUDE+="^pandas/io/parsers.py$" # implements generic deprecation system in io reading -BASE_DIR="$(dirname $(dirname $(realpath $0)))" +BASE_DIR="$(dirname $0)/.." cd $BASE_DIR FILES=`grep -RIl "FutureWarning" pandas/* | grep -vE "$EXCLUDE"` OUTPUT=()
…with Mac - [ ] closes #xxxx - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19122
2018-01-07T17:02:52Z
2018-01-07T22:14:15Z
null
2018-01-07T22:14:23Z
DEPR: Removing previously deprecated flavor parameter from SQLiteData…
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 07e633ded942a..11ac1a37e05a0 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -314,6 +314,7 @@ Removal of prior version deprecations/changes - The ``Panel4D`` and ``PanelND`` classes have been removed (:issue:`13776`) - The ``Panel``class has dropped the ``to_long``and ``toLong`` methods (:issue:`19077`) - The options ``display.line_with`` and ``display.height`` are removed in favor of ``display.width`` and ``display.max_rows`` respectively (:issue:`4391`, :issue:`19107`) +- The ``flavor`` parameter have been removed from func:`to_sql` method (:issue:`13611`) .. _whatsnew_0230.performance: diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c9672a43a95a8..cef1e551f948e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1842,8 +1842,8 @@ def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs): return packers.to_msgpack(path_or_buf, self, encoding=encoding, **kwargs) - def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail', - index=True, index_label=None, chunksize=None, dtype=None): + def to_sql(self, name, con, schema=None, if_exists='fail', index=True, + index_label=None, chunksize=None, dtype=None): """ Write records stored in a DataFrame to a SQL database. @@ -1854,10 +1854,6 @@ def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail', con : SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - flavor : 'sqlite', default None - .. deprecated:: 0.19.0 - 'sqlite' is the only supported option if SQLAlchemy is not - used. schema : string, default None Specify the schema (if database flavor supports this). If None, use default schema. 
@@ -1880,9 +1876,9 @@ def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail', """ from pandas.io import sql - sql.to_sql(self, name, con, flavor=flavor, schema=schema, - if_exists=if_exists, index=index, index_label=index_label, - chunksize=chunksize, dtype=dtype) + sql.to_sql(self, name, con, schema=schema, if_exists=if_exists, + index=index, index_label=index_label, chunksize=chunksize, + dtype=dtype) def to_pickle(self, path, compression='infer', protocol=pkl.HIGHEST_PROTOCOL): diff --git a/pandas/io/sql.py b/pandas/io/sql.py index e2f3033c580a5..437e279e90979 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -41,24 +41,6 @@ class DatabaseError(IOError): _SQLALCHEMY_INSTALLED = None -def _validate_flavor_parameter(flavor): - """ - Checks whether a database 'flavor' was specified. - If not None, produces FutureWarning if 'sqlite' and - raises a ValueError if anything else. - """ - if flavor is not None: - if flavor == 'sqlite': - warnings.warn("the 'flavor' parameter is deprecated " - "and will be removed in a future version, " - "as 'sqlite' is the only supported option " - "when SQLAlchemy is not installed.", - FutureWarning, stacklevel=2) - else: - raise ValueError("database flavor {flavor} is not " - "supported".format(flavor=flavor)) - - def _is_sqlalchemy_connectable(con): global _SQLALCHEMY_INSTALLED if _SQLALCHEMY_INSTALLED is None: @@ -415,8 +397,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, chunksize=chunksize) -def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail', - index=True, index_label=None, chunksize=None, dtype=None): +def to_sql(frame, name, con, schema=None, if_exists='fail', index=True, + index_label=None, chunksize=None, dtype=None): """ Write records stored in a DataFrame to a SQL database. @@ -430,10 +412,6 @@ def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail', Using SQLAlchemy makes it possible to use any DB supported by that library. 
If a DBAPI2 object, only sqlite3 is supported. - flavor : 'sqlite', default None - .. deprecated:: 0.19.0 - 'sqlite' is the only supported option if SQLAlchemy is not - used. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). @@ -459,7 +437,7 @@ def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail', if if_exists not in ('fail', 'replace', 'append'): raise ValueError("'{0}' is not valid for if_exists".format(if_exists)) - pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor) + pandas_sql = pandasSQL_builder(con, schema=schema) if isinstance(frame, Series): frame = frame.to_frame() @@ -472,7 +450,7 @@ def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail', chunksize=chunksize, dtype=dtype) -def has_table(table_name, con, flavor=None, schema=None): +def has_table(table_name, con, schema=None): """ Check if DataBase has named table. @@ -484,10 +462,6 @@ def has_table(table_name, con, flavor=None, schema=None): Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - flavor : 'sqlite', default None - .. deprecated:: 0.19.0 - 'sqlite' is the only supported option if SQLAlchemy is not - installed. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). 
@@ -496,7 +470,7 @@ def has_table(table_name, con, flavor=None, schema=None): ------- boolean """ - pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema) + pandas_sql = pandasSQL_builder(con, schema=schema) return pandas_sql.has_table(table_name) @@ -521,14 +495,12 @@ def _engine_builder(con): return con -def pandasSQL_builder(con, flavor=None, schema=None, meta=None, +def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. """ - _validate_flavor_parameter(flavor) - # When support for DBAPI connections is removed, # is_cursor should not be necessary. con = _engine_builder(con) @@ -1378,9 +1350,7 @@ class SQLiteDatabase(PandasSQL): """ - def __init__(self, con, flavor=None, is_cursor=False): - _validate_flavor_parameter(flavor) - + def __init__(self, con, is_cursor=False): self.is_cursor = is_cursor self.con = con @@ -1534,7 +1504,7 @@ def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): return str(table.sql_schema()) -def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None): +def get_schema(frame, name, keys=None, con=None, dtype=None): """ Get the SQL db table schema for the given frame. @@ -1549,15 +1519,11 @@ def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None): Using SQLAlchemy makes it possible to use any DB supported by that library, default: None If a DBAPI2 object, only sqlite3 is supported. - flavor : 'sqlite', default None - .. deprecated:: 0.19.0 - 'sqlite' is the only supported option if SQLAlchemy is not - installed. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. 
""" - pandas_sql = pandasSQL_builder(con=con, flavor=flavor) + pandas_sql = pandasSQL_builder(con=con) return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype) diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 217fc8e67483d..0cc4101cd6304 100644 --- a/pandas/tests/io/test_sql.py +++ b/pandas/tests/io/test_sql.py @@ -2333,31 +2333,6 @@ def clean_up(test_table_to_drop): clean_up(table_name) -@pytest.mark.single -class TestSQLFlavorDeprecation(object): - """ - gh-13611: test that the 'flavor' parameter - is appropriately deprecated by checking the - functions that directly raise the warning - """ - - con = 1234 # don't need real connection for this - funcs = ['SQLiteDatabase', 'pandasSQL_builder'] - - def test_unsupported_flavor(self): - msg = 'is not supported' - - for func in self.funcs: - tm.assert_raises_regex(ValueError, msg, getattr(sql, func), - self.con, flavor='mysql') - - def test_deprecated_flavor(self): - for func in self.funcs: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - getattr(sql, func)(self.con, flavor='sqlite') - - @pytest.mark.single @pytest.mark.skip(reason="gh-13611: there is no support for MySQL " "if SQLAlchemy is not installed")
…base (#6581) - [X] related to #6581 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19121
2018-01-07T16:02:53Z
2018-01-10T12:58:38Z
2018-01-10T12:58:38Z
2018-01-10T12:58:40Z
DEPR: Removing previously deprecated Categorical.labels (#6581)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ca48b5525369b..45c259a1cdf0b 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -314,6 +314,7 @@ Removal of prior version deprecations/changes - The ``Panel4D`` and ``PanelND`` classes have been removed (:issue:`13776`) - The ``Panel``class has dropped the ``to_long``and ``toLong`` methods (:issue:`19077`) - The options ``display.line_with`` and ``display.height`` are removed in favor of ``display.width`` and ``display.max_rows`` respectively (:issue:`4391`, :issue:`19107`) +- The ``labels`` attribute of the ``Categorical`` class has been removed in favor of :attribute:`Categorical.codes` (:issue:`7768`) - The ``flavor`` parameter have been removed from func:`to_sql` method (:issue:`13611`) .. _whatsnew_0230.performance: diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 0880b8668ee77..92fcdc0f4625b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -590,19 +590,6 @@ def _set_codes(self, codes): codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc) - def _get_labels(self): - """ - Get the category labels (deprecated). - - .. deprecated:: 0.15.0 - Use `.codes()` instead. - """ - warn("'labels' is deprecated. 
Use 'codes' instead", FutureWarning, - stacklevel=2) - return self.codes - - labels = property(fget=_get_labels, fset=_set_codes) - def _set_categories(self, categories, fastpath=False): """ Sets new categories inplace diff --git a/pandas/tests/categorical/test_api.py b/pandas/tests/categorical/test_api.py index 12db4a9bea28b..0af2857091b74 100644 --- a/pandas/tests/categorical/test_api.py +++ b/pandas/tests/categorical/test_api.py @@ -400,15 +400,6 @@ def test_remove_unused_categories(self): out = cat.remove_unused_categories() assert out.get_values().tolist() == val.tolist() - def test_deprecated_labels(self): - # TODO: labels is deprecated and should be removed in 0.18 or 2017, - # whatever is earlier - cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3]) - exp = cat.codes - with tm.assert_produces_warning(FutureWarning): - res = cat.labels - tm.assert_numpy_array_equal(res, exp) - class TestCategoricalAPIWithFactor(TestCategorical):
- [X] related to #6581 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19120
2018-01-07T15:58:56Z
2018-01-10T13:00:26Z
2018-01-10T13:00:25Z
2018-01-10T13:00:28Z
DEPR: Removing previously deprecated datetools module (#6581)
diff --git a/doc/source/whatsnew/v0.24.0.txt b/doc/source/whatsnew/v0.24.0.txt index 42e286f487a7d..2d0bd9aa0de39 100644 --- a/doc/source/whatsnew/v0.24.0.txt +++ b/doc/source/whatsnew/v0.24.0.txt @@ -391,7 +391,7 @@ Removal of prior version deprecations/changes - The ``LongPanel`` and ``WidePanel`` classes have been removed (:issue:`10892`) - Several private functions were removed from the (non-public) module ``pandas.core.common`` (:issue:`22001`) -- +- Removal of the previously deprecated module ``pandas.core.datetools`` (:issue:`14105`, :issue:`14094`) - .. _whatsnew_0240.performance: diff --git a/pandas/core/api.py b/pandas/core/api.py index 92586235df93c..32df317a602a9 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -32,17 +32,6 @@ from pandas.core.tools.datetimes import to_datetime from pandas.core.tools.timedeltas import to_timedelta -# see gh-14094. -from pandas.util._depr_module import _DeprecatedModule - -_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay', - 'customBusinessMonthEnd', 'customBusinessMonthBegin', - 'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin', - 'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd', - 'byearEnd', 'week'] -datetools = _DeprecatedModule(deprmod='pandas.core.datetools', - removals=_removals) - from pandas.core.config import (get_option, set_option, reset_option, describe_option, option_context, options) diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py deleted file mode 100644 index 83167a45369c4..0000000000000 --- a/pandas/core/datetools.py +++ /dev/null @@ -1,55 +0,0 @@ -"""A collection of random tools for dealing with dates in Python. - -.. deprecated:: 0.19.0 - Use pandas.tseries module instead. 
-""" - -# flake8: noqa - -import warnings - -from pandas.core.tools.datetimes import * -from pandas.tseries.offsets import * -from pandas.tseries.frequencies import * - -warnings.warn("The pandas.core.datetools module is deprecated and will be " - "removed in a future version. Please use the pandas.tseries " - "module instead.", FutureWarning, stacklevel=2) - -day = DateOffset() -bday = BDay() -businessDay = bday -try: - cday = CDay() - customBusinessDay = CustomBusinessDay() - customBusinessMonthEnd = CBMonthEnd() - customBusinessMonthBegin = CBMonthBegin() -except NotImplementedError: - cday = None - customBusinessDay = None - customBusinessMonthEnd = None - customBusinessMonthBegin = None -monthEnd = MonthEnd() -yearEnd = YearEnd() -yearBegin = YearBegin() -bmonthEnd = BMonthEnd() -bmonthBegin = BMonthBegin() -cbmonthEnd = customBusinessMonthEnd -cbmonthBegin = customBusinessMonthBegin -bquarterEnd = BQuarterEnd() -quarterEnd = QuarterEnd() -byearEnd = BYearEnd() -week = Week() - -# Functions/offsets to roll dates forward -thisMonthEnd = MonthEnd(0) -thisBMonthEnd = BMonthEnd(0) -thisYearEnd = YearEnd(0) -thisYearBegin = YearBegin(0) -thisBQuarterEnd = BQuarterEnd(0) -thisQuarterEnd = QuarterEnd(0) - -# Functions to check where a date lies -isBusinessDay = BDay().onOffset -isMonthEnd = MonthEnd().onOffset -isBMonthEnd = BMonthEnd().onOffset diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index ddee4894456ea..18d3fbd419c08 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -35,7 +35,7 @@ class TestPDApi(Base): 'util', 'options', 'io'] # these are already deprecated; awaiting removal - deprecated_modules = ['datetools', 'parser', 'json', 'lib', 'tslib'] + deprecated_modules = ['parser', 'json', 'lib', 'tslib'] # misc misc = ['IndexSlice', 'NaT'] @@ -127,19 +127,6 @@ def test_testing(self): self.check(testing, self.funcs) -class TestDatetoolsDeprecation(object): - - def test_deprecation_access_func(self): - with 
tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - pd.datetools.to_datetime('2016-01-01') - - def test_deprecation_access_obj(self): - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - pd.datetools.monthEnd - - class TestTopLevelDeprecations(object): # top-level API deprecations
- [X] related to #6581 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19119
2018-01-07T15:55:41Z
2018-08-02T10:45:04Z
2018-08-02T10:45:04Z
2018-08-02T10:45:09Z
DEPR: Changing default of str.extract(expand=False) to str.extract(expand=True)
diff --git a/doc/source/text.rst b/doc/source/text.rst index 2b6459b581c1e..1e620acb1f88a 100644 --- a/doc/source/text.rst +++ b/doc/source/text.rst @@ -218,7 +218,8 @@ Extract first match in each subject (extract) ``DataFrame``, depending on the subject and regular expression pattern (same behavior as pre-0.18.0). When ``expand=True`` it always returns a ``DataFrame``, which is more consistent and less - confusing from the perspective of a user. + confusing from the perspective of a user. ``expand=True`` is the + default since version 0.23.0. The ``extract`` method accepts a `regular expression <https://docs.python.org/3/library/re.html>`__ with at least one diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 69965f44d87a8..0ac27a2f23386 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -296,6 +296,53 @@ Build Changes - Building from source now explicitly requires ``setuptools`` in ``setup.py`` (:issue:`18113`) - Updated conda recipe to be in compliance with conda-build 3.0+ (:issue:`18002`) +Extraction of matching patterns from strings +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, extracting matching patterns from strings with :func:`str.extract` used to return a +``Series`` if a single group was being extracted (a ``DataFrame`` if more than one group was +extracted``). As of Pandas 0.23.0 :func:`str.extract` always returns a ``DataFrame``, unless +``expand`` is set to ``False`` (:issue:`11386`). + +Also, ``None`` was an accepted value for the ``expand`` parameter (which was equivalent to +``False``), but now raises a ``ValueError``. + +Previous Behavior: + +.. code-block:: ipython + + In [1]: s = pd.Series(['number 10', '12 eggs']) + + In [2]: extracted = s.str.extract('.*(\d\d).*') + + In [3]: extracted + Out [3]: + 0 10 + 1 12 + dtype: object + + In [4]: type(extracted) + Out [4]: + pandas.core.series.Series + +New Behavior: + +.. 
ipython:: python + + s = pd.Series(['number 10', '12 eggs']) + extracted = s.str.extract('.*(\d\d).*') + extracted + type(extracted) + +To restore previous behavior, simply set ``expand`` to ``False``: + +.. ipython:: python + + s = pd.Series(['number 10', '12 eggs']) + extracted = s.str.extract('.*(\d\d).*', expand=False) + extracted + type(extracted) + .. _whatsnew_0230.api: Other API Changes diff --git a/pandas/core/strings.py b/pandas/core/strings.py index 12c7feb5f2b15..b1c1ede66236c 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -598,7 +598,7 @@ def _str_extract_frame(arr, pat, flags=0): dtype=object) -def str_extract(arr, pat, flags=0, expand=None): +def str_extract(arr, pat, flags=0, expand=True): r""" For each subject string in the Series, extract groups from the first match of regular expression pat. @@ -610,7 +610,7 @@ def str_extract(arr, pat, flags=0, expand=None): flags : int, default 0 (no flags) re module flags, e.g. re.IGNORECASE - expand : bool, default False + expand : bool, default True * If True, return DataFrame. * If False, return Series/Index/DataFrame. 
@@ -676,15 +676,6 @@ def str_extract(arr, pat, flags=0, expand=None): dtype: object """ - if expand is None: - warnings.warn( - "currently extract(expand=None) " + - "means expand=False (return Index/Series/DataFrame) " + - "but in a future version of pandas this will be changed " + - "to expand=True (return DataFrame)", - FutureWarning, - stacklevel=3) - expand = False if not isinstance(expand, bool): raise ValueError("expand must be True or False") if expand: @@ -1739,7 +1730,7 @@ def translate(self, table, deletechars=None): findall = _pat_wrapper(str_findall, flags=True) @copy(str_extract) - def extract(self, pat, flags=0, expand=None): + def extract(self, pat, flags=0, expand=True): return str_extract(self, pat, flags=flags, expand=expand) @copy(str_extractall) diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 973fe74429551..178c5ff655b04 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -612,13 +612,16 @@ def test_match(self): def test_extract_expand_None(self): values = Series(['fooBAD__barBAD', NA, 'foo']) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_raises_regex(ValueError, + 'expand must be True or False'): values.str.extract('.*(BAD[_]+).*(BAD)', expand=None) def test_extract_expand_unspecified(self): values = Series(['fooBAD__barBAD', NA, 'foo']) - with tm.assert_produces_warning(FutureWarning): - values.str.extract('.*(BAD[_]+).*(BAD)') + result_unspecified = values.str.extract('.*(BAD[_]+).*') + assert isinstance(result_unspecified, DataFrame) + result_true = values.str.extract('.*(BAD[_]+).*', expand=True) + tm.assert_frame_equal(result_unspecified, result_true) def test_extract_expand_False(self): # Contains tests like those in test_match and some others.
…o str.extract(expand=True) (#6581) - [X] related to #6581 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19118
2018-01-07T15:52:01Z
2018-02-05T08:39:43Z
2018-02-05T08:39:43Z
2018-02-05T11:02:29Z
DEPR: Adding script to detect deprecated features by version (#6581)
diff --git a/scripts/list_future_warnings.sh b/scripts/list_future_warnings.sh new file mode 100755 index 0000000000000..e100f305267c1 --- /dev/null +++ b/scripts/list_future_warnings.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Check all future warnings in Python files, and report them with the version +# where the FutureWarning was added. +# +# This is useful to detect features that have been deprecated, and should be +# removed from the code. For example, if a line of code contains: +# +# warning.warn('Method deprecated', FutureWarning, stacklevel=2) +# +# Which is released in Pandas 0.20.0, then it is expected that the method +# is removed before releasing Pandas 0.24.0, including the warning. If it +# is not, this script will list this line, with the version 0.20.0, which +# will make it easy to detect that it had to be removed. +# +# In some cases this script can return false positives, for example in files +# where FutureWarning is used to detect deprecations, or similar. The EXCLUDE +# variable can be used to ignore files that use FutureWarning, but do not +# deprecate functionality. 
+# +# Usage: +# +# $ ./list_future_warnings.sh + +EXCLUDE="^pandas/tests/|" # tests validate that FutureWarnings are raised +EXCLUDE+="^pandas/util/_decorators.py$|" # generic deprecate function that raises warning +EXCLUDE+="^pandas/util/_depr_module.py$|" # generic deprecate module that raises warnings +EXCLUDE+="^pandas/util/testing.py$|" # contains function to evaluate if warning is raised +EXCLUDE+="^pandas/io/parsers.py$" # implements generic deprecation system in io reading + +BASE_DIR="$(dirname $(dirname $(realpath $0)))" +cd $BASE_DIR +FILES=`grep -RIl "FutureWarning" pandas/* | grep -vE "$EXCLUDE"` +OUTPUT=() +IFS=$'\n' + +for FILE in $FILES; do + FILE_LINES=`git blame -sf $FILE | grep FutureWarning | tr -s " " | cut -d " " -f1,3` + for FILE_LINE in $FILE_LINES; do + TAG=$(git tag --contains $(echo $FILE_LINE | cut -d" " -f1) | head -n1) + OUTPUT_ROW=`printf "%-14s %-16s %s" ${TAG:-"(not released)"} $FILE_LINE $FILE` + OUTPUT+=($OUTPUT_ROW) + done +done + +printf "%s\n" "${OUTPUT[@]}" | sort -V
- [ ] closes #xxxx - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19117
2018-01-07T15:44:01Z
2018-01-07T16:09:51Z
2018-01-07T16:09:51Z
2018-01-07T16:21:19Z
DEPR: removing deprecated stuff: Categorical.labels, pandas.core.date…
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index b1efd0dcb43e2..d147d18b22ccd 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -314,6 +314,10 @@ Removal of prior version deprecations/changes - The ``Panel4D`` and ``PanelND`` classes have been removed (:issue:`13776`) - The ``Panel``class has dropped the ``to_long``and ``toLong`` methods (:issue:`19077`) - The options ``display.line_with`` and ``display.height`` are removed in favor of ``display.width`` and ``display.max_rows`` respectively (:issue:`4391`, :issue:`19107`) +- The ``labels`` attribute of the ``Categorical`` class has been drop in favor of ``codes`` +- Removal of the previously deprecated module ``pandas.core.datetools`` (:issue:`14105`, :issue:`14094`) +- The ``flavor`` parameter have been removed from ``SQLiteDatabase`` and :func:`to_sql` method +- The ``expand`` parameter of :func:`str.extract` method is ``True`` by default (it was `False`) .. _whatsnew_0230.performance: diff --git a/pandas/core/api.py b/pandas/core/api.py index b228a97c99074..4c05df1e9fbb8 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -32,17 +32,6 @@ from pandas.core.tools.datetimes import to_datetime from pandas.core.tools.timedeltas import to_timedelta -# see gh-14094. 
-from pandas.util._depr_module import _DeprecatedModule - -_removals = ['day', 'bday', 'businessDay', 'cday', 'customBusinessDay', - 'customBusinessMonthEnd', 'customBusinessMonthBegin', - 'monthEnd', 'yearEnd', 'yearBegin', 'bmonthEnd', 'bmonthBegin', - 'cbmonthEnd', 'cbmonthBegin', 'bquarterEnd', 'quarterEnd', - 'byearEnd', 'week'] -datetools = _DeprecatedModule(deprmod='pandas.core.datetools', - removals=_removals) - from pandas.core.config import (get_option, set_option, reset_option, describe_option, option_context, options) diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py index 0880b8668ee77..92fcdc0f4625b 100644 --- a/pandas/core/categorical.py +++ b/pandas/core/categorical.py @@ -590,19 +590,6 @@ def _set_codes(self, codes): codes = property(fget=_get_codes, fset=_set_codes, doc=_codes_doc) - def _get_labels(self): - """ - Get the category labels (deprecated). - - .. deprecated:: 0.15.0 - Use `.codes()` instead. - """ - warn("'labels' is deprecated. Use 'codes' instead", FutureWarning, - stacklevel=2) - return self.codes - - labels = property(fget=_get_labels, fset=_set_codes) - def _set_categories(self, categories, fastpath=False): """ Sets new categories inplace diff --git a/pandas/core/datetools.py b/pandas/core/datetools.py deleted file mode 100644 index 83167a45369c4..0000000000000 --- a/pandas/core/datetools.py +++ /dev/null @@ -1,55 +0,0 @@ -"""A collection of random tools for dealing with dates in Python. - -.. deprecated:: 0.19.0 - Use pandas.tseries module instead. -""" - -# flake8: noqa - -import warnings - -from pandas.core.tools.datetimes import * -from pandas.tseries.offsets import * -from pandas.tseries.frequencies import * - -warnings.warn("The pandas.core.datetools module is deprecated and will be " - "removed in a future version. 
Please use the pandas.tseries " - "module instead.", FutureWarning, stacklevel=2) - -day = DateOffset() -bday = BDay() -businessDay = bday -try: - cday = CDay() - customBusinessDay = CustomBusinessDay() - customBusinessMonthEnd = CBMonthEnd() - customBusinessMonthBegin = CBMonthBegin() -except NotImplementedError: - cday = None - customBusinessDay = None - customBusinessMonthEnd = None - customBusinessMonthBegin = None -monthEnd = MonthEnd() -yearEnd = YearEnd() -yearBegin = YearBegin() -bmonthEnd = BMonthEnd() -bmonthBegin = BMonthBegin() -cbmonthEnd = customBusinessMonthEnd -cbmonthBegin = customBusinessMonthBegin -bquarterEnd = BQuarterEnd() -quarterEnd = QuarterEnd() -byearEnd = BYearEnd() -week = Week() - -# Functions/offsets to roll dates forward -thisMonthEnd = MonthEnd(0) -thisBMonthEnd = BMonthEnd(0) -thisYearEnd = YearEnd(0) -thisYearBegin = YearBegin(0) -thisBQuarterEnd = BQuarterEnd(0) -thisQuarterEnd = QuarterEnd(0) - -# Functions to check where a date lies -isBusinessDay = BDay().onOffset -isMonthEnd = MonthEnd().onOffset -isBMonthEnd = BMonthEnd().onOffset diff --git a/pandas/core/generic.py b/pandas/core/generic.py index c9672a43a95a8..cef1e551f948e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -1842,8 +1842,8 @@ def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs): return packers.to_msgpack(path_or_buf, self, encoding=encoding, **kwargs) - def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail', - index=True, index_label=None, chunksize=None, dtype=None): + def to_sql(self, name, con, schema=None, if_exists='fail', index=True, + index_label=None, chunksize=None, dtype=None): """ Write records stored in a DataFrame to a SQL database. @@ -1854,10 +1854,6 @@ def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail', con : SQLAlchemy engine or DBAPI2 connection (legacy mode) Using SQLAlchemy makes it possible to use any DB supported by that library. 
If a DBAPI2 object, only sqlite3 is supported. - flavor : 'sqlite', default None - .. deprecated:: 0.19.0 - 'sqlite' is the only supported option if SQLAlchemy is not - used. schema : string, default None Specify the schema (if database flavor supports this). If None, use default schema. @@ -1880,9 +1876,9 @@ def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail', """ from pandas.io import sql - sql.to_sql(self, name, con, flavor=flavor, schema=schema, - if_exists=if_exists, index=index, index_label=index_label, - chunksize=chunksize, dtype=dtype) + sql.to_sql(self, name, con, schema=schema, if_exists=if_exists, + index=index, index_label=index_label, chunksize=chunksize, + dtype=dtype) def to_pickle(self, path, compression='infer', protocol=pkl.HIGHEST_PROTOCOL): diff --git a/pandas/core/strings.py b/pandas/core/strings.py index e0012c25e366d..bb0d1a9e16dcd 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -598,7 +598,7 @@ def _str_extract_frame(arr, pat, flags=0): dtype=object) -def str_extract(arr, pat, flags=0, expand=None): +def str_extract(arr, pat, flags=0, expand=True): r""" For each subject string in the Series, extract groups from the first match of regular expression pat. @@ -610,7 +610,7 @@ def str_extract(arr, pat, flags=0, expand=None): flags : int, default 0 (no flags) re module flags, e.g. re.IGNORECASE - expand : bool, default False + expand : bool, default True * If True, return DataFrame. * If False, return Series/Index/DataFrame. 
@@ -676,15 +676,6 @@ def str_extract(arr, pat, flags=0, expand=None): dtype: object """ - if expand is None: - warnings.warn( - "currently extract(expand=None) " + - "means expand=False (return Index/Series/DataFrame) " + - "but in a future version of pandas this will be changed " + - "to expand=True (return DataFrame)", - FutureWarning, - stacklevel=3) - expand = False if not isinstance(expand, bool): raise ValueError("expand must be True or False") if expand: @@ -1708,7 +1699,7 @@ def translate(self, table, deletechars=None): findall = _pat_wrapper(str_findall, flags=True) @copy(str_extract) - def extract(self, pat, flags=0, expand=None): + def extract(self, pat, flags=0, expand=True): return str_extract(self, pat, flags=flags, expand=expand) @copy(str_extractall) diff --git a/pandas/io/sql.py b/pandas/io/sql.py index e2f3033c580a5..437e279e90979 100644 --- a/pandas/io/sql.py +++ b/pandas/io/sql.py @@ -41,24 +41,6 @@ class DatabaseError(IOError): _SQLALCHEMY_INSTALLED = None -def _validate_flavor_parameter(flavor): - """ - Checks whether a database 'flavor' was specified. - If not None, produces FutureWarning if 'sqlite' and - raises a ValueError if anything else. 
- """ - if flavor is not None: - if flavor == 'sqlite': - warnings.warn("the 'flavor' parameter is deprecated " - "and will be removed in a future version, " - "as 'sqlite' is the only supported option " - "when SQLAlchemy is not installed.", - FutureWarning, stacklevel=2) - else: - raise ValueError("database flavor {flavor} is not " - "supported".format(flavor=flavor)) - - def _is_sqlalchemy_connectable(con): global _SQLALCHEMY_INSTALLED if _SQLALCHEMY_INSTALLED is None: @@ -415,8 +397,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None, chunksize=chunksize) -def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail', - index=True, index_label=None, chunksize=None, dtype=None): +def to_sql(frame, name, con, schema=None, if_exists='fail', index=True, + index_label=None, chunksize=None, dtype=None): """ Write records stored in a DataFrame to a SQL database. @@ -430,10 +412,6 @@ def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail', Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - flavor : 'sqlite', default None - .. deprecated:: 0.19.0 - 'sqlite' is the only supported option if SQLAlchemy is not - used. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). 
@@ -459,7 +437,7 @@ def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail', if if_exists not in ('fail', 'replace', 'append'): raise ValueError("'{0}' is not valid for if_exists".format(if_exists)) - pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor) + pandas_sql = pandasSQL_builder(con, schema=schema) if isinstance(frame, Series): frame = frame.to_frame() @@ -472,7 +450,7 @@ def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail', chunksize=chunksize, dtype=dtype) -def has_table(table_name, con, flavor=None, schema=None): +def has_table(table_name, con, schema=None): """ Check if DataBase has named table. @@ -484,10 +462,6 @@ def has_table(table_name, con, flavor=None, schema=None): Using SQLAlchemy makes it possible to use any DB supported by that library. If a DBAPI2 object, only sqlite3 is supported. - flavor : 'sqlite', default None - .. deprecated:: 0.19.0 - 'sqlite' is the only supported option if SQLAlchemy is not - installed. schema : string, default None Name of SQL schema in database to write to (if database flavor supports this). If None, use default schema (default). @@ -496,7 +470,7 @@ def has_table(table_name, con, flavor=None, schema=None): ------- boolean """ - pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema) + pandas_sql = pandasSQL_builder(con, schema=schema) return pandas_sql.has_table(table_name) @@ -521,14 +495,12 @@ def _engine_builder(con): return con -def pandasSQL_builder(con, flavor=None, schema=None, meta=None, +def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False): """ Convenience function to return the correct PandasSQL subclass based on the provided parameters. """ - _validate_flavor_parameter(flavor) - # When support for DBAPI connections is removed, # is_cursor should not be necessary. 
con = _engine_builder(con) @@ -1378,9 +1350,7 @@ class SQLiteDatabase(PandasSQL): """ - def __init__(self, con, flavor=None, is_cursor=False): - _validate_flavor_parameter(flavor) - + def __init__(self, con, is_cursor=False): self.is_cursor = is_cursor self.con = con @@ -1534,7 +1504,7 @@ def _create_sql_schema(self, frame, table_name, keys=None, dtype=None): return str(table.sql_schema()) -def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None): +def get_schema(frame, name, keys=None, con=None, dtype=None): """ Get the SQL db table schema for the given frame. @@ -1549,15 +1519,11 @@ def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None): Using SQLAlchemy makes it possible to use any DB supported by that library, default: None If a DBAPI2 object, only sqlite3 is supported. - flavor : 'sqlite', default None - .. deprecated:: 0.19.0 - 'sqlite' is the only supported option if SQLAlchemy is not - installed. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection. 
""" - pandas_sql = pandasSQL_builder(con=con, flavor=flavor) + pandas_sql = pandasSQL_builder(con=con) return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index 8962eb90be828..6bb425b8611bf 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -35,7 +35,7 @@ class TestPDApi(Base): 'util', 'options', 'io'] # these are already deprecated; awaiting removal - deprecated_modules = ['stats', 'datetools', 'parser', + deprecated_modules = ['stats', 'parser', 'json', 'lib', 'tslib'] # misc @@ -140,19 +140,6 @@ def test_testing(self): self.check(testing, self.funcs) -class TestDatetoolsDeprecation(object): - - def test_deprecation_access_func(self): - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - pd.datetools.to_datetime('2016-01-01') - - def test_deprecation_access_obj(self): - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - pd.datetools.monthEnd - - class TestTopLevelDeprecations(object): # top-level API deprecations diff --git a/pandas/tests/categorical/test_api.py b/pandas/tests/categorical/test_api.py index 12db4a9bea28b..0af2857091b74 100644 --- a/pandas/tests/categorical/test_api.py +++ b/pandas/tests/categorical/test_api.py @@ -400,15 +400,6 @@ def test_remove_unused_categories(self): out = cat.remove_unused_categories() assert out.get_values().tolist() == val.tolist() - def test_deprecated_labels(self): - # TODO: labels is deprecated and should be removed in 0.18 or 2017, - # whatever is earlier - cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3]) - exp = cat.codes - with tm.assert_produces_warning(FutureWarning): - res = cat.labels - tm.assert_numpy_array_equal(res, exp) - class TestCategoricalAPIWithFactor(TestCategorical): diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py index 217fc8e67483d..0cc4101cd6304 100644 --- a/pandas/tests/io/test_sql.py +++ 
b/pandas/tests/io/test_sql.py @@ -2333,31 +2333,6 @@ def clean_up(test_table_to_drop): clean_up(table_name) -@pytest.mark.single -class TestSQLFlavorDeprecation(object): - """ - gh-13611: test that the 'flavor' parameter - is appropriately deprecated by checking the - functions that directly raise the warning - """ - - con = 1234 # don't need real connection for this - funcs = ['SQLiteDatabase', 'pandasSQL_builder'] - - def test_unsupported_flavor(self): - msg = 'is not supported' - - for func in self.funcs: - tm.assert_raises_regex(ValueError, msg, getattr(sql, func), - self.con, flavor='mysql') - - def test_deprecated_flavor(self): - for func in self.funcs: - with tm.assert_produces_warning(FutureWarning, - check_stacklevel=False): - getattr(sql, func)(self.con, flavor='sqlite') - - @pytest.mark.single @pytest.mark.skip(reason="gh-13611: there is no support for MySQL " "if SQLAlchemy is not installed") diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 973fe74429551..178c5ff655b04 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -612,13 +612,16 @@ def test_match(self): def test_extract_expand_None(self): values = Series(['fooBAD__barBAD', NA, 'foo']) - with tm.assert_produces_warning(FutureWarning): + with tm.assert_raises_regex(ValueError, + 'expand must be True or False'): values.str.extract('.*(BAD[_]+).*(BAD)', expand=None) def test_extract_expand_unspecified(self): values = Series(['fooBAD__barBAD', NA, 'foo']) - with tm.assert_produces_warning(FutureWarning): - values.str.extract('.*(BAD[_]+).*(BAD)') + result_unspecified = values.str.extract('.*(BAD[_]+).*') + assert isinstance(result_unspecified, DataFrame) + result_true = values.str.extract('.*(BAD[_]+).*', expand=True) + tm.assert_frame_equal(result_unspecified, result_true) def test_extract_expand_False(self): # Contains tests like those in test_match and some others. 
diff --git a/scripts/list_future_warnings.sh b/scripts/list_future_warnings.sh new file mode 100755 index 0000000000000..e100f305267c1 --- /dev/null +++ b/scripts/list_future_warnings.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Check all future warnings in Python files, and report them with the version +# where the FutureWarning was added. +# +# This is useful to detect features that have been deprecated, and should be +# removed from the code. For example, if a line of code contains: +# +# warning.warn('Method deprecated', FutureWarning, stacklevel=2) +# +# Which is released in Pandas 0.20.0, then it is expected that the method +# is removed before releasing Pandas 0.24.0, including the warning. If it +# is not, this script will list this line, with the version 0.20.0, which +# will make it easy to detect that it had to be removed. +# +# In some cases this script can return false positives, for example in files +# where FutureWarning is used to detect deprecations, or similar. The EXCLUDE +# variable can be used to ignore files that use FutureWarning, but do not +# deprecate functionality. 
+# +# Usage: +# +# $ ./list_future_warnings.sh + +EXCLUDE="^pandas/tests/|" # tests validate that FutureWarnings are raised +EXCLUDE+="^pandas/util/_decorators.py$|" # generic deprecate function that raises warning +EXCLUDE+="^pandas/util/_depr_module.py$|" # generic deprecate module that raises warnings +EXCLUDE+="^pandas/util/testing.py$|" # contains function to evaluate if warning is raised +EXCLUDE+="^pandas/io/parsers.py$" # implements generic deprecation system in io reading + +BASE_DIR="$(dirname $(dirname $(realpath $0)))" +cd $BASE_DIR +FILES=`grep -RIl "FutureWarning" pandas/* | grep -vE "$EXCLUDE"` +OUTPUT=() +IFS=$'\n' + +for FILE in $FILES; do + FILE_LINES=`git blame -sf $FILE | grep FutureWarning | tr -s " " | cut -d " " -f1,3` + for FILE_LINE in $FILE_LINES; do + TAG=$(git tag --contains $(echo $FILE_LINE | cut -d" " -f1) | head -n1) + OUTPUT_ROW=`printf "%-14s %-16s %s" ${TAG:-"(not released)"} $FILE_LINE $FILE` + OUTPUT+=($OUTPUT_ROW) + done +done + +printf "%s\n" "${OUTPUT[@]}" | sort -V
…tools, SQLiteDatabase.flavor, str.extract(expand=None) (#6581) - [X] related to #6581 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry @jreback, from the ones that we discussed, this PR removes all them except Resampler, which is a bit tricky, and I left it for a separate PR.
https://api.github.com/repos/pandas-dev/pandas/pulls/19115
2018-01-07T14:37:18Z
2018-01-07T15:26:06Z
null
2018-01-07T15:26:06Z
Doc: Added warning to treat group chunks as immutable when using apply
diff --git a/doc/source/gotchas.rst b/doc/source/gotchas.rst index 5da0f4fd07819..7ed0b1c800183 100644 --- a/doc/source/gotchas.rst +++ b/doc/source/gotchas.rst @@ -332,3 +332,97 @@ using something similar to the following: See `the NumPy documentation on byte order <https://docs.scipy.org/doc/numpy/user/basics.byteswapping.html>`__ for more details. + + +Alternative to storing lists in Pandas DataFrame Cells +------------------------------------------------------ +Storing nested lists/arrays inside a pandas object should be avoided for performance and memory use reasons. Instead they should be "exploded" into a flat DataFrame structure. + +Example of exploding nested lists into a DataFrame: + +.. ipython:: python + + from collections import OrderedDict + df = (pd.DataFrame(OrderedDict([('name', ['A.J. Price']*3), + ('opponent', ['76ers', 'blazers', 'bobcats']), + ('attribute x', ['A','B','C']) + ]) + )) + df + + nn = [['Zach LaVine', 'Jeremy Lin', 'Nate Robinson', 'Isaia']]*3 + nn + + # Step 1: Create an index with the "parent" columns to be included in the final Dataframe + df2 = pd.concat([df[['name','opponent']], pd.DataFrame(nn)], axis=1) + df2 + + # Step 2: Transform the column with lists into series, which become columns in a new Dataframe. + # Note that only the index from the original df is retained - + # any other columns in the original df are not part of the new df + df3 = df2.set_index(['name', 'opponent']) + df3 + + # Step 3: Stack the new columns as rows; this creates a new index level we'll want to drop in the next step. 
+ # Note that at this point we have a Series, not a Dataframe + ser = df3.stack() + ser + + # Step 4: Drop the extraneous index level created by the stack + ser.reset_index(level=2, drop=True, inplace=True) + ser + + # Step 5: Create a Dataframe from the Series + df4 = ser.to_frame('nearest_neighbors') + df4 + + # All steps in one stack + df4 = (df2.set_index(['name', 'opponent']) + .stack() + .reset_index(level=2, drop=True) + .to_frame('nearest_neighbors')) + df4 + +Example of exploding a list embedded in a dataframe: + +.. ipython:: python + + df = (pd.DataFrame(OrderedDict([('name', ['A.J. Price']*3), + ('opponent', ['76ers', 'blazers', 'bobcats']), + ('attribute x', ['A','B','C']), + ('nearest_neighbors', [['Zach LaVine', 'Jeremy Lin', 'Nate Robinson', 'Isaia']]*3) + ]) + )) + + df + + # Step 1: Create an index with the "parent" columns to be included in the final Dataframe + df2 = df.set_index(['name', 'opponent']) + df2 + + # Step 2: Transform the column with lists into series, which become columns in a new Dataframe. + # Note that only the index from the original df is retained - + # any other columns in the original df are not part of the new df + df3 = df2.nearest_neighbors.apply(pd.Series) + df3 + + # Step 3: Stack the new columns as rows; this creates a new index level we'll want to drop in the next step. 
+ # Note that at this point we have a Series, not a Dataframe + ser = df3.stack() + ser + + # Step 4: Drop the extraneous index level created by the stack + ser.reset_index(level=2, drop=True, inplace=True) + ser + + # Step 5: Create a Dataframe from the Series + df4 = ser.to_frame('nearest_neighbors') + df4 + + # All steps in one stack + df4 = (df.set_index(['name', 'opponent']) + .nearest_neighbors.apply(pd.Series) + .stack() + .reset_index(level=2, drop=True) + .to_frame('nearest_neighbors')) + df4 diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst index 552ddabb7359a..1f9befb4bd59c 100644 --- a/doc/source/groupby.rst +++ b/doc/source/groupby.rst @@ -915,13 +915,17 @@ The dimension of the returned result can also change: So depending on the path taken, and exactly what you are grouping. Thus the grouped columns(s) may be included in the output as well as set the indices. -.. warning:: +.. warnings:: - In the current implementation apply calls func twice on the + * In the current implementation apply calls func twice on the first group to decide whether it can take a fast or slow code path. This can lead to unexpected behavior if func has side-effects, as they will take effect twice for the first group. + + * Apply should not perform in-place operations on the group chunk. + Group chunks should be treated as immutable, and changes to a + group chunk may produce unexpected results. .. ipython:: python @@ -955,6 +959,42 @@ will be (silently) dropped. Thus, this does not pose any problems: df.groupby('A').std() +.. note:: + Decimal columns are also "nuisance" columns. They are excluded from aggregate functions automatically in groupby. + + If you do wish to include decimal columns in the aggregation, you must do so explicitly: + +.. 
ipython:: python + + from decimal import Decimal + dec = pd.DataFrame( + {'name': ['foo', 'bar', 'foo', 'bar'], + 'title': ['boo', 'far', 'boo', 'far'], + 'id': [123, 456, 123, 456], + 'int_column': [1, 2, 3, 4], + 'dec_column1': [Decimal('0.50'), Decimal('0.15'), Decimal('0.25'), Decimal('0.40')], + 'dec_column2': [Decimal('0.20'), Decimal('0.30'), Decimal('0.55'), Decimal('0.60')] + }, + columns=['name','title','id','int_column','dec_column1','dec_column2'] + ) + + dec.head() + + dec.dtypes + + # Decimal columns excluded from sum by default + dec.groupby(['name', 'title', 'id'], as_index=False).sum() + + # Decimal columns can be sum'd explicitly by themselves... + dec.groupby(['name', 'title', 'id'], as_index=False)['dec_column1','dec_column2'].sum() + + # ...but cannot be combined with standard data types or they will be excluded + dec.groupby(['name', 'title', 'id'], as_index=False)['int_column','dec_column1','dec_column2'].sum() + + # Use .agg function to aggregate over standard and "nuisance" data types at the same time + dec.groupby(['name', 'title', 'id'], as_index=False).agg({'int_column': 'sum', 'dec_column1': 'sum', 'dec_column2': 'sum'}) + + .. _groupby.missing: NA and NaT group handling
- [X] closes issue #14180
https://api.github.com/repos/pandas-dev/pandas/pulls/19114
2018-01-07T06:39:46Z
2018-01-11T00:14:49Z
null
2018-01-12T23:57:30Z
CLN: ASV long and broken benchmarks
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py index 2b48168238ee8..4cecf12a27042 100644 --- a/asv_bench/benchmarks/frame_methods.py +++ b/asv_bench/benchmarks/frame_methods.py @@ -4,7 +4,7 @@ from pandas import (DataFrame, Series, MultiIndex, date_range, period_range, isnull, NaT) -from .pandas_vb_common import setup # noqa +from .pandas_vb_common import setup # noqa class GetNumericData(object): @@ -127,7 +127,7 @@ class ToHTML(object): def setup(self): nrows = 500 self.df2 = DataFrame(np.random.randn(nrows, 10)) - self.df2[0] = period_range('2000', '2010', nrows) + self.df2[0] = period_range('2000', periods=nrows) self.df2[1] = range(nrows) def time_to_html_mixed(self): diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 1978d240abedd..4dfd215e6dc3a 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -1,4 +1,4 @@ -from string import ascii_letters, digits +from string import ascii_letters from itertools import product from functools import partial @@ -275,18 +275,12 @@ class GroupStrings(object): def setup(self): n = 2 * 10**5 - alpha = list(map(''.join, product((ascii_letters + digits), repeat=4))) - self.df = DataFrame({'a': np.repeat(np.random.choice(alpha, - (n // 11)), 11), - 'b': np.repeat(np.random.choice(alpha, - (n // 7)), 7), - 'c': np.repeat(np.random.choice(alpha, - (n // 5)), 5), - 'd': np.repeat(np.random.choice(alpha, - (n // 1)), 1)}) + alpha = list(map(''.join, product(ascii_letters, repeat=4))) + data = np.random.choice(alpha, (n // 5, 4), replace=False) + data = np.repeat(data, 5, axis=0) + self.df = DataFrame(data, columns=list('abcd')) self.df['joe'] = (np.random.randn(len(self.df)) * 10).round(3) - i = np.random.permutation(len(self.df)) - self.df = self.df.iloc[i].reset_index(drop=True) + self.df = self.df.sample(frac=1).reset_index(drop=True) def time_multi_columns(self): self.df.groupby(list('abcd')).max() @@ -356,10 +350,16 @@ 
class GroupByMethods(object): goal_time = 0.2 - param_names = ['dtype', 'ngroups'] - params = [['int', 'float'], [100, 10000]] + param_names = ['dtype', 'method'] + params = [['int', 'float'], + ['all', 'any', 'count', 'cumcount', 'cummax', 'cummin', + 'cumprod', 'cumsum', 'describe', 'first', 'head', 'last', 'mad', + 'max', 'min', 'median', 'mean', 'nunique', 'pct_change', 'prod', + 'rank', 'sem', 'shift', 'size', 'skew', 'std', 'sum', 'tail', + 'unique', 'value_counts', 'var']] - def setup(self, dtype, ngroups): + def setup(self, dtype, method): + ngroups = 1000 size = ngroups * 2 rng = np.arange(ngroups) values = rng.take(np.random.randint(0, ngroups, size=size)) @@ -369,104 +369,11 @@ def setup(self, dtype, ngroups): key = np.concatenate([np.random.random(ngroups) * 0.1, np.random.random(ngroups) * 10.0]) - self.df = DataFrame({'values': values, - 'key': key}) + df = DataFrame({'values': values, 'key': key}) + self.df_groupby_method = getattr(df.groupby('key')['values'], method) - def time_all(self, dtype, ngroups): - self.df.groupby('key')['values'].all() - - def time_any(self, dtype, ngroups): - self.df.groupby('key')['values'].any() - - def time_count(self, dtype, ngroups): - self.df.groupby('key')['values'].count() - - def time_cumcount(self, dtype, ngroups): - self.df.groupby('key')['values'].cumcount() - - def time_cummax(self, dtype, ngroups): - self.df.groupby('key')['values'].cummax() - - def time_cummin(self, dtype, ngroups): - self.df.groupby('key')['values'].cummin() - - def time_cumprod(self, dtype, ngroups): - self.df.groupby('key')['values'].cumprod() - - def time_cumsum(self, dtype, ngroups): - self.df.groupby('key')['values'].cumsum() - - def time_describe(self, dtype, ngroups): - self.df.groupby('key')['values'].describe() - - def time_diff(self, dtype, ngroups): - self.df.groupby('key')['values'].diff() - - def time_first(self, dtype, ngroups): - self.df.groupby('key')['values'].first() - - def time_head(self, dtype, ngroups): - 
self.df.groupby('key')['values'].head() - - def time_last(self, dtype, ngroups): - self.df.groupby('key')['values'].last() - - def time_mad(self, dtype, ngroups): - self.df.groupby('key')['values'].mad() - - def time_max(self, dtype, ngroups): - self.df.groupby('key')['values'].max() - - def time_mean(self, dtype, ngroups): - self.df.groupby('key')['values'].mean() - - def time_median(self, dtype, ngroups): - self.df.groupby('key')['values'].median() - - def time_min(self, dtype, ngroups): - self.df.groupby('key')['values'].min() - - def time_nunique(self, dtype, ngroups): - self.df.groupby('key')['values'].nunique() - - def time_pct_change(self, dtype, ngroups): - self.df.groupby('key')['values'].pct_change() - - def time_prod(self, dtype, ngroups): - self.df.groupby('key')['values'].prod() - - def time_rank(self, dtype, ngroups): - self.df.groupby('key')['values'].rank() - - def time_sem(self, dtype, ngroups): - self.df.groupby('key')['values'].sem() - - def time_shift(self, dtype, ngroups): - self.df.groupby('key')['values'].shift() - - def time_size(self, dtype, ngroups): - self.df.groupby('key')['values'].size() - - def time_skew(self, dtype, ngroups): - self.df.groupby('key')['values'].skew() - - def time_std(self, dtype, ngroups): - self.df.groupby('key')['values'].std() - - def time_sum(self, dtype, ngroups): - self.df.groupby('key')['values'].sum() - - def time_tail(self, dtype, ngroups): - self.df.groupby('key')['values'].tail() - - def time_unique(self, dtype, ngroups): - self.df.groupby('key')['values'].unique() - - def time_value_counts(self, dtype, ngroups): - self.df.groupby('key')['values'].value_counts() - - def time_var(self, dtype, ngroups): - self.df.groupby('key')['values'].var() + def time_method(self, dtype, method): + self.df_groupby_method() class Float32(object):
[xref](https://github.com/pandas-dev/pandas/pull/19069#issuecomment-355761409) Shortened the size of the data in `GroupbyMethods` since some benchmarks took +30 seconds on my machine, and additionally fixed some broken benchmarks ``` asv dev -b ^groupby.GroupByMethods · Discovering benchmarks · Running 1 total benchmarks (1 commits * 1 environments * 1 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [100.00%] ··· Running groupby.GroupByMethods.time_method ok [100.00%] ···· ======= ============== ======== dtype method ------- -------------- -------- int all 256ms int any 255ms int count 925μs int cumcount 1.15ms int cummax 1.13ms int cummin 1.15ms int cumprod 1.58ms int cumsum 1.16ms int describe 3.25s int first 1.12ms int head 1.37ms int last 1.12ms int mad 1.42s int max 1.12ms int min 1.16ms int median 1.53ms int mean 1.43ms int nunique 1.40ms int pct_change 1.56s int prod 1.53ms int rank 380ms int sem 414ms int shift 974μs int size 858μs int skew 414ms int std 1.46ms int sum 1.50ms int tail 1.45ms int unique 289ms int value_counts 2.35ms int var 1.34ms float all 402ms float any 406ms float count 1.18ms float cumcount 1.33ms float cummax 1.40ms float cummin 1.40ms float cumprod 1.75ms float cumsum 1.40ms float describe 5.02s float first 1.37ms float head 1.58ms float last 1.36ms float mad 2.01s float max 1.38ms float min 1.37ms float median 1.80ms float mean 1.79ms float nunique 1.60ms float pct_change 2.17s float prod 1.75ms float rank 623ms float sem 416ms float shift 1.18ms float size 1.09ms float skew 646ms float std 1.51ms float sum 1.77ms float tail 1.63ms float unique 457ms float value_counts 2.63ms float var 1.43ms ======= ============== ======== ``` ``` $ asv dev -b ^groupby.GroupStrings · Discovering benchmarks · Running 1 total benchmarks (1 commits * 1 environments * 1 benchmarks) [ 0.00%] ·· Building for 
existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [100.00%] ··· Running groupby.GroupStrings.time_multi_columns 781ms (pandas_dev)matt@matt-Inspiron-1545:~/Projects/pandas-mroeschke/asv_bench$ asv dev -b ^frame_methods.ToHTML · Discovering benchmarks · Running 1 total benchmarks (1 commits * 1 environments * 1 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [100.00%] ··· Running frame_methods.ToHTML.time_to_html_mixed 565ms ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19113
2018-01-07T06:26:28Z
2018-01-10T00:22:07Z
2018-01-10T00:22:07Z
2018-01-10T00:30:25Z
BUG: IntervalIndex set op bugs for empty results
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index dc305f36f32ec..33ca394db47ca 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -405,6 +405,7 @@ Indexing - Bug in :func:`MultiIndex.__contains__` where non-tuple keys would return ``True`` even if they had been dropped (:issue:`19027`) - Bug in :func:`MultiIndex.set_labels` which would cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... ] (:issue:`19057`) - Bug in ``str.extractall`` when there were no matches empty :class:`Index` was returned instead of appropriate :class:`MultiIndex` (:issue:`19034`) +- Bug in :class:`IntervalIndex` where set operations that returned an empty ``IntervalIndex`` had the wrong dtype (:issue:`19101`) I/O ^^^ diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index 43bdc14106b00..baf80173d7362 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -5,7 +5,7 @@ from pandas.core.dtypes.missing import notna, isna from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex from pandas.core.dtypes.dtypes import IntervalDtype -from pandas.core.dtypes.cast import maybe_convert_platform +from pandas.core.dtypes.cast import maybe_convert_platform, find_common_type from pandas.core.dtypes.common import ( _ensure_platform_int, is_list_like, @@ -16,6 +16,7 @@ is_integer_dtype, is_float_dtype, is_interval_dtype, + is_object_dtype, is_scalar, is_float, is_number, @@ -1289,9 +1290,25 @@ def func(self, other): msg = ('can only do set operations between two IntervalIndex ' 'objects that are closed on the same side') other = self._as_like_interval_index(other, msg) + + # GH 19016: ensure set op will not return a prohibited dtype + subtypes = [self.dtype.subtype, other.dtype.subtype] + common_subtype = find_common_type(subtypes) + if is_object_dtype(common_subtype): + msg = ('can only do {op} 
between two IntervalIndex ' + 'objects that have compatible dtypes') + raise TypeError(msg.format(op=op_name)) + result = getattr(self._multiindex, op_name)(other._multiindex) result_name = self.name if self.name == other.name else None - return type(self).from_tuples(result.values, closed=self.closed, + + # GH 19101: ensure empty results have correct dtype + if result.empty: + result = result.values.astype(self.dtype.subtype) + else: + result = result.values + + return type(self).from_tuples(result, closed=self.closed, name=result_name) return func diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index 98db34a9f90f4..b6d49c9e7ba19 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -880,6 +880,16 @@ def test_union(self, closed): tm.assert_index_equal(index.union(index), index) tm.assert_index_equal(index.union(index[:1]), index) + # GH 19101: empty result, same dtype + index = IntervalIndex(np.array([], dtype='int64'), closed=closed) + result = index.union(index) + tm.assert_index_equal(result, index) + + # GH 19101: empty result, different dtypes + other = IntervalIndex(np.array([], dtype='float64'), closed=closed) + result = index.union(other) + tm.assert_index_equal(result, index) + def test_intersection(self, closed): index = self.create_index(closed=closed) other = IntervalIndex.from_breaks(range(5, 13), closed=closed) @@ -893,14 +903,48 @@ def test_intersection(self, closed): tm.assert_index_equal(index.intersection(index), index) + # GH 19101: empty result, same dtype + other = IntervalIndex.from_breaks(range(300, 314), closed=closed) + expected = IntervalIndex(np.array([], dtype='int64'), closed=closed) + result = index.intersection(other) + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, different dtypes + breaks = np.arange(300, 314, dtype='float64') + other = IntervalIndex.from_breaks(breaks, closed=closed) + 
result = index.intersection(other) + tm.assert_index_equal(result, expected) + def test_difference(self, closed): index = self.create_index(closed=closed) tm.assert_index_equal(index.difference(index[:1]), index[1:]) + # GH 19101: empty result, same dtype + result = index.difference(index) + expected = IntervalIndex(np.array([], dtype='int64'), closed=closed) + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, different dtypes + other = IntervalIndex.from_arrays(index.left.astype('float64'), + index.right, closed=closed) + result = index.difference(other) + tm.assert_index_equal(result, expected) + def test_symmetric_difference(self, closed): - idx = self.create_index(closed=closed) - result = idx[1:].symmetric_difference(idx[:-1]) - expected = IntervalIndex([idx[0], idx[-1]]) + index = self.create_index(closed=closed) + result = index[1:].symmetric_difference(index[:-1]) + expected = IntervalIndex([index[0], index[-1]]) + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, same dtype + result = index.symmetric_difference(index) + expected = IntervalIndex(np.array([], dtype='int64'), closed=closed) + tm.assert_index_equal(result, expected) + + # GH 19101: empty result, different dtypes + other = IntervalIndex.from_arrays(index.left.astype('float64'), + index.right, closed=closed) + result = index.symmetric_difference(other) tm.assert_index_equal(result, expected) @pytest.mark.parametrize('op_name', [ @@ -909,17 +953,25 @@ def test_set_operation_errors(self, closed, op_name): index = self.create_index(closed=closed) set_op = getattr(index, op_name) - # test errors + # non-IntervalIndex msg = ('can only do set operations between two IntervalIndex objects ' 'that are closed on the same side') with tm.assert_raises_regex(ValueError, msg): set_op(Index([1, 2, 3])) + # mixed closed for other_closed in {'right', 'left', 'both', 'neither'} - {closed}: other = self.create_index(closed=other_closed) with 
tm.assert_raises_regex(ValueError, msg): set_op(other) + # GH 19016: incompatible dtypes + other = interval_range(Timestamp('20180101'), periods=9, closed=closed) + msg = ('can only do {op} between two IntervalIndex objects that have ' + 'compatible dtypes').format(op=op_name) + with tm.assert_raises_regex(TypeError, msg): + set_op(other) + def test_isin(self, closed): index = self.create_index(closed=closed)
- [X] closes #19101 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry Fixed bugs described in the issue: - Set ops that result in an empty index no longer raises. - For empty results, the return dtype now matches the dtype of the first index. - Previously would always return `float64` or `object` for empty results, depending on the op. - Didn't mention the raising issue in the whatsnew, since that behavior never occurred on a release, but did mention the dtype change since it occurs on 0.22.0. Added an additional #19016 follow-up: - Raise when set operation would result in a prohibited dtype: - Occurs when the common subtype between the two indexes is `object`. - Ex: `union` between `interval[int64]` and `interval[datetime64[ns]]`. - Behavior is generally still the same as after #19016, but improved _how_ the behavior comes about: - Previously the error was raised _after_ the set op data was computed, now the check occurs before. - Previous error message was generic, now the error message is more specific.
https://api.github.com/repos/pandas-dev/pandas/pulls/19112
2018-01-07T06:23:05Z
2018-01-12T11:46:34Z
2018-01-12T11:46:34Z
2018-01-12T16:44:54Z
BUG: show time values in repr of high precision DatetimeIndex
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index b1efd0dcb43e2..ebb9e5ad41a77 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -375,7 +375,8 @@ Conversion - Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (issue:`19042`) - Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (issue:`19043`) - Fixed bug where comparing :class:`DatetimeIndex` failed to raise ``TypeError`` when attempting to compare timezone-aware and timezone-naive datetimelike objects (:issue:`18162`) -- +- Bug in :class:`DatetimeIndex` where the repr was not showing high-precision time values at the end of a day (e.g., 23:59:59.999999999) (:issue:`19030`) + Indexing ^^^^^^^^ diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index a4678e5b40849..886a887568d69 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -2188,7 +2188,7 @@ def _is_dates_only(values): consider_values = values_int != iNaT one_day_nanos = (86400 * 1e9) even_days = np.logical_and(consider_values, - values_int % one_day_nanos != 0).sum() == 0 + values_int % int(one_day_nanos) != 0).sum() == 0 if even_days: return True return False diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py index 23b42b612dace..b277d8256e612 100644 --- a/pandas/tests/io/formats/test_format.py +++ b/pandas/tests/io/formats/test_format.py @@ -883,6 +883,29 @@ def test_datetimelike_frame(self): '[10 rows x 2 columns]') assert repr(df) == expected + @pytest.mark.parametrize('start_date', [ + '2017-01-01 23:59:59.999999999', + '2017-01-01 23:59:59.99999999', + '2017-01-01 23:59:59.9999999', + '2017-01-01 23:59:59.999999', + '2017-01-01 23:59:59.99999', + '2017-01-01 23:59:59.9999', + ]) + def test_datetimeindex_highprecision(self, 
start_date): + # GH19030 + # Check that high-precision time values for the end of day are + # included in repr for DatetimeIndex + df = DataFrame({'A': date_range(start=start_date, + freq='D', periods=5)}) + result = str(df) + assert start_date in result + + dti = date_range(start=start_date, + freq='D', periods=5) + df = DataFrame({'A': range(5)}, index=dti) + result = str(df.index) + assert start_date in result + def test_nonunicode_nonascii_alignment(self): df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]]) rep_str = df.to_string() @@ -1914,6 +1937,27 @@ def test_datetimeindex(self): result = str(s2.index) assert 'NaT' in result + @pytest.mark.parametrize('start_date', [ + '2017-01-01 23:59:59.999999999', + '2017-01-01 23:59:59.99999999', + '2017-01-01 23:59:59.9999999', + '2017-01-01 23:59:59.999999', + '2017-01-01 23:59:59.99999', + '2017-01-01 23:59:59.9999' + ]) + def test_datetimeindex_highprecision(self, start_date): + # GH19030 + # Check that high-precision time values for the end of day are + # included in repr for DatetimeIndex + s1 = Series(date_range(start=start_date, freq='D', periods=5)) + result = str(s1) + assert start_date in result + + dti = date_range(start=start_date, freq='D', periods=5) + s2 = Series(3, index=dti) + result = str(s2.index) + assert start_date in result + def test_timedelta64(self): from datetime import datetime, timedelta
- [ ] closes #19030 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry On master: ``` In [2]: pd.DatetimeIndex(['2017-01-01 23:59:59.999999999']) Out[2]: DatetimeIndex(['2017-01-01'], dtype='datetime64[ns]', freq=None) ``` On my branch: ``` In [3]: pd.DatetimeIndex(['2017-01-01 23:59:59.999999999']) Out[3]: DatetimeIndex(['2017-01-01 23:59:59.999999999'], dtype='datetime64[ns]', freq=None) ``` It seems that it's only a problem when the time value was 23:59:59.999999999 or very similar, but I may need to add some more tests to check this further.
https://api.github.com/repos/pandas-dev/pandas/pulls/19109
2018-01-06T20:23:16Z
2018-01-07T15:27:17Z
2018-01-07T15:27:16Z
2018-01-07T15:44:23Z
DEPR: remove display.line_width and display.height as old prior deprecations
diff --git a/ci/lint.sh b/ci/lint.sh index 54135bd551b69..35b39e2abb3c6 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -120,6 +120,10 @@ if [ "$LINT" ]; then echo "Check for deprecated messages without sphinx directive" grep -R --include="*.py" --include="*.pyx" -E "(DEPRECATED|DEPRECATE|Deprecated)(:|,|\.)" pandas + + if [ $? = "0" ]; then + RET=1 + fi echo "Check for deprecated messages without sphinx directive DONE" else echo "NOT Linting" diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 800599f728de1..6044d0b5e5017 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -313,6 +313,7 @@ Removal of prior version deprecations/changes - The ``freqstr`` keyword has been removed from ``pandas.tseries.frequencies.to_offset`` in favor of ``freq`` (:issue:`13874`) - The ``Panel4D`` and ``PanelND`` classes have been removed (:issue:`13776`) - The ``Panel``class has dropped the ``to_long``and ``toLong`` methods (:issue:`19077`) +- The options ``display.line_with`` and ``display.height`` are removed in favor of ``display.width`` and ``display.max_rows`` respectively (:issue:`4391`, :issue:`19107`) .. 
_whatsnew_0230.performance: @@ -369,7 +370,7 @@ Conversion - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) - Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) - Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) -- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) +- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) - Bug in :class:`Series`` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` had results cast to ``dtype='int64'`` (:issue:`17250`) - Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (issue:`19042`) - Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (issue:`19043`) diff --git a/pandas/core/config_init.py b/pandas/core/config_init.py index c3307c60b8ed9..da42cdbf10233 100644 --- a/pandas/core/config_init.py +++ b/pandas/core/config_init.py @@ -11,8 +11,7 @@ """ import pandas.core.config as cf from pandas.core.config import (is_int, is_bool, is_text, is_instance_factory, - is_one_of_factory, get_default_val, - is_callable) + is_one_of_factory, is_callable) from pandas.io.formats.console import detect_console_encoding # compute @@ -170,11 +169,6 @@ def use_numexpr_cb(key): frame is truncated (e.g. not display all rows and/or columns) """ -pc_line_width_doc = """ -: int - Deprecated. 
-""" - pc_east_asian_width_doc = """ : boolean Whether to use the Unicode East Asian Width to calculate the display text @@ -223,11 +217,6 @@ def use_numexpr_cb(key): terminal and hence it is not possible to correctly detect the width. """ -pc_height_doc = """ -: int - Deprecated. -""" - pc_chop_threshold_doc = """ : float or None if set to a float value, all float values smaller then the given threshold @@ -344,13 +333,8 @@ def table_schema_cb(key): validator=is_one_of_factory([True, False, 'truncate'])) cf.register_option('chop_threshold', None, pc_chop_threshold_doc) cf.register_option('max_seq_items', 100, pc_max_seq_items) - cf.register_option('height', 60, pc_height_doc, - validator=is_instance_factory([type(None), int])) cf.register_option('width', 80, pc_width_doc, validator=is_instance_factory([type(None), int])) - # redirected to width, make defval identical - cf.register_option('line_width', get_default_val('display.width'), - pc_line_width_doc) cf.register_option('memory_usage', True, pc_memory_usage_doc, validator=is_one_of_factory([None, True, False, 'deep']))
https://api.github.com/repos/pandas-dev/pandas/pulls/19107
2018-01-06T18:23:44Z
2018-01-07T02:37:47Z
2018-01-07T02:37:47Z
2018-01-07T02:38:23Z
COMPAT: empty IntervalIndexis np.int64 dtype
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py index fd1980f9ab429..43bdc14106b00 100644 --- a/pandas/core/indexes/interval.py +++ b/pandas/core/indexes/interval.py @@ -114,7 +114,7 @@ def maybe_convert_platform_interval(values): # GH 19016 # empty lists/tuples get object dtype by default, but this is not # prohibited for IntervalIndex, so coerce to integer instead - return np.array([], dtype=np.intp) + return np.array([], dtype=np.int64) return maybe_convert_platform(values) diff --git a/pandas/tests/indexes/interval/test_interval.py b/pandas/tests/indexes/interval/test_interval.py index dd673294b128f..98db34a9f90f4 100644 --- a/pandas/tests/indexes/interval/test_interval.py +++ b/pandas/tests/indexes/interval/test_interval.py @@ -140,7 +140,7 @@ def test_constructors_nan(self, closed, data): np.array([], dtype='datetime64[ns]')]) def test_constructors_empty(self, data, closed): # GH 18421 - expected_dtype = getattr(data, 'dtype', np.intp) + expected_dtype = getattr(data, 'dtype', np.int64) expected_values = np.array([], dtype=object) expected_index = IntervalIndex(data, closed=closed)
xref #19022 32-bit wheel failure: https://travis-ci.org/MacPython/pandas-wheels/jobs/325678205
https://api.github.com/repos/pandas-dev/pandas/pulls/19106
2018-01-06T17:50:55Z
2018-01-07T00:48:10Z
2018-01-07T00:48:10Z
2018-01-07T01:01:12Z
DOC: Fixed capitalization of NumFOCUS
diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 4443428ca6c9b..f86b1c67e6843 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -94,7 +94,7 @@ pandas possible. Thanks to `all of our contributors <https://github.com/pandas-d If you're interested in contributing, please visit `Contributing to pandas webpage <http://pandas.pydata.org/pandas-docs/stable/contributing.html>`__. -pandas is a `NUMFocus <https://www.numfocus.org/open-source-projects/>`__ sponsored project. +pandas is a `NumFOCUS <https://www.numfocus.org/open-source-projects/>`__ sponsored project. This will help ensure the success of development of pandas as a world-class open-source project, and makes it possible to `donate <https://pandas.pydata.org/donate.html>`__ to the project. diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt index d5ed0503d9ee3..6e5e113e859d7 100644 --- a/doc/source/whatsnew/v0.17.1.txt +++ b/doc/source/whatsnew/v0.17.1.txt @@ -5,7 +5,7 @@ v0.17.1 (November 21, 2015) .. note:: - We are proud to announce that *pandas* has become a sponsored project of the (`NUMFocus organization`_). This will help ensure the success of development of *pandas* as a world-class open-source project. + We are proud to announce that *pandas* has become a sponsored project of the (`NumFOCUS organization`_). This will help ensure the success of development of *pandas* as a world-class open-source project. .. _numfocus organization: http://www.numfocus.org/blog/numfocus-announces-new-fiscally-sponsored-project-pandas
Closes https://github.com/pandas-dev/pandas/issues/19099
https://api.github.com/repos/pandas-dev/pandas/pulls/19100
2018-01-05T22:06:24Z
2018-01-05T22:18:40Z
2018-01-05T22:18:39Z
2018-01-05T22:18:50Z
MAINT: Added Code of Conduct to .github folder
diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000..a1fbece3284ec --- /dev/null +++ b/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,63 @@ +# Contributor Code of Conduct + +As contributors and maintainers of this project, and in the interest of +fostering an open and welcoming community, we pledge to respect all people who +contribute through reporting issues, posting feature requests, updating +documentation, submitting pull requests or patches, and other activities. + +We are committed to making participation in this project a harassment-free +experience for everyone, regardless of level of experience, gender, gender +identity and expression, sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, such as physical or electronic + addresses, without explicit permission +* Other unethical or unprofessional conduct + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +By adopting this Code of Conduct, project maintainers commit themselves to +fairly and consistently applying these principles to every aspect of managing +this project. Project maintainers who do not follow or enforce the Code of +Conduct may be permanently removed from the project team. + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
+ +A working group of community members is committed to promptly addressing any +reported issues. The working group is made up of pandas contributors and users. +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the working group by e-mail (pandas-coc@googlegroups.com). +Messages sent to this e-mail address will not be publicly visible but only to +the working group members. The working group currently includes + +- Safia Abdalla +- Tom Augspurger +- Joris Van den Bossche +- Camille Scott +- Nathaniel Smith + +All complaints will be reviewed and investigated and will result in a response +that is deemed necessary and appropriate to the circumstances. Maintainers are +obligated to maintain confidentiality with regard to the reporter of an +incident. + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.3.0, available at +[http://contributor-covenant.org/version/1/3/0/][version], +and the [Swift Code of Conduct][swift]. + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/3/0/ +[swift]: https://swift.org/community/#code-of-conduct +
https://help.github.com/articles/adding-a-code-of-conduct-to-your-project/ Github treats this file special in a few places. I think https://github.com/pandas-dev/pandas-governance/blob/master/code-of-conduct.md should always be considered the "official" home. Perhaps we could add a note to the one added here about that. [ci-skip] - [ ] closes #xxxx - [ ] tests added / passed - [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19098
2018-01-05T21:34:05Z
2018-01-05T23:34:19Z
2018-01-05T23:34:19Z
2018-05-02T13:09:37Z
BUG: Fixed union_categoricals with unordered cats
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 800599f728de1..4f1e67bad9a54 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -453,8 +453,11 @@ Numeric Categorical ^^^^^^^^^^^ -- Bug in ``Categorical.equals`` between two unordered categories with the same categories, but in a different order (:issue:`16603`) - +- Bug in :func:`pandas.api.types.union_categoricals` returning the wrong result + when all the categoricals had the same categories, but in a different order. + This affected :func:`pandas.concat` with Categorical data (:issue:`19096`). +- Bug in ``Categorical.equals`` between two unordered categories with the same categories, but in a different order (:issue:`16603`) - Other diff --git a/pandas/core/dtypes/concat.py b/pandas/core/dtypes/concat.py index cd98064dee86e..5e6193d673756 100644 --- a/pandas/core/dtypes/concat.py +++ b/pandas/core/dtypes/concat.py @@ -339,7 +339,16 @@ def _maybe_unwrap(x): # identical categories - fastpath categories = first.categories ordered = first.ordered - new_codes = np.concatenate([c.codes for c in to_union]) + + if all(first.categories.equals(other.categories) + for other in to_union[1:]): + new_codes = np.concatenate([c.codes for c in to_union]) + else: + codes = [first.codes] + [_recode_for_categories(other.codes, + other.categories, + first.categories) + for other in to_union[1:]] + new_codes = np.concatenate(codes) if sort_categories and not ignore_order and ordered: raise TypeError("Cannot use sort_categories=True with " diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index 85e3115e96f83..150410e404305 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -481,6 +481,15 @@ def test_concat_categorical(self): tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp) tm.assert_series_equal(s1.append(s2, ignore_index=True), exp) + def 
test_union_categorical_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19096 + a = pd.Series(Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c'])) + b = pd.Series(Categorical(['a', 'b', 'c'], categories=['b', 'a', 'c'])) + result = pd.concat([a, b], ignore_index=True) + expected = pd.Series(Categorical(['a', 'b', 'c', 'a', 'b', 'c'], + categories=['a', 'b', 'c'])) + tm.assert_series_equal(result, expected) + def test_concat_categorical_coercion(self): # GH 13524 diff --git a/pandas/tests/reshape/test_union_categoricals.py b/pandas/tests/reshape/test_union_categoricals.py index 3211574f834f5..8743d11118200 100644 --- a/pandas/tests/reshape/test_union_categoricals.py +++ b/pandas/tests/reshape/test_union_categoricals.py @@ -129,6 +129,15 @@ def test_union_categorical_same_category(self): categories=['x', 'y', 'z']) tm.assert_categorical_equal(res, exp) + def test_union_categorical_same_categories_different_order(self): + # https://github.com/pandas-dev/pandas/issues/19096 + c1 = Categorical(['a', 'b', 'c'], categories=['a', 'b', 'c']) + c2 = Categorical(['a', 'b', 'c'], categories=['b', 'a', 'c']) + result = union_categoricals([c1, c2]) + expected = Categorical(['a', 'b', 'c', 'a', 'b', 'c'], + categories=['a', 'b', 'c']) + tm.assert_categorical_equal(result, expected) + def test_union_categoricals_ordered(self): c1 = Categorical([1, 2, 3], ordered=True) c2 = Categorical([1, 2, 3], ordered=False)
Closes https://github.com/pandas-dev/pandas/issues/19096
https://api.github.com/repos/pandas-dev/pandas/pulls/19097
2018-01-05T21:18:48Z
2018-01-07T00:48:51Z
2018-01-07T00:48:51Z
2018-01-09T20:06:31Z
Fix TimedeltaIndex +/- offset array
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a62a737fbba31..389d167437909 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -308,7 +308,7 @@ Conversion - Bug in :class:`WeekOfMonth` and class:`Week` where addition and subtraction did not roll correctly (:issue:`18510`,:issue:`18672`,:issue:`18864`) - Bug in :meth:`DatetimeIndex.astype` when converting between timezone aware dtypes, and converting from timezone aware to naive (:issue:`18951`) - Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) -- Bug in :class:`DatetimeIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`) +- Bug in :class:`DatetimeIndex` and :class:`TimedeltaIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`) - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) - Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) - Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) diff --git a/pandas/core/indexes/datetimelike.py b/pandas/core/indexes/datetimelike.py index ee2fdd213dd9a..d26dbeb498b7a 100644 --- a/pandas/core/indexes/datetimelike.py +++ b/pandas/core/indexes/datetimelike.py @@ -675,20 +675,20 @@ def __add__(self, other): return NotImplemented elif is_timedelta64_dtype(other): return self._add_delta(other) + elif isinstance(other, (DateOffset, timedelta)): + return self._add_delta(other) + elif 
is_offsetlike(other): + # Array/Index of DateOffset objects + return self._add_offset_array(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if hasattr(other, '_add_delta'): return other._add_delta(self) raise TypeError("cannot add TimedeltaIndex and {typ}" .format(typ=type(other))) - elif isinstance(other, (DateOffset, timedelta)): - return self._add_delta(other) elif is_integer(other): return self.shift(other) elif isinstance(other, (datetime, np.datetime64)): return self._add_datelike(other) - elif is_offsetlike(other): - # Array/Index of DateOffset objects - return self._add_offset_array(other) elif isinstance(other, Index): return self._add_datelike(other) else: # pragma: no cover @@ -708,6 +708,11 @@ def __sub__(self, other): return NotImplemented elif is_timedelta64_dtype(other): return self._add_delta(-other) + elif isinstance(other, (DateOffset, timedelta)): + return self._add_delta(-other) + elif is_offsetlike(other): + # Array/Index of DateOffset objects + return self._sub_offset_array(other) elif isinstance(self, TimedeltaIndex) and isinstance(other, Index): if not isinstance(other, TimedeltaIndex): raise TypeError("cannot subtract TimedeltaIndex and {typ}" @@ -715,17 +720,12 @@ def __sub__(self, other): return self._add_delta(-other) elif isinstance(other, DatetimeIndex): return self._sub_datelike(other) - elif isinstance(other, (DateOffset, timedelta)): - return self._add_delta(-other) elif is_integer(other): return self.shift(-other) elif isinstance(other, (datetime, np.datetime64)): return self._sub_datelike(other) elif isinstance(other, Period): return self._sub_period(other) - elif is_offsetlike(other): - # Array/Index of DateOffset objects - return self._sub_offset_array(other) elif isinstance(other, Index): raise TypeError("cannot subtract {typ1} and {typ2}" .format(typ1=type(self).__name__, diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index d28a09225e8b8..984e2a26d8c95 100644 --- 
a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -1,6 +1,8 @@ """ implement the TimedeltaIndex """ from datetime import timedelta +import warnings + import numpy as np from pandas.core.dtypes.common import ( _TD_DTYPE, @@ -364,8 +366,8 @@ def _add_delta(self, delta): # update name when delta is index name = com._maybe_match_name(self, delta) else: - raise ValueError("cannot add the type {0} to a TimedeltaIndex" - .format(type(delta))) + raise TypeError("cannot add the type {0} to a TimedeltaIndex" + .format(type(delta))) result = TimedeltaIndex(new_values, freq='infer', name=name) return result @@ -411,6 +413,47 @@ def _sub_datelike(self, other): raise TypeError("cannot subtract a datelike from a TimedeltaIndex") return DatetimeIndex(result, name=self.name, copy=False) + def _add_offset_array(self, other): + # Array/Index of DateOffset objects + try: + # TimedeltaIndex can only operate with a subset of DateOffset + # subclasses. Incompatible classes will raise AttributeError, + # which we re-raise as TypeError + if isinstance(other, ABCSeries): + return NotImplemented + elif len(other) == 1: + return self + other[0] + else: + from pandas.errors import PerformanceWarning + warnings.warn("Adding/subtracting array of DateOffsets to " + "{} not vectorized".format(type(self)), + PerformanceWarning) + return self.astype('O') + np.array(other) + # TODO: This works for __add__ but loses dtype in __sub__ + except AttributeError: + raise TypeError("Cannot add non-tick DateOffset to TimedeltaIndex") + + def _sub_offset_array(self, other): + # Array/Index of DateOffset objects + try: + # TimedeltaIndex can only operate with a subset of DateOffset + # subclasses. 
Incompatible classes will raise AttributeError, + # which we re-raise as TypeError + if isinstance(other, ABCSeries): + return NotImplemented + elif len(other) == 1: + return self - other[0] + else: + from pandas.errors import PerformanceWarning + warnings.warn("Adding/subtracting array of DateOffsets to " + "{} not vectorized".format(type(self)), + PerformanceWarning) + res_values = self.astype('O').values - np.array(other) + return self.__class__(res_values, freq='infer') + except AttributeError: + raise TypeError("Cannot subtrack non-tick DateOffset from" + " TimedeltaIndex") + def _format_native_types(self, na_rep=u('NaT'), date_format=None, **kwargs): from pandas.io.formats.format import Timedelta64Formatter diff --git a/pandas/tests/indexes/timedeltas/test_arithmetic.py b/pandas/tests/indexes/timedeltas/test_arithmetic.py index 3ecfcaff63bc5..2581a8fad078a 100644 --- a/pandas/tests/indexes/timedeltas/test_arithmetic.py +++ b/pandas/tests/indexes/timedeltas/test_arithmetic.py @@ -10,6 +10,7 @@ to_timedelta, timedelta_range, date_range, Series, Timestamp, Timedelta) +from pandas.errors import PerformanceWarning @pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2), @@ -28,23 +29,104 @@ def freq(request): class TestTimedeltaIndexArithmetic(object): _holder = TimedeltaIndex - @pytest.mark.xfail(reason='GH#18824 ufunc add cannot use operands...') - def test_tdi_with_offset_array(self): + @pytest.mark.parametrize('box', [np.array, pd.Index]) + def test_tdi_add_offset_array(self, box): # GH#18849 - tdi = pd.TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) - offs = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) - expected = pd.TimedeltaIndex(['1 days 01:00:00', '3 days 04:02:00']) + tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) + other = box([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) - res = tdi + offs + expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))], + freq='infer') + + with 
tm.assert_produces_warning(PerformanceWarning): + res = tdi + other tm.assert_index_equal(res, expected) - res2 = offs + tdi + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + tdi tm.assert_index_equal(res2, expected) - anchored = np.array([pd.offsets.QuarterEnd(), - pd.offsets.Week(weekday=2)]) + anchored = box([pd.offsets.QuarterEnd(), + pd.offsets.Week(weekday=2)]) + + # addition/subtraction ops with anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. + with pytest.raises(TypeError): + with tm.assert_produces_warning(PerformanceWarning): + tdi + anchored + with pytest.raises(TypeError): + with tm.assert_produces_warning(PerformanceWarning): + anchored + tdi + + @pytest.mark.parametrize('box', [np.array, pd.Index]) + def test_tdi_sub_offset_array(self, box): + # GH#18824 + tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00']) + other = box([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)]) + + expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))], + freq='infer') + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi - other + tm.assert_index_equal(res, expected) + + anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]) + + # addition/subtraction ops with anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. 
+ with pytest.raises(TypeError): + with tm.assert_produces_warning(PerformanceWarning): + tdi - anchored + with pytest.raises(TypeError): + with tm.assert_produces_warning(PerformanceWarning): + anchored - tdi + + @pytest.mark.parametrize('names', [(None, None, None), + ('foo', 'bar', None), + ('foo', 'foo', 'foo')]) + def test_tdi_with_offset_series(self, names): + # GH#18849 + tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'], + name=names[0]) + other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)], + name=names[1]) + + expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))], + name=names[2]) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi + other + tm.assert_series_equal(res, expected_add) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + tdi + tm.assert_series_equal(res2, expected_add) + + expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))], + name=names[2]) + + with tm.assert_produces_warning(PerformanceWarning): + res3 = tdi - other + tm.assert_series_equal(res3, expected_sub) + + anchored = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], + name=names[1]) + + # addition/subtraction ops with anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. + with pytest.raises(TypeError): + with tm.assert_produces_warning(PerformanceWarning): + tdi + anchored + with pytest.raises(TypeError): + with tm.assert_produces_warning(PerformanceWarning): + anchored + tdi + with pytest.raises(TypeError): + with tm.assert_produces_warning(PerformanceWarning): + tdi - anchored with pytest.raises(TypeError): - tdi + anchored + with tm.assert_produces_warning(PerformanceWarning): + anchored - tdi # TODO: Split by ops, better name def test_numeric_compat(self):
No issue specific to this, but analogus to #18849. There is a checkbox for this in #18824. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19095
2018-01-05T18:21:32Z
2018-01-07T15:08:31Z
2018-01-07T15:08:31Z
2018-01-07T17:29:13Z
TST: Finish groupby aggregate cleanup
diff --git a/pandas/tests/groupby/aggregate/test_aggregate.py b/pandas/tests/groupby/aggregate/test_aggregate.py index caf2365a54ec8..7cc6c2fa7b88c 100644 --- a/pandas/tests/groupby/aggregate/test_aggregate.py +++ b/pandas/tests/groupby/aggregate/test_aggregate.py @@ -10,285 +10,298 @@ import pandas as pd from pandas import concat, DataFrame, Index, MultiIndex, Series -from pandas.core.groupby import SpecificationError +from pandas.core.groupby import Grouping, SpecificationError from pandas.compat import OrderedDict import pandas.util.testing as tm -class TestGroupByAggregate(object): - - def setup_method(self, method): - self.ts = tm.makeTimeSeries() - - self.seriesd = tm.getSeriesData() - self.tsd = tm.getTimeSeriesData() - self.frame = DataFrame(self.seriesd) - self.tsframe = DataFrame(self.tsd) - - self.df = DataFrame( - {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.random.randn(8)}) - - self.df_mixed_floats = DataFrame( - {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], - 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], - 'C': np.random.randn(8), - 'D': np.array(np.random.randn(8), dtype='float32')}) - - index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], - ['one', 'two', 'three']], - labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], - [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], - names=['first', 'second']) - self.mframe = DataFrame(np.random.randn(10, 3), index=index, - columns=['A', 'B', 'C']) - - self.three_group = DataFrame( - {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar', - 'foo', 'foo', 'foo'], - 'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two', - 'two', 'two', 'one'], - 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny', - 'dull', 'shiny', 'shiny', 'shiny'], - 'D': np.random.randn(11), - 'E': np.random.randn(11), - 'F': np.random.randn(11)}) - - def 
test_agg_regression1(self): - grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month]) - result = grouped.agg(np.mean) - expected = grouped.mean() - tm.assert_frame_equal(result, expected) - - def test_agg_must_agg(self): - grouped = self.df.groupby('A')['C'] - - msg = "Must produce aggregated value" - with tm.assert_raises_regex(Exception, msg): - grouped.agg(lambda x: x.describe()) - with tm.assert_raises_regex(Exception, msg): - grouped.agg(lambda x: x.index[:2]) - - def test_agg_ser_multi_key(self): - # TODO(wesm): unused - ser = self.df.C # noqa - - f = lambda x: x.sum() - results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f) - expected = self.df.groupby(['A', 'B']).sum()['C'] - tm.assert_series_equal(results, expected) - - def test_agg_apply_corner(self): - # nothing to group, all NA - grouped = self.ts.groupby(self.ts * np.nan) - assert self.ts.dtype == np.float64 - - # groupby float64 values results in Float64Index - exp = Series([], dtype=np.float64, - index=pd.Index([], dtype=np.float64)) - tm.assert_series_equal(grouped.sum(), exp) - tm.assert_series_equal(grouped.agg(np.sum), exp) - tm.assert_series_equal(grouped.apply(np.sum), exp, - check_index_type=False) - - # DataFrame - grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan) - exp_df = DataFrame(columns=self.tsframe.columns, dtype=float, - index=pd.Index([], dtype=np.float64)) - tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False) - tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False) - tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], - check_names=False) - - def test_agg_grouping_is_list_tuple(self): - from pandas.core.groupby import Grouping - - df = tm.makeTimeDataFrame() - - grouped = df.groupby(lambda x: x.year) - grouper = grouped.grouper.groupings[0].grouper - grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper)) - - result = grouped.agg(np.mean) - expected = grouped.mean() - tm.assert_frame_equal(result, 
expected) - - grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper)) - - result = grouped.agg(np.mean) - expected = grouped.mean() - tm.assert_frame_equal(result, expected) - - def test_agg_python_multiindex(self): - grouped = self.mframe.groupby(['A', 'B']) - - result = grouped.agg(np.mean) - expected = grouped.mean() - tm.assert_frame_equal(result, expected) - - @pytest.mark.parametrize('groupbyfunc', [ - lambda x: x.weekday(), - [lambda x: x.month, lambda x: x.weekday()], - ]) - def test_aggregate_str_func(self, groupbyfunc): - grouped = self.tsframe.groupby(groupbyfunc) - - # single series - result = grouped['A'].agg('std') - expected = grouped['A'].std() - tm.assert_series_equal(result, expected) - - # group frame by function name - result = grouped.aggregate('var') - expected = grouped.var() - tm.assert_frame_equal(result, expected) - - # group frame by function dict - result = grouped.agg(OrderedDict([['A', 'var'], - ['B', 'std'], - ['C', 'mean'], - ['D', 'sem']])) - expected = DataFrame(OrderedDict([['A', grouped['A'].var()], - ['B', grouped['B'].std()], - ['C', grouped['C'].mean()], - ['D', grouped['D'].sem()]])) - tm.assert_frame_equal(result, expected) - - def test_aggregate_item_by_item(self): - df = self.df.copy() - df['E'] = ['a'] * len(self.df) - grouped = self.df.groupby('A') - - aggfun = lambda ser: ser.size - result = grouped.agg(aggfun) - foo = (self.df.A == 'foo').sum() - bar = (self.df.A == 'bar').sum() - K = len(result.columns) - - # GH5782 - # odd comparisons can result here, so cast to make easy - exp = pd.Series(np.array([foo] * K), index=list('BCD'), - dtype=np.float64, name='foo') - tm.assert_series_equal(result.xs('foo'), exp) - - exp = pd.Series(np.array([bar] * K), index=list('BCD'), - dtype=np.float64, name='bar') - tm.assert_almost_equal(result.xs('bar'), exp) - - def aggfun(ser): - return ser.size - - result = DataFrame().groupby(self.df.A).agg(aggfun) - assert isinstance(result, DataFrame) - assert len(result) == 0 
- - def test_wrap_agg_out(self): - grouped = self.three_group.groupby(['A', 'B']) - - def func(ser): - if ser.dtype == np.object: - raise TypeError - else: - return ser.sum() - - result = grouped.aggregate(func) - exp_grouped = self.three_group.loc[:, self.three_group.columns != 'C'] - expected = exp_grouped.groupby(['A', 'B']).aggregate(func) - tm.assert_frame_equal(result, expected) - - def test_agg_multiple_functions_maintain_order(self): - # GH #610 - funcs = [('mean', np.mean), ('max', np.max), ('min', np.min)] - result = self.df.groupby('A')['C'].agg(funcs) - exp_cols = Index(['mean', 'max', 'min']) - - tm.assert_index_equal(result.columns, exp_cols) - - def test_multiple_functions_tuples_and_non_tuples(self): - # #1359 - funcs = [('foo', 'mean'), 'std'] - ex_funcs = [('foo', 'mean'), ('std', 'std')] - - result = self.df.groupby('A')['C'].agg(funcs) - expected = self.df.groupby('A')['C'].agg(ex_funcs) - tm.assert_frame_equal(result, expected) - - result = self.df.groupby('A').agg(funcs) - expected = self.df.groupby('A').agg(ex_funcs) - tm.assert_frame_equal(result, expected) - - def test_agg_multiple_functions_too_many_lambdas(self): - grouped = self.df.groupby('A') - funcs = ['mean', lambda x: x.mean(), lambda x: x.std()] - - msg = 'Function names must be unique, found multiple named <lambda>' - with tm.assert_raises_regex(SpecificationError, msg): - grouped.agg(funcs) - - def test_more_flexible_frame_multi_function(self): - grouped = self.df.groupby('A') - - exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]])) - exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]])) - - expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1) - expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1) - - d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]]) - result = grouped.aggregate(d) +@pytest.fixture +def ts(): + return tm.makeTimeSeries() + + +@pytest.fixture +def tsframe(): + return 
DataFrame(tm.getTimeSeriesData()) + + +@pytest.fixture +def df(): + return DataFrame( + {'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], + 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], + 'C': np.random.randn(8), + 'D': np.random.randn(8)}) + + +@pytest.fixture +def mframe(): + index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], + ['one', 'two', 'three']], + labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], + [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]], + names=['first', 'second']) + return DataFrame(np.random.randn(10, 3), + index=index, + columns=['A', 'B', 'C']) + + +@pytest.fixture +def three_group(): + return DataFrame( + {'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', + 'bar', 'bar', 'foo', 'foo', 'foo'], + 'B': ['one', 'one', 'one', 'two', 'one', 'one', + 'one', 'two', 'two', 'two', 'one'], + 'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', + 'shiny', 'dull', 'shiny', 'shiny', 'shiny'], + 'D': np.random.randn(11), + 'E': np.random.randn(11), + 'F': np.random.randn(11)}) + + +def test_agg_regression1(tsframe): + grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) + result = grouped.agg(np.mean) + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + +def test_agg_must_agg(df): + grouped = df.groupby('A')['C'] + + msg = "Must produce aggregated value" + with tm.assert_raises_regex(Exception, msg): + grouped.agg(lambda x: x.describe()) + with tm.assert_raises_regex(Exception, msg): + grouped.agg(lambda x: x.index[:2]) + + +def test_agg_ser_multi_key(df): + # TODO(wesm): unused + ser = df.C # noqa + + f = lambda x: x.sum() + results = df.C.groupby([df.A, df.B]).aggregate(f) + expected = df.groupby(['A', 'B']).sum()['C'] + tm.assert_series_equal(results, expected) + + +def test_agg_apply_corner(ts, tsframe): + # nothing to group, all NA + grouped = ts.groupby(ts * np.nan) + assert ts.dtype == np.float64 + + # groupby float64 values results in Float64Index + exp = Series([], dtype=np.float64, + 
index=pd.Index([], dtype=np.float64)) + tm.assert_series_equal(grouped.sum(), exp) + tm.assert_series_equal(grouped.agg(np.sum), exp) + tm.assert_series_equal(grouped.apply(np.sum), exp, + check_index_type=False) + + # DataFrame + grouped = tsframe.groupby(tsframe['A'] * np.nan) + exp_df = DataFrame(columns=tsframe.columns, dtype=float, + index=pd.Index([], dtype=np.float64)) + tm.assert_frame_equal(grouped.sum(), exp_df, check_names=False) + tm.assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False) + tm.assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0], + check_names=False) + + +def test_agg_grouping_is_list_tuple(ts): + df = tm.makeTimeDataFrame() + + grouped = df.groupby(lambda x: x.year) + grouper = grouped.grouper.groupings[0].grouper + grouped.grouper.groupings[0] = Grouping(ts.index, list(grouper)) + + result = grouped.agg(np.mean) + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + grouped.grouper.groupings[0] = Grouping(ts.index, tuple(grouper)) + + result = grouped.agg(np.mean) + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + +def test_agg_python_multiindex(mframe): + grouped = mframe.groupby(['A', 'B']) + + result = grouped.agg(np.mean) + expected = grouped.mean() + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize('groupbyfunc', [ + lambda x: x.weekday(), + [lambda x: x.month, lambda x: x.weekday()], +]) +def test_aggregate_str_func(tsframe, groupbyfunc): + grouped = tsframe.groupby(groupbyfunc) + + # single series + result = grouped['A'].agg('std') + expected = grouped['A'].std() + tm.assert_series_equal(result, expected) + + # group frame by function name + result = grouped.aggregate('var') + expected = grouped.var() + tm.assert_frame_equal(result, expected) + + # group frame by function dict + result = grouped.agg(OrderedDict([['A', 'var'], + ['B', 'std'], + ['C', 'mean'], + ['D', 'sem']])) + expected = DataFrame(OrderedDict([['A', grouped['A'].var()], + ['B', 
grouped['B'].std()], + ['C', grouped['C'].mean()], + ['D', grouped['D'].sem()]])) + tm.assert_frame_equal(result, expected) + + +def test_aggregate_item_by_item(df): + grouped = df.groupby('A') + + aggfun = lambda ser: ser.size + result = grouped.agg(aggfun) + foo = (df.A == 'foo').sum() + bar = (df.A == 'bar').sum() + K = len(result.columns) + + # GH5782 + # odd comparisons can result here, so cast to make easy + exp = pd.Series(np.array([foo] * K), index=list('BCD'), + dtype=np.float64, name='foo') + tm.assert_series_equal(result.xs('foo'), exp) - tm.assert_frame_equal(result, expected) + exp = pd.Series(np.array([bar] * K), index=list('BCD'), + dtype=np.float64, name='bar') + tm.assert_almost_equal(result.xs('bar'), exp) - # be careful - result = grouped.aggregate(OrderedDict([['C', np.mean], - ['D', [np.mean, np.std]]])) - expected = grouped.aggregate(OrderedDict([['C', np.mean], - ['D', [np.mean, np.std]]])) - tm.assert_frame_equal(result, expected) + def aggfun(ser): + return ser.size - def foo(x): - return np.mean(x) + result = DataFrame().groupby(df.A).agg(aggfun) + assert isinstance(result, DataFrame) + assert len(result) == 0 - def bar(x): - return np.std(x, ddof=1) - # this uses column selection & renaming - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - d = OrderedDict([['C', np.mean], - ['D', OrderedDict([['foo', np.mean], - ['bar', np.std]])]]) - result = grouped.aggregate(d) +def test_wrap_agg_out(three_group): + grouped = three_group.groupby(['A', 'B']) - d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]]) + def func(ser): + if ser.dtype == np.object: + raise TypeError + else: + return ser.sum() + + result = grouped.aggregate(func) + exp_grouped = three_group.loc[:, three_group.columns != 'C'] + expected = exp_grouped.groupby(['A', 'B']).aggregate(func) + tm.assert_frame_equal(result, expected) + + +def test_agg_multiple_functions_maintain_order(df): + # GH #610 + funcs = [('mean', np.mean), ('max', np.max), ('min', 
np.min)] + result = df.groupby('A')['C'].agg(funcs) + exp_cols = Index(['mean', 'max', 'min']) + + tm.assert_index_equal(result.columns, exp_cols) + + +def test_multiple_functions_tuples_and_non_tuples(df): + # #1359 + funcs = [('foo', 'mean'), 'std'] + ex_funcs = [('foo', 'mean'), ('std', 'std')] + + result = df.groupby('A')['C'].agg(funcs) + expected = df.groupby('A')['C'].agg(ex_funcs) + tm.assert_frame_equal(result, expected) + + result = df.groupby('A').agg(funcs) + expected = df.groupby('A').agg(ex_funcs) + tm.assert_frame_equal(result, expected) + + +def test_agg_multiple_functions_too_many_lambdas(df): + grouped = df.groupby('A') + funcs = ['mean', lambda x: x.mean(), lambda x: x.std()] + + msg = 'Function names must be unique, found multiple named <lambda>' + with tm.assert_raises_regex(SpecificationError, msg): + grouped.agg(funcs) + + +def test_more_flexible_frame_multi_function(df): + grouped = df.groupby('A') + + exmean = grouped.agg(OrderedDict([['C', np.mean], ['D', np.mean]])) + exstd = grouped.agg(OrderedDict([['C', np.std], ['D', np.std]])) + + expected = concat([exmean, exstd], keys=['mean', 'std'], axis=1) + expected = expected.swaplevel(0, 1, axis=1).sort_index(level=0, axis=1) + + d = OrderedDict([['C', [np.mean, np.std]], ['D', [np.mean, np.std]]]) + result = grouped.aggregate(d) + + tm.assert_frame_equal(result, expected) + + # be careful + result = grouped.aggregate(OrderedDict([['C', np.mean], + ['D', [np.mean, np.std]]])) + expected = grouped.aggregate(OrderedDict([['C', np.mean], + ['D', [np.mean, np.std]]])) + tm.assert_frame_equal(result, expected) + + def foo(x): + return np.mean(x) + + def bar(x): + return np.std(x, ddof=1) + + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + d = OrderedDict([['C', np.mean], + ['D', OrderedDict([['foo', np.mean], + ['bar', np.std]])]]) + result = grouped.aggregate(d) + + d = OrderedDict([['C', [np.mean]], ['D', [foo, bar]]]) + 
expected = grouped.aggregate(d) + + tm.assert_frame_equal(result, expected) + + +def test_multi_function_flexible_mix(df): + # GH #1268 + grouped = df.groupby('A') + + # Expected + d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], + ['D', {'sum': 'sum'}]]) + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): expected = grouped.aggregate(d) - tm.assert_frame_equal(result, expected) - - def test_multi_function_flexible_mix(self): - # GH #1268 - grouped = self.df.groupby('A') - - # Expected - d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], - ['D', {'sum': 'sum'}]]) - # this uses column selection & renaming - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - expected = grouped.aggregate(d) - - # Test 1 - d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], - ['D', 'sum']]) - # this uses column selection & renaming - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = grouped.aggregate(d) - tm.assert_frame_equal(result, expected) - - # Test 2 - d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], - ['D', ['sum']]]) - # this uses column selection & renaming - with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): - result = grouped.aggregate(d) - tm.assert_frame_equal(result, expected) + # Test 1 + d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], + ['D', 'sum']]) + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = grouped.aggregate(d) + tm.assert_frame_equal(result, expected) + + # Test 2 + d = OrderedDict([['C', OrderedDict([['foo', 'mean'], ['bar', 'std']])], + ['D', ['sum']]]) + # this uses column selection & renaming + with tm.assert_produces_warning(FutureWarning, check_stacklevel=False): + result = grouped.aggregate(d) + tm.assert_frame_equal(result, 
expected) diff --git a/pandas/tests/groupby/aggregate/test_other.py b/pandas/tests/groupby/aggregate/test_other.py index f8e44b1548819..575eae1916f4c 100644 --- a/pandas/tests/groupby/aggregate/test_other.py +++ b/pandas/tests/groupby/aggregate/test_other.py @@ -8,13 +8,15 @@ import pytest -from datetime import datetime, timedelta +import datetime as dt from functools import partial import numpy as np import pandas as pd -from pandas import date_range, DataFrame, Index, MultiIndex, Series +from pandas import ( + date_range, DataFrame, Index, MultiIndex, PeriodIndex, period_range, Series +) from pandas.core.groupby import SpecificationError from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm @@ -50,7 +52,8 @@ def test_agg_datetimes_mixed(): 'value': [x[2] for x in data]}) data = [[row[0], - datetime.strptime(row[1], '%Y-%m-%d').date() if row[1] else None, + (dt.datetime.strptime(row[1], '%Y-%m-%d').date() + if row[1] else None), row[2]] for row in data] @@ -68,7 +71,6 @@ def test_agg_datetimes_mixed(): def test_agg_period_index(): - from pandas import period_range, PeriodIndex prng = period_range('2012-1-1', freq='M', periods=3) df = DataFrame(np.random.randn(3, 2), index=prng) rs = df.groupby(level=0).sum() @@ -125,7 +127,7 @@ def test_agg_dict_parameter_cast_result_dtypes(): def test_agg_cast_results_dtypes(): # similar to GH12821 # xref #11444 - u = [datetime(2015, x + 1, 1) for x in range(12)] + u = [dt.datetime(2015, x + 1, 1) for x in range(12)] v = list('aaabbbbbbccd') df = pd.DataFrame({'X': v, 'Y': u}) @@ -292,9 +294,7 @@ def test_agg_nested_dicts(): def test_agg_item_by_item_raise_typeerror(): - from numpy.random import randint - - df = DataFrame(randint(10, size=(20, 10))) + df = DataFrame(np.random.randint(10, size=(20, 10))) def raiseException(df): pprint_thing('----------------------------------------') @@ -344,7 +344,6 @@ def P1(a): except Exception: return np.nan - import datetime as dt df = DataFrame({'col1': [1, 2, 
3, 4], 'col2': [10, 25, 26, 31], 'date': [dt.date(2013, 2, 10), dt.date(2013, 2, 10), @@ -403,7 +402,8 @@ def test_agg_timezone_round_trip(): # GH 15426 ts = pd.Timestamp("2016-01-01 12:00:00", tz='US/Pacific') df = pd.DataFrame({'a': 1, - 'b': [ts + timedelta(minutes=nn) for nn in range(10)]}) + 'b': [ts + dt.timedelta(minutes=nn) + for nn in range(10)]}) result1 = df.groupby('a')['b'].agg(np.min).iloc[0] result2 = df.groupby('a')['b'].agg(lambda x: np.min(x)).iloc[0]
Continuation of #18931 * Converted class-based setup fixtures to pytest fixtures * Consolidated all imports at the top of the file `test_aggregate_item_by_item` has an unused DataFrame in the test so cleaned that up as well Please let me know if you need me to make any changes.
https://api.github.com/repos/pandas-dev/pandas/pulls/19094
2018-01-05T17:12:52Z
2018-01-05T20:41:20Z
2018-01-05T20:41:20Z
2018-01-05T20:55:53Z
TST: Add to_csv test when writing the single column CSV
diff --git a/pandas/tests/io/formats/test_to_csv.py b/pandas/tests/io/formats/test_to_csv.py index e12a7196dce6b..dfa3751bff57a 100644 --- a/pandas/tests/io/formats/test_to_csv.py +++ b/pandas/tests/io/formats/test_to_csv.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- +import sys import numpy as np import pandas as pd import pytest @@ -9,6 +10,37 @@ class TestToCSV(object): + @pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5), + reason=("Python csv library bug " + "(see https://bugs.python.org/issue32255)")) + def test_to_csv_with_single_column(self): + # see gh-18676, https://bugs.python.org/issue32255 + # + # Python's CSV library adds an extraneous '""' + # before the newline when the NaN-value is in + # the first row. Otherwise, only the newline + # character is added. This behavior is inconsistent + # and was patched in https://bugs.python.org/pull_request4672. + df1 = DataFrame([None, 1]) + expected1 = """\ +"" +1.0 +""" + with tm.ensure_clean('test.csv') as path: + df1.to_csv(path, header=None, index=None) + with open(path, 'r') as f: + assert f.read() == expected1 + + df2 = DataFrame([1, None]) + expected2 = """\ +1.0 +"" +""" + with tm.ensure_clean('test.csv') as path: + df2.to_csv(path, header=None, index=None) + with open(path, 'r') as f: + assert f.read() == expected2 + def test_to_csv_defualt_encoding(self): # GH17097 df = DataFrame({'col': [u"AAAAA", u"ÄÄÄÄÄ", u"ßßßßß", u"聞聞聞聞聞"]})
- [x] closes #18676 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19091
2018-01-05T05:06:42Z
2018-02-11T21:11:11Z
2018-02-11T21:11:11Z
2018-02-11T21:11:31Z
TST: Avoid skipping common indexes tests
diff --git a/pandas/tests/indexes/interval/test_interval_new.py b/pandas/tests/indexes/interval/test_interval_new.py index c8b30e19daa02..a0d11db46d316 100644 --- a/pandas/tests/indexes/interval/test_interval_new.py +++ b/pandas/tests/indexes/interval/test_interval_new.py @@ -4,14 +4,13 @@ import numpy as np from pandas import Interval, IntervalIndex, Int64Index -from pandas.tests.indexes.common import Base import pandas.util.testing as tm pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316") -class TestIntervalIndex(Base): +class TestIntervalIndex(object): def _compare_tuple_of_numpy_array(self, result, expected): lidx, ridx = result
Closes https://github.com/pandas-dev/pandas/issues/19083
https://api.github.com/repos/pandas-dev/pandas/pulls/19085
2018-01-04T20:50:01Z
2018-01-05T00:24:21Z
2018-01-05T00:24:21Z
2018-02-22T15:27:33Z
DOC: Add information on kind parameter to resample docstring
diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 1ab7c50d86c98..2b5e4b912247e 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ -5724,6 +5724,10 @@ def resample(self, rule, how=None, axis=0, fill_method=None, closed=None, convention : {'start', 'end', 's', 'e'} For PeriodIndex only, controls whether to use the start or end of `rule` + kind: {'timestamp', 'period'}, optional + Pass 'timestamp' to convert the resulting index to a + ``DateTimeIndex`` or 'period' to convert it to a ``PeriodIndex``. + By default the input representation is retained. loffset : timedelta Adjust the resampled time labels base : int, default 0
xref #5023 When working on another issue I couldn't find any information in the ``resample`` [docstring](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html) on the ``kind`` parameter so I added it. There was some relevant information in the resample [section](http://pandas.pydata.org/pandas-docs/stable/timeseries.html#resampling) of the time-series documentation.
https://api.github.com/repos/pandas-dev/pandas/pulls/19084
2018-01-04T20:36:59Z
2018-01-05T00:53:20Z
2018-01-05T00:53:20Z
2018-01-06T00:19:21Z
DOC: Spellcheck of merging.rst, reshaping.rst and timeseries.rst
diff --git a/doc/source/basics.rst b/doc/source/basics.rst index bd49b5b7c9b32..55c26e2186344 100644 --- a/doc/source/basics.rst +++ b/doc/source/basics.rst @@ -2220,7 +2220,7 @@ For example, to select ``bool`` columns: df.select_dtypes(include=[bool]) -You can also pass the name of a dtype in the `numpy dtype hierarchy +You can also pass the name of a dtype in the `NumPy dtype hierarchy <http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__: .. ipython:: python diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst index 57f07a41afbc3..7afa852262a38 100644 --- a/doc/source/enhancingperf.rst +++ b/doc/source/enhancingperf.rst @@ -28,14 +28,14 @@ For many use cases writing pandas in pure Python and NumPy is sufficient. In som computationally heavy applications however, it can be possible to achieve sizeable speed-ups by offloading work to `cython <http://cython.org/>`__. -This tutorial assumes you have refactored as much as possible in python, for example +This tutorial assumes you have refactored as much as possible in Python, for example trying to remove for loops and making use of NumPy vectorization, it's always worth optimising in Python first. This tutorial walks through a "typical" process of cythonizing a slow computation. We use an `example from the cython documentation <http://docs.cython.org/src/quickstart/cythonize.html>`__ but in the context of pandas. Our final cythonized solution is around 100 times -faster than the pure python. +faster than the pure Python. .. _enhancingperf.pure: @@ -52,7 +52,7 @@ We have a DataFrame to which we want to apply a function row-wise. 'x': 'x'}) df -Here's the function in pure python: +Here's the function in pure Python: .. ipython:: python @@ -173,7 +173,7 @@ Using ndarray It's calling series... a lot! It's creating a Series from each row, and get-ting from both the index and the series (three times for each row). 
Function calls are expensive -in python, so maybe we could minimize these by cythonizing the apply part. +in Python, so maybe we could minimize these by cythonizing the apply part. .. note:: @@ -231,7 +231,7 @@ the rows, applying our ``integrate_f_typed``, and putting this in the zeros arra .. note:: - Loops like this would be *extremely* slow in python, but in Cython looping + Loops like this would be *extremely* slow in Python, but in Cython looping over NumPy arrays is *fast*. .. code-block:: ipython diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst index 0467ac225585b..4ebc8b82aaa47 100644 --- a/doc/source/indexing.rst +++ b/doc/source/indexing.rst @@ -84,7 +84,7 @@ of multi-axis indexing. ``length-1`` of the axis), but may also be used with a boolean array. ``.iloc`` will raise ``IndexError`` if a requested indexer is out-of-bounds, except *slice* indexers which allow - out-of-bounds indexing. (this conforms with python/numpy *slice* + out-of-bounds indexing. (this conforms with Python/NumPy *slice* semantics). Allowed inputs are: - An integer e.g. ``5``. @@ -1517,7 +1517,7 @@ The :meth:`~pandas.DataFrame.lookup` Method Sometimes you want to extract a set of values given a sequence of row labels and column labels, and the ``lookup`` method allows for this and returns a -numpy array. For instance: +NumPy array. For instance: .. 
ipython:: python diff --git a/doc/source/io.rst b/doc/source/io.rst index 5878272a3da42..2ef7e6d3b64f4 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -775,7 +775,7 @@ The simplest case is to just pass in ``parse_dates=True``: df = pd.read_csv('foo.csv', index_col=0, parse_dates=True) df - # These are python datetime objects + # These are Python datetime objects df.index It is often the case that we may want to store date and time data separately, diff --git a/doc/source/merging.rst b/doc/source/merging.rst index 5f2e90e6ae4fe..ebade853313ab 100644 --- a/doc/source/merging.rst +++ b/doc/source/merging.rst @@ -31,11 +31,11 @@ operations. Concatenating objects --------------------- -The ``concat`` function (in the main pandas namespace) does all of the heavy -lifting of performing concatenation operations along an axis while performing -optional set logic (union or intersection) of the indexes (if any) on the other -axes. Note that I say "if any" because there is only a single possible axis of -concatenation for Series. +The :func:`~pandas.concat` function (in the main pandas namespace) does all of +the heavy lifting of performing concatenation operations along an axis while +performing optional set logic (union or intersection) of the indexes (if any) on +the other axes. Note that I say "if any" because there is only a single possible +axis of concatenation for Series. Before diving into all of the details of ``concat`` and what it can do, here is a simple example: @@ -109,10 +109,10 @@ some configurable handling of "what to do with the other axes": to the actual data concatenation. - ``copy`` : boolean, default True. If False, do not copy data unnecessarily. -Without a little bit of context and example many of these arguments don't make -much sense. Let's take the above example. Suppose we wanted to associate -specific keys with each of the pieces of the chopped up DataFrame. 
We can do -this using the ``keys`` argument: +Without a little bit of context many of these arguments don't make much sense. +Let's revisit the above example. Suppose we wanted to associate specific keys +with each of the pieces of the chopped up DataFrame. We can do this using the +``keys`` argument: .. ipython:: python @@ -128,7 +128,7 @@ this using the ``keys`` argument: As you can see (if you've read the rest of the documentation), the resulting object's index has a :ref:`hierarchical index <advanced.hierarchical>`. This -means that we can now do stuff like select out each chunk by key: +means that we can now select out each chunk by key: .. ipython:: python @@ -138,10 +138,10 @@ It's not a stretch to see how this can be very useful. More detail on this functionality below. .. note:: - It is worth noting however, that ``concat`` (and therefore ``append``) makes - a full copy of the data, and that constantly reusing this function can - create a significant performance hit. If you need to use the operation over - several datasets, use a list comprehension. + It is worth noting that :func:`~pandas.concat` (and therefore + :func:`~pandas.append`) makes a full copy of the data, and that constantly + reusing this function can create a significant performance hit. If you need + to use the operation over several datasets, use a list comprehension. :: @@ -152,17 +152,16 @@ functionality below. Set logic on the other axes ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When gluing together multiple DataFrames (or Panels or...), for example, you -have a choice of how to handle the other axes (other than the one being -concatenated). This can be done in three ways: +When gluing together multiple ``DataFrame``s, you have a choice of how to handle +the other axes (other than the one being concatenated). This can be done in +the following three ways: - Take the (sorted) union of them all, ``join='outer'``. This is the default option as it results in zero information loss. 
- Take the intersection, ``join='inner'``. -- Use a specific index (in the case of DataFrame) or indexes (in the case of - Panel or future higher dimensional objects), i.e. the ``join_axes`` argument +- Use a specific index, as passed to the ``join_axes`` argument. -Here is a example of each of these methods. First, the default ``join='outer'`` +Here is an example of each of these methods. First, the default ``join='outer'`` behavior: .. ipython:: python @@ -217,9 +216,9 @@ DataFrame: Concatenating using ``append`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -A useful shortcut to ``concat`` are the ``append`` instance methods on Series -and DataFrame. These methods actually predated ``concat``. They concatenate -along ``axis=0``, namely the index: +A useful shortcut to :func:`~pandas.concat` are the :meth:`~DataFrame.append` +instance methods on ``Series`` and ``DataFrame``. These methods actually predated +``concat``. They concatenate along ``axis=0``, namely the index: .. ipython:: python @@ -233,7 +232,7 @@ along ``axis=0``, namely the index: labels=['df1', 'df2'], vertical=True); plt.close('all'); -In the case of DataFrame, the indexes must be disjoint but the columns do not +In the case of ``DataFrame``, the indexes must be disjoint but the columns do not need to be: .. ipython:: python @@ -264,18 +263,17 @@ need to be: .. note:: - Unlike `list.append` method, which appends to the original list and - returns nothing, ``append`` here **does not** modify ``df1`` and - returns its copy with ``df2`` appended. + Unlike the :py:meth:`~list.append` method, which appends to the original list + and returns ``None``, :meth:`~DataFrame.append` here **does not** modify + ``df1`` and returns its copy with ``df2`` appended. .. 
_merging.ignore_index: Ignoring indexes on the concatenation axis ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For DataFrames which don't have a meaningful index, you may wish to append them -and ignore the fact that they may have overlapping indexes: - -To do this, use the ``ignore_index`` argument: +For ``DataFrame``s which don't have a meaningful index, you may wish to append +them and ignore the fact that they may have overlapping indexes. To do this, use +the ``ignore_index`` argument: .. ipython:: python @@ -289,7 +287,7 @@ To do this, use the ``ignore_index`` argument: labels=['df1', 'df4'], vertical=True); plt.close('all'); -This is also a valid argument to ``DataFrame.append``: +This is also a valid argument to :meth:`DataFrame.append`: .. ipython:: python @@ -308,9 +306,9 @@ This is also a valid argument to ``DataFrame.append``: Concatenating with mixed ndims ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can concatenate a mix of Series and DataFrames. The -Series will be transformed to DataFrames with the column name as -the name of the Series. +You can concatenate a mix of ``Series`` and ``DataFrame``s. The +``Series`` will be transformed to ``DataFrame`` with the column name as +the name of the ``Series``. .. ipython:: python @@ -325,7 +323,7 @@ the name of the Series. labels=['df1', 's1'], vertical=False); plt.close('all'); -If unnamed Series are passed they will be numbered consecutively. +If unnamed ``Series`` are passed they will be numbered consecutively. .. ipython:: python @@ -357,8 +355,10 @@ Passing ``ignore_index=True`` will drop all name references. More concatenating with group keys ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -A fairly common use of the ``keys`` argument is to override the column names when creating a new DataFrame based on existing Series. -Notice how the default behaviour consists on letting the resulting DataFrame inherits the parent Series' name, when these existed. 
+A fairly common use of the ``keys`` argument is to override the column names +when creating a new ``DataFrame`` based on existing ``Series``. +Notice how the default behaviour consists on letting the resulting ``DataFrame`` +inherit the parent ``Series``' name, when these existed. .. ipython:: python @@ -374,7 +374,7 @@ Through the ``keys`` argument we can override the existing column names. pd.concat([s3, s4, s5], axis=1, keys=['red','blue','yellow']) -Let's consider now a variation on the very first example presented: +Let's consider a variation of the very first example presented: .. ipython:: python @@ -417,7 +417,7 @@ for the ``keys`` argument (unless other keys are specified): plt.close('all'); The MultiIndex created has levels that are constructed from the passed keys and -the index of the DataFrame pieces: +the index of the ``DataFrame`` pieces: .. ipython:: python @@ -444,7 +444,7 @@ do so using the ``levels`` argument: result.index.levels -Yes, this is fairly esoteric, but is actually necessary for implementing things +This is fairly esoteric, but it is actually necessary for implementing things like GroupBy where the order of a categorical variable is meaningful. .. _merging.append.row: @@ -453,8 +453,8 @@ Appending rows to a DataFrame ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ While not especially efficient (since a new object must be created), you can -append a single row to a DataFrame by passing a Series or dict to ``append``, -which returns a new DataFrame as above. +append a single row to a ``DataFrame`` by passing a ``Series`` or dict to +``append``, which returns a new ``DataFrame`` as above. .. ipython:: python @@ -498,16 +498,16 @@ pandas has full-featured, **high performance** in-memory join operations idiomatically very similar to relational databases like SQL. These methods perform significantly better (in some cases well over an order of magnitude better) than other open source implementations (like ``base::merge.data.frame`` -in R). 
The reason for this is careful algorithmic design and internal layout of -the data in DataFrame. +in R). The reason for this is careful algorithmic design and the internal layout +of the data in ``DataFrame``. See the :ref:`cookbook<cookbook.merge>` for some advanced strategies. Users who are familiar with SQL but new to pandas might be interested in a :ref:`comparison with SQL<compare_with_sql.join>`. -pandas provides a single function, ``merge``, as the entry point for all -standard database join operations between DataFrame objects: +pandas provides a single function, :func:`~pandas.merge`, as the entry point for +all standard database join operations between ``DataFrame`` objects: :: @@ -516,28 +516,28 @@ standard database join operations between DataFrame objects: suffixes=('_x', '_y'), copy=True, indicator=False, validate=None) -- ``left``: A DataFrame object -- ``right``: Another DataFrame object +- ``left``: A DataFrame object. +- ``right``: Another DataFrame object. - ``on``: Column or index level names to join on. Must be found in both the left and right DataFrame objects. If not passed and ``left_index`` and ``right_index`` are ``False``, the intersection of the columns in the - DataFrames will be inferred to be the join keys + DataFrames will be inferred to be the join keys. - ``left_on``: Columns or index levels from the left DataFrame to use as keys. Can either be column names, index level names, or arrays with length - equal to the length of the DataFrame + equal to the length of the DataFrame. - ``right_on``: Columns or index levels from the right DataFrame to use as keys. Can either be column names, index level names, or arrays with length - equal to the length of the DataFrame + equal to the length of the DataFrame. - ``left_index``: If ``True``, use the index (row labels) from the left DataFrame as its join key(s). 
In the case of a DataFrame with a MultiIndex (hierarchical), the number of levels must match the number of join keys - from the right DataFrame + from the right DataFrame. - ``right_index``: Same usage as ``left_index`` for the right DataFrame - ``how``: One of ``'left'``, ``'right'``, ``'outer'``, ``'inner'``. Defaults - to ``inner``. See below for more detailed description of each method + to ``inner``. See below for more detailed description of each method. - ``sort``: Sort the result DataFrame by the join keys in lexicographical order. Defaults to ``True``, setting to ``False`` will improve performance - substantially in many cases + substantially in many cases. - ``suffixes``: A tuple of string suffixes to apply to overlapping columns. Defaults to ``('_x', '_y')``. - ``copy``: Always copy data (default ``True``) from the passed DataFrame @@ -575,10 +575,10 @@ and ``right`` is a subclass of DataFrame, the return type will still be ``DataFrame``. ``merge`` is a function in the pandas namespace, and it is also available as a -DataFrame instance method, with the calling DataFrame being implicitly -considered the left object in the join. +``DataFrame`` instance method :meth:`~DataFrame.merge`, with the calling +``DataFrame `` being implicitly considered the left object in the join. -The related ``DataFrame.join`` method, uses ``merge`` internally for the +The related :meth:`~DataFrame.join` method, uses ``merge`` internally for the index-on-index (by default) and column(s)-on-index join. If you are joining on index only, you may wish to use ``DataFrame.join`` to save yourself some typing. @@ -587,19 +587,19 @@ Brief primer on merge methods (relational algebra) Experienced users of relational databases like SQL will be familiar with the terminology used to describe join operations between two SQL-table like -structures (DataFrame objects). There are several cases to consider which are -very important to understand: +structures (``DataFrame`` objects). 
There are several cases to consider which +are very important to understand: -- **one-to-one** joins: for example when joining two DataFrame objects on - their indexes (which must contain unique values) +- **one-to-one** joins: for example when joining two ``DataFrame`` objects on + their indexes (which must contain unique values). - **many-to-one** joins: for example when joining an index (unique) to one or - more columns in a DataFrame + more columns in a different ``DataFrame``. - **many-to-many** joins: joining columns on columns. .. note:: When joining columns on columns (potentially a many-to-many join), any - indexes on the passed DataFrame objects **will be discarded**. + indexes on the passed ``DataFrame`` objects **will be discarded**. It is worth spending some time understanding the result of the **many-to-many** @@ -627,7 +627,9 @@ key combination: labels=['left', 'right'], vertical=False); plt.close('all'); -Here is a more complicated example with multiple join keys: +Here is a more complicated example with multiple join keys. Only the keys +appearing in ``left`` and ``right`` are present (the intersection), since +``how='inner'```by default. .. ipython:: python @@ -712,7 +714,7 @@ either the left or right tables, the values in the joined table will be labels=['left', 'right'], vertical=False); plt.close('all'); -Here is another example with duplicate join keys in DataFrames: +Here is another example with duplicate join keys in ``DataFrame``s: .. ipython:: python @@ -742,9 +744,14 @@ Checking for duplicate keys .. versionadded:: 0.21.0 -Users can use the ``validate`` argument to automatically check whether there are unexpected duplicates in their merge keys. Key uniqueness is checked before merge operations and so should protect against memory overflows. Checking key uniqueness is also a good way to ensure user data structures are as expected. 
+Users can use the ``validate`` argument to automatically check whether there +are unexpected duplicates in their merge keys. Key uniqueness is checked before +merge operations and so should protect against memory overflows. Checking key +uniqueness is also a good way to ensure user data structures are as expected. -In the following example, there are duplicate values of ``B`` in the right DataFrame. As this is not a one-to-one merge -- as specified in the ``validate`` argument -- an exception will be raised. +In the following example, there are duplicate values of ``B`` in the right +``DataFrame``. As this is not a one-to-one merge -- as specified in the +``validate`` argument -- an exception will be raised. .. ipython:: python @@ -758,7 +765,9 @@ In the following example, there are duplicate values of ``B`` in the right DataF ... MergeError: Merge keys are not unique in right dataset; not a one-to-one merge -If the user is aware of the duplicates in the right `DataFrame` but wants to ensure there are no duplicates in the left DataFrame, one can use the `validate='one_to_many'` argument instead, which will not raise an exception. +If the user is aware of the duplicates in the right ``DataFrame`` but wants to +ensure there are no duplicates in the left DataFrame, one can use the +``validate='one_to_many'`` argument instead, which will not raise an exception. .. ipython:: python @@ -770,7 +779,9 @@ If the user is aware of the duplicates in the right `DataFrame` but wants to ens The merge indicator ~~~~~~~~~~~~~~~~~~~ -``merge`` accepts the argument ``indicator``. If ``True``, a Categorical-type column called ``_merge`` will be added to the output object that takes on values: +:func:`~pandas.merge` accepts the argument ``indicator``. 
If ``True``, a +Categorical-type column called ``_merge`` will be added to the output object +that takes on values: =================================== ================ Observation Origin ``_merge`` value @@ -809,7 +820,7 @@ Merging will preserve the dtype of the join keys. right = pd.DataFrame({'key': [1, 2], 'v1': [20, 30]}) right -We are able to preserve the join keys +We are able to preserve the join keys: .. ipython:: python @@ -826,7 +837,7 @@ resulting dtype will be upcast. .. versionadded:: 0.20.0 -Merging will preserve ``category`` dtypes of the mergands. See also the section on :ref:`categoricals <categorical.merge>` +Merging will preserve ``category`` dtypes of the mergands. See also the section on :ref:`categoricals <categorical.merge>`. The left frame. @@ -854,7 +865,7 @@ The right frame. right right.dtypes -The merged result +The merged result: .. ipython:: python @@ -876,9 +887,9 @@ The merged result Joining on index ~~~~~~~~~~~~~~~~ -``DataFrame.join`` is a convenient method for combining the columns of two -potentially differently-indexed DataFrames into a single result DataFrame. Here -is a very basic example: +:meth:`DataFrame.join` is a convenient method for combining the columns of two +potentially differently-indexed ``DataFrames`` into a single result +``DataFrame``. Here is a very basic example: .. ipython:: python @@ -912,6 +923,8 @@ is a very basic example: labels=['left', 'right'], vertical=False); plt.close('all'); +The same as above, but with ``how='inner'``. + .. ipython:: python result = left.join(right, how='inner') @@ -955,10 +968,10 @@ indexes: Joining key columns on an index ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``join`` takes an optional ``on`` argument which may be a column or multiple -column names, which specifies that the passed DataFrame is to be aligned on -that column in the DataFrame. 
These two function calls are completely -equivalent: +:meth:`~DataFrame.join` takes an optional ``on`` argument which may be a column +or multiple column names, which specifies that the passed ``DataFrame`` is to be +aligned on that column in the ``DataFrame``. These two function calls are +completely equivalent: :: @@ -967,8 +980,8 @@ equivalent: how='left', sort=False) Obviously you can choose whichever form you find more convenient. For -many-to-one joins (where one of the DataFrame's is already indexed by the join -key), using ``join`` may be more convenient. Here is a simple example: +many-to-one joins (where one of the ``DataFrame``'s is already indexed by the +join key), using ``join`` may be more convenient. Here is a simple example: .. ipython:: python @@ -1105,7 +1118,8 @@ This is equivalent but less verbose and more memory efficient / faster than this Joining with two multi-indexes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -This is not Implemented via ``join`` at-the-moment, however it can be done using the following. +This is not implemented via ``join`` at-the-moment, however it can be done using +the following code. .. ipython:: python @@ -1181,7 +1195,7 @@ Overlapping value columns ~~~~~~~~~~~~~~~~~~~~~~~~~ The merge ``suffixes`` argument takes a tuple of list of strings to append to -overlapping column names in the input DataFrames to disambiguate the result +overlapping column names in the input ``DataFrame``s to disambiguate the result columns: .. ipython:: python @@ -1211,7 +1225,7 @@ columns: labels=['left', 'right'], vertical=False); plt.close('all'); -``DataFrame.join`` has ``lsuffix`` and ``rsuffix`` arguments which behave +:meth:`DataFrame.join` has ``lsuffix`` and ``rsuffix`` arguments which behave similarly. .. ipython:: python @@ -1233,8 +1247,8 @@ similarly. 
Joining multiple DataFrame or Panel objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -A list or tuple of DataFrames can also be passed to ``DataFrame.join`` to join -them together on their indexes. The same is true for ``Panel.join``. +A list or tuple of ``DataFrames`` can also be passed to :meth:`~DataFrame.join` +to join them together on their indexes. .. ipython:: python @@ -1255,8 +1269,8 @@ Merging together values within Series or DataFrame columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Another fairly common situation is to have two like-indexed (or similarly -indexed) Series or DataFrame objects and wanting to "patch" values in one -object from values for matching indices in the other. Here is an example: +indexed) ``Series`` or ``DataFrame`` objects and wanting to "patch" values in +one object from values for matching indices in the other. Here is an example: .. ipython:: python @@ -1265,7 +1279,7 @@ object from values for matching indices in the other. Here is an example: df2 = pd.DataFrame([[-42.6, np.nan, -8.2], [-5., 1.6, 4]], index=[1, 2]) -For this, use the ``combine_first`` method: +For this, use the :meth:`~DataFrame.combine_first` method: .. ipython:: python @@ -1279,9 +1293,9 @@ For this, use the ``combine_first`` method: labels=['df1', 'df2'], vertical=False); plt.close('all'); -Note that this method only takes values from the right DataFrame if they are -missing in the left DataFrame. A related method, ``update``, alters non-NA -values inplace: +Note that this method only takes values from the right ``DataFrame`` if they are +missing in the left ``DataFrame``. A related method, :meth:`~DataFrame.update`, +alters non-NA values inplace: .. ipython:: python :suppress: @@ -1332,12 +1346,16 @@ Merging AsOf .. versionadded:: 0.19.0 -A :func:`merge_asof` is similar to an ordered left-join except that we match on nearest key rather than equal keys. 
For each row in the ``left`` DataFrame, we select the last row in the ``right`` DataFrame whose ``on`` key is less than the left's key. Both DataFrames must be sorted by the key. +A :func:`merge_asof` is similar to an ordered left-join except that we match on +nearest key rather than equal keys. For each row in the ``left`` ``DataFrame``, +we select the last row in the ``right`` ``DataFrame`` whose ``on`` key is less +than the left's key. Both DataFrames must be sorted by the key. -Optionally an asof merge can perform a group-wise merge. This matches the ``by`` key equally, -in addition to the nearest match on the ``on`` key. +Optionally an asof merge can perform a group-wise merge. This matches the +``by`` key equally, in addition to the nearest match on the ``on`` key. -For example; we might have ``trades`` and ``quotes`` and we want to ``asof`` merge them. +For example; we might have ``trades`` and ``quotes`` and we want to ``asof`` +merge them. .. ipython:: python @@ -1395,9 +1413,9 @@ We only asof within ``2ms`` between the quote time and the trade time. by='ticker', tolerance=pd.Timedelta('2ms')) -We only asof within ``10ms`` between the quote time and the trade time and we exclude exact matches on time. -Note that though we exclude the exact matches (of the quotes), prior quotes DO propagate to that point -in time. +We only asof within ``10ms`` between the quote time and the trade time and we +exclude exact matches on time. Note that though we exclude the exact matches +(of the quotes), prior quotes **do** propagate to that point in time. .. ipython:: python diff --git a/doc/source/missing_data.rst b/doc/source/missing_data.rst index d2250ae7b2116..f56378b533909 100644 --- a/doc/source/missing_data.rst +++ b/doc/source/missing_data.rst @@ -86,8 +86,8 @@ pandas provides the :func:`isna` and .. warning:: - One has to be mindful that in Python (and numpy), the ``nan's`` don't compare equal, but ``None's`` **do**. 
- Note that Pandas/numpy uses the fact that ``np.nan != np.nan``, and treats ``None`` like ``np.nan``. + One has to be mindful that in Python (and NumPy), the ``nan's`` don't compare equal, but ``None's`` **do**. + Note that pandas/NumPy uses the fact that ``np.nan != np.nan``, and treats ``None`` like ``np.nan``. .. ipython:: python diff --git a/doc/source/release.rst b/doc/source/release.rst index de045c426cf7b..cd763de42d162 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -1635,7 +1635,7 @@ performance improvements along with a large number of bug fixes. Highlights include: -- Drop support for numpy < 1.7.0 (:issue:`7711`) +- Drop support for NumPy < 1.7.0 (:issue:`7711`) - The ``Categorical`` type was integrated as a first-class pandas type, see :ref:`here <whatsnew_0150.cat>` - New scalar type ``Timedelta``, and a new index type ``TimedeltaIndex``, see :ref:`here <whatsnew_0150.timedeltaindex>` - New DataFrame default display for ``df.info()`` to include memory usage, see :ref:`Memory Usage <whatsnew_0150.memory>` @@ -2032,7 +2032,7 @@ Bug Fixes - Bug in Series.xs with a multi-index (:issue:`6018`) - Bug in Series construction of mixed type with datelike and an integer (which should result in object type and not automatic conversion) (:issue:`6028`) -- Possible segfault when chained indexing with an object array under numpy 1.7.1 (:issue:`6026`, :issue:`6056`) +- Possible segfault when chained indexing with an object array under NumPy 1.7.1 (:issue:`6026`, :issue:`6056`) - Bug in setting using fancy indexing a single element with a non-scalar (e.g. a list), (:issue:`6043`) - ``to_sql`` did not respect ``if_exists`` (:issue:`4110` :issue:`4304`) @@ -2177,7 +2177,7 @@ Improvements to existing features - allow DataFrame constructor to accept more list-like objects, e.g. 
list of ``collections.Sequence`` and ``array.Array`` objects (:issue:`3783`, :issue:`4297`, :issue:`4851`), thanks @lgautier -- DataFrame constructor now accepts a numpy masked record array +- DataFrame constructor now accepts a NumPy masked record array (:issue:`3478`), thanks @jnothman - ``__getitem__`` with ``tuple`` key (e.g., ``[:, 2]``) on ``Series`` without ``MultiIndex`` raises ``ValueError`` (:issue:`4759`, :issue:`4837`) @@ -2397,8 +2397,8 @@ API Changes support ``pow`` or ``mod`` with non-scalars. (:issue:`3765`) - Arithmetic func factories are now passed real names (suitable for using with super) (:issue:`5240`) -- Provide numpy compatibility with 1.7 for a calling convention like - ``np.prod(pandas_object)`` as numpy call with additional keyword args +- Provide NumPy compatibility with 1.7 for a calling convention like + ``np.prod(pandas_object)`` as NumPy call with additional keyword args (:issue:`4435`) - Provide __dir__ method (and local context) for tab completion / remove ipython completers code (:issue:`4501`) @@ -2481,7 +2481,7 @@ See :ref:`Internal Refactoring<whatsnew_0130.refactoring>` - Series now inherits from ``NDFrame`` rather than directly from ``ndarray``. There are several minor changes that affect the API. - - numpy functions that do not support the array interface will now return + - NumPy functions that do not support the array interface will now return ``ndarrays`` rather than series, e.g. 
``np.diff``, ``np.ones_like``, ``np.where`` - ``Series(0.5)`` would previously return the scalar ``0.5``, this is no @@ -2650,7 +2650,7 @@ Bug Fixes - Fix bug in having a rhs of ``np.timedelta64`` or ``np.offsets.DateOffset`` when operating with datetimes (:issue:`4532`) - Fix arithmetic with series/datetimeindex and ``np.timedelta64`` not working - the same (:issue:`4134`) and buggy timedelta in numpy 1.6 (:issue:`4135`) + the same (:issue:`4134`) and buggy timedelta in NumPy 1.6 (:issue:`4135`) - Fix bug in ``pd.read_clipboard`` on windows with PY3 (:issue:`4561`); not decoding properly - ``tslib.get_period_field()`` and ``tslib.get_period_field_arr()`` now raise @@ -2691,7 +2691,7 @@ Bug Fixes - Bug with reindexing on the index with a non-unique index will now raise ``ValueError`` (:issue:`4746`) - Bug in setting with ``loc/ix`` a single indexer with a multi-index axis and - a numpy array, related to (:issue:`3777`) + a NumPy array, related to (:issue:`3777`) - Bug in concatenation with duplicate columns across dtypes not merging with axis=0 (:issue:`4771`, :issue:`4975`) - Bug in ``iloc`` with a slice index failing (:issue:`4771`) @@ -2958,7 +2958,7 @@ API Changes to enable alternate encodings (:issue:`3750`) - enable support for ``iterator/chunksize`` with ``read_hdf`` - The repr() for (Multi)Index now obeys display.max_seq_items rather - then numpy threshold print options. (:issue:`3426`, :issue:`3466`) + then NumPy threshold print options. (:issue:`3426`, :issue:`3466`) - Added mangle_dupe_cols option to read_table/csv, allowing users to control legacy behaviour re dupe cols (A, A.1, A.2 vs A, A ) (:issue:`3468`) Note: The default value will change in 0.12 to the "no mangle" behaviour, @@ -3025,8 +3025,8 @@ API Changes as ``Index``, ``Categorical``, ``GroupBy``, ``SparseList``, and ``SparseArray`` (+ their base classes). Currently, ``PandasObject`` provides string methods (from ``StringMixin``). 
(:issue:`4090`, :issue:`4092`) -- New ``StringMixin`` that, given a ``__unicode__`` method, gets python 2 and - python 3 compatible string methods (``__str__``, ``__bytes__``, and +- New ``StringMixin`` that, given a ``__unicode__`` method, gets Python 2 and + Python 3 compatible string methods (``__str__``, ``__bytes__``, and ``__repr__``). Plus string safety throughout. Now employed in many places throughout the pandas library. (:issue:`4090`, :issue:`4092`) @@ -3139,7 +3139,7 @@ Bug Fixes two integer arrays with at least 10000 cells total (:issue:`3764`) - Indexing with a string with seconds resolution not selecting from a time index (:issue:`3925`) - csv parsers would loop infinitely if ``iterator=True`` but no ``chunksize`` was - specified (:issue:`3967`), python parser failing with ``chunksize=1`` + specified (:issue:`3967`), Python parser failing with ``chunksize=1`` - Fix index name not propagating when using ``shift`` - Fixed dropna=False being ignored with multi-index stack (:issue:`3997`) - Fixed flattening of columns when renaming MultiIndex columns DataFrame (:issue:`4004`) @@ -3301,7 +3301,7 @@ API Changes - all timedelta like objects will be correctly assigned to ``timedelta64`` with mixed ``NaN`` and/or ``NaT`` allowed -- arguments to DataFrame.clip were inconsistent to numpy and Series clipping +- arguments to DataFrame.clip were inconsistent to NumPy and Series clipping (:issue:`2747`) - util.testing.assert_frame_equal now checks the column and index names (:issue:`2964`) - Constructors will now return a more informative ValueError on failures @@ -3360,7 +3360,7 @@ Bug Fixes - Series ops with a Timestamp on the rhs was throwing an exception (:issue:`2898`) added tests for Series ops with datetimes,timedeltas,Timestamps, and datelike Series on both lhs and rhs - - Fixed subtle timedelta64 inference issue on py3 & numpy 1.7.0 (:issue:`3094`) + - Fixed subtle timedelta64 inference issue on py3 & NumPy 1.7.0 (:issue:`3094`) - Fixed some formatting 
issues on timedelta when negative - Support null checking on timedelta64, representing (and formatting) with NaT - Support setitem with np.nan value, converts to NaT @@ -4574,7 +4574,7 @@ Bug Fixes - Add clearer error message in csv parser (:issue:`835`) - Fix loss of fractional seconds in HDFStore (:issue:`513`) - Fix DataFrame join where columns have datetimes (:issue:`787`) -- Work around numpy performance issue in take (:issue:`817`) +- Work around NumPy performance issue in take (:issue:`817`) - Improve comparison operations for NA-friendliness (:issue:`801`) - Fix indexing operation for floating point values (:issue:`780`, :issue:`798`) - Fix groupby case resulting in malformed dataframe (:issue:`814`) @@ -5822,7 +5822,7 @@ API Changes `offset` argument for everything. So you can still pass a time rule string to `offset` - Added optional `encoding` argument to `read_csv`, `read_table`, `to_csv`, - `from_csv` to handle unicode in python 2.x + `from_csv` to handle unicode in Python 2.x Bug Fixes ~~~~~~~~~ diff --git a/doc/source/reshaping.rst b/doc/source/reshaping.rst index e2b7b0e586d70..71ddaa13fdd8a 100644 --- a/doc/source/reshaping.rst +++ b/doc/source/reshaping.rst @@ -41,7 +41,7 @@ Data is often stored in CSV files or databases in so-called "stacked" or df -For the curious here is how the above DataFrame was created: +For the curious here is how the above ``DataFrame`` was created: .. code-block:: python @@ -63,15 +63,16 @@ To select out everything for variable ``A`` we could do: But suppose we wish to do time series operations with the variables. A better representation would be where the ``columns`` are the unique variables and an ``index`` of dates identifies individual observations. To reshape the data into -this form, use the ``pivot`` function: +this form, we use the :meth:`DataFrame.pivot` method (also implemented as a +top level function :func:`~pandas.pivot`): .. 
ipython:: python df.pivot(index='date', columns='variable', values='value') -If the ``values`` argument is omitted, and the input DataFrame has more than +If the ``values`` argument is omitted, and the input ``DataFrame`` has more than one column of values which are not used as column or index inputs to ``pivot``, -then the resulting "pivoted" DataFrame will have :ref:`hierarchical columns +then the resulting "pivoted" ``DataFrame`` will have :ref:`hierarchical columns <advanced.hierarchical>` whose topmost level indicates the respective value column: @@ -81,7 +82,7 @@ column: pivoted = df.pivot('date', 'variable') pivoted -You of course can then select subsets from the pivoted DataFrame: +You can then select subsets from the pivoted ``DataFrame``: .. ipython:: python @@ -95,18 +96,18 @@ are homogeneously-typed. Reshaping by stacking and unstacking ------------------------------------ -Closely related to the ``pivot`` function are the related ``stack`` and -``unstack`` functions currently available on Series and DataFrame. These -functions are designed to work together with ``MultiIndex`` objects (see the -section on :ref:`hierarchical indexing <advanced.hierarchical>`). Here are -essentially what these functions do: +Closely related to the :meth:`~DataFrame.pivot` method are the related +:meth:`~DataFrame.stack` and :meth:`~DataFrame.unstack` methods available on +``Series`` and ``DataFrame``. These methods are designed to work together with +``MultiIndex`` objects (see the section on :ref:`hierarchical indexing +<advanced.hierarchical>`). Here are essentially what these methods do: - ``stack``: "pivot" a level of the (possibly hierarchical) column labels, - returning a DataFrame with an index with a new inner-most level of row + returning a ``DataFrame`` with an index with a new inner-most level of row labels. 
- - ``unstack``: inverse operation from ``stack``: "pivot" a level of the + - ``unstack``: (inverse operation of ``stack``) "pivot" a level of the (possibly hierarchical) row index to the column axis, producing a reshaped - DataFrame with a new inner-most level of column labels. + ``DataFrame`` with a new inner-most level of column labels. The clearest way to explain is by example. Let's take a prior example data set from the hierarchical indexing section: @@ -122,11 +123,11 @@ from the hierarchical indexing section: df2 = df[:4] df2 -The ``stack`` function "compresses" a level in the DataFrame's columns to +The ``stack`` function "compresses" a level in the ``DataFrame``'s columns to produce either: - - A Series, in the case of a simple column Index - - A DataFrame, in the case of a ``MultiIndex`` in the columns + - A ``Series``, in the case of a simple column Index. + - A ``DataFrame``, in the case of a ``MultiIndex`` in the columns. If the columns have a ``MultiIndex``, you can choose which level to stack. The stacked level becomes the new lowest level in a ``MultiIndex`` on the columns: @@ -136,7 +137,7 @@ stacked level becomes the new lowest level in a ``MultiIndex`` on the columns: stacked = df2.stack() stacked -With a "stacked" DataFrame or Series (having a ``MultiIndex`` as the +With a "stacked" ``DataFrame`` or ``Series`` (having a ``MultiIndex`` as the ``index``), the inverse operation of ``stack`` is ``unstack``, which by default unstacks the **last level**: @@ -157,7 +158,7 @@ the level numbers: Notice that the ``stack`` and ``unstack`` methods implicitly sort the index levels involved. Hence a call to ``stack`` and then ``unstack``, or vice versa, -will result in a **sorted** copy of the original DataFrame or Series: +will result in a **sorted** copy of the original ``DataFrame`` or ``Series``: .. 
ipython:: python @@ -166,7 +167,7 @@ will result in a **sorted** copy of the original DataFrame or Series: df all(df.unstack().stack() == df.sort_index()) -while the above code will raise a ``TypeError`` if the call to ``sort_index`` is +The above code will raise a ``TypeError`` if the call to ``sort_index`` is removed. .. _reshaping.stack_multiple: @@ -265,12 +266,12 @@ the right thing: Reshaping by Melt ----------------- -The top-level :func:`melt` and :func:`~DataFrame.melt` functions are useful to -massage a DataFrame into a format where one or more columns are identifier variables, -while all other columns, considered measured variables, are "unpivoted" to the -row axis, leaving just two non-identifier columns, "variable" and "value". The -names of those columns can be customized by supplying the ``var_name`` and -``value_name`` parameters. +The top-level :func:`~pandas.melt` function and the corresponding :meth:`DataFrame.melt` +are useful to massage a ``DataFrame`` into a format where one or more columns +are *identifier variables*, while all other columns, considered *measured +variables*, are "unpivoted" to the row axis, leaving just two non-identifier +columns, "variable" and "value". The names of those columns can be customized +by supplying the ``var_name`` and ``value_name`` parameters. For instance, @@ -284,8 +285,9 @@ For instance, cheese.melt(id_vars=['first', 'last']) cheese.melt(id_vars=['first', 'last'], var_name='quantity') -Another way to transform is to use the ``wide_to_long`` panel data convenience -function. +Another way to transform is to use the :func:`~pandas.wide_to_long` panel data +convenience function. It is less flexible than :func:`~pandas.melt`, but more +user-friendly. .. ipython:: python @@ -324,22 +326,25 @@ Pivot tables .. 
_reshaping.pivot: -While ``pivot`` provides general purpose pivoting of DataFrames with various -data types (strings, numerics, etc.), Pandas also provides the ``pivot_table`` -function for pivoting with aggregation of numeric data. -The function ``pandas.pivot_table`` can be used to create spreadsheet-style pivot -tables. See the :ref:`cookbook<cookbook.pivot>` for some advanced strategies -It takes a number of arguments +While :meth:`~DataFrame.pivot` provides general purpose pivoting with various +data types (strings, numerics, etc.), pandas also provides :func:`~pandas.pivot_table` +for pivoting with aggregation of numeric data. + +The function :func:`~pandas.pivot_table` can be used to create spreadsheet-style +pivot tables. See the :ref:`cookbook<cookbook.pivot>` for some advanced +strategies. -- ``data``: A DataFrame object -- ``values``: a column or a list of columns to aggregate +It takes a number of arguments: + +- ``data``: a DataFrame object. +- ``values``: a column or a list of columns to aggregate. - ``index``: a column, Grouper, array which has the same length as data, or list of them. Keys to group by on the pivot table index. If an array is passed, it is being used as the same manner as column values. - ``columns``: a column, Grouper, array which has the same length as data, or list of them. Keys to group by on the pivot table column. If an array is passed, it is being used as the same manner as column values. -- ``aggfunc``: function to use for aggregation, defaulting to ``numpy.mean`` +- ``aggfunc``: function to use for aggregation, defaulting to ``numpy.mean``. 
Consider a data set like this: @@ -363,7 +368,7 @@ We can produce pivot tables from this data very easily: pd.pivot_table(df, values='D', index=['B'], columns=['A', 'C'], aggfunc=np.sum) pd.pivot_table(df, values=['D','E'], index=['B'], columns=['A', 'C'], aggfunc=np.sum) -The result object is a DataFrame having potentially hierarchical indexes on the +The result object is a ``DataFrame`` having potentially hierarchical indexes on the rows and columns. If the ``values`` column name is not given, the pivot table will include all of the data that can be aggregated in an additional level of hierarchy in the columns: @@ -386,7 +391,8 @@ calling ``to_string`` if you wish: table = pd.pivot_table(df, index=['A', 'B'], columns=['C']) print(table.to_string(na_rep='')) -Note that ``pivot_table`` is also available as an instance method on DataFrame. +Note that ``pivot_table`` is also available as an instance method on DataFrame, + i.e. :meth:`DataFrame.pivot_table`. .. _reshaping.pivot.margins: @@ -406,27 +412,27 @@ rows and columns: Cross tabulations ----------------- -Use the ``crosstab`` function to compute a cross-tabulation of two (or more) +Use :func:`~pandas.crosstab` to compute a cross-tabulation of two (or more) factors. By default ``crosstab`` computes a frequency table of the factors unless an array of values and an aggregation function are passed. It takes a number of arguments -- ``index``: array-like, values to group by in the rows -- ``columns``: array-like, values to group by in the columns +- ``index``: array-like, values to group by in the rows. +- ``columns``: array-like, values to group by in the columns. - ``values``: array-like, optional, array of values to aggregate according to - the factors + the factors. - ``aggfunc``: function, optional, If no values array is passed, computes a - frequency table -- ``rownames``: sequence, default ``None``, must match number of row arrays passed + frequency table. 
+- ``rownames``: sequence, default ``None``, must match number of row arrays passed. - ``colnames``: sequence, default ``None``, if passed, must match number of column - arrays passed + arrays passed. - ``margins``: boolean, default ``False``, Add row/column margins (subtotals) - ``normalize``: boolean, {'all', 'index', 'columns'}, or {0,1}, default ``False``. Normalize by dividing all values by the sum of values. -Any Series passed will have their name attributes used unless row or column +Any ``Series`` passed will have their name attributes used unless row or column names for the cross-tabulation are specified For example: @@ -478,9 +484,9 @@ using the ``normalize`` argument: pd.crosstab(df.A, df.B, normalize='columns') -``crosstab`` can also be passed a third Series and an aggregation function -(``aggfunc``) that will be applied to the values of the third Series within each -group defined by the first two Series: +``crosstab`` can also be passed a third ``Series`` and an aggregation function +(``aggfunc``) that will be applied to the values of the third ``Series`` within +each group defined by the first two ``Series``: .. ipython:: python @@ -502,9 +508,9 @@ Finally, one can also add margins or normalize this output. Tiling ------ -The ``cut`` function computes groupings for the values of the input array and -is often used to transform continuous variables to discrete or categorical -variables: +The :func:`~pandas.cut` function computes groupings for the values of the input +array and is often used to transform continuous variables to discrete or +categorical variables: .. ipython:: python @@ -523,7 +529,7 @@ Alternatively we can specify custom bin-edges: .. versionadded:: 0.20.0 If the ``bins`` keyword is an ``IntervalIndex``, then these will be -used to bin the passed data. +used to bin the passed data.:: pd.cut([25, 20, 50], bins=c.categories) @@ -533,9 +539,10 @@ used to bin the passed data. 
Computing indicator / dummy variables ------------------------------------- -To convert a categorical variable into a "dummy" or "indicator" DataFrame, for example -a column in a DataFrame (a Series) which has ``k`` distinct values, can derive a DataFrame -containing ``k`` columns of 1s and 0s: +To convert a categorical variable into a "dummy" or "indicator" ``DataFrame``, +for example a column in a ``DataFrame`` (a ``Series``) which has ``k`` distinct +values, can derive a ``DataFrame`` containing ``k`` columns of 1s and 0s using +:func:`~pandas.get_dummies`: .. ipython:: python @@ -544,7 +551,7 @@ containing ``k`` columns of 1s and 0s: pd.get_dummies(df['key']) Sometimes it's useful to prefix the column names, for example when merging the result -with the original DataFrame: +with the original ``DataFrame``: .. ipython:: python @@ -569,9 +576,9 @@ This function is often used along with discretization functions like ``cut``: See also :func:`Series.str.get_dummies <pandas.Series.str.get_dummies>`. -:func:`get_dummies` also accepts a DataFrame. By default all categorical -variables (categorical in the statistical sense, -those with `object` or `categorical` dtype) are encoded as dummy variables. +:func:`get_dummies` also accepts a ``DataFrame``. By default all categorical +variables (categorical in the statistical sense, those with `object` or +`categorical` dtype) are encoded as dummy variables. .. ipython:: python @@ -580,9 +587,8 @@ those with `object` or `categorical` dtype) are encoded as dummy variables. 'C': [1, 2, 3]}) pd.get_dummies(df) -All non-object columns are included untouched in the output. - -You can control the columns that are encoded with the ``columns`` keyword. +All non-object columns are included untouched in the output. You can control +the columns that are encoded with the ``columns`` keyword. .. ipython:: python @@ -592,14 +598,14 @@ Notice that the ``B`` column is still included in the output, it just hasn't been encoded. 
You can drop ``B`` before calling ``get_dummies`` if you don't want to include it in the output. -As with the Series version, you can pass values for the ``prefix`` and +As with the ``Series`` version, you can pass values for the ``prefix`` and ``prefix_sep``. By default the column name is used as the prefix, and '_' as -the prefix separator. You can specify ``prefix`` and ``prefix_sep`` in 3 ways +the prefix separator. You can specify ``prefix`` and ``prefix_sep`` in 3 ways: - string: Use the same value for ``prefix`` or ``prefix_sep`` for each column - to be encoded + to be encoded. - list: Must be the same length as the number of columns being encoded. -- dict: Mapping column name to prefix +- dict: Mapping column name to prefix. .. ipython:: python @@ -634,7 +640,8 @@ When a column contains only one level, it will be omitted in the result. pd.get_dummies(df, drop_first=True) -By default new columns will have ``np.uint8`` dtype. To choose another dtype use ``dtype`` argument: +By default new columns will have ``np.uint8`` dtype. +To choose another dtype, use the ``dtype`` argument: .. ipython:: python @@ -650,7 +657,7 @@ By default new columns will have ``np.uint8`` dtype. To choose another dtype use Factorizing values ------------------ -To encode 1-d values as an enumerated type use ``factorize``: +To encode 1-d values as an enumerated type use :func:`~pandas.factorize`: .. ipython:: python @@ -666,7 +673,7 @@ handling of NaN: .. note:: The following ``numpy.unique`` will fail under Python 3 with a ``TypeError`` because of an ordering bug. See also - `Here <https://github.com/numpy/numpy/issues/641>`__ + `here <https://github.com/numpy/numpy/issues/641>`__. ..
code-block:: ipython diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst index fa21cc997d4f4..466c48b780861 100644 --- a/doc/source/timeseries.rst +++ b/doc/source/timeseries.rst @@ -60,7 +60,7 @@ Change frequency and fill gaps: converted = ts.asfreq('45Min', method='pad') converted.head() -Resample: +Resample the series to a daily frequency: .. ipython:: python @@ -73,7 +73,7 @@ Resample: Overview -------- -Following table shows the type of time-related classes pandas can handle and +The following table shows the type of time-related classes pandas can handle and how to create them. ================= =============================== =================================================================== @@ -112,9 +112,9 @@ For example: pd.Period('2012-05', freq='D') -``Timestamp`` and ``Period`` can be the index. Lists of ``Timestamp`` and -``Period`` are automatically coerced to ``DatetimeIndex`` and ``PeriodIndex`` -respectively. +:class:`Timestamp` and :class:`Period` can serve as an index. Lists of +``Timestamp`` and ``Period`` are automatically coerced to :class:`DatetimeIndex` +and :class:`PeriodIndex` respectively. .. ipython:: python @@ -149,7 +149,7 @@ future releases. Converting to Timestamps ------------------------ -To convert a ``Series`` or list-like object of date-like objects e.g. strings, +To convert a :class:`Series` or list-like object of date-like objects e.g. strings, epochs, or a mixture, you can use the ``to_datetime`` function. When passed a ``Series``, this returns a ``Series`` (with the same index), while a list-like is converted to a ``DatetimeIndex``: @@ -197,7 +197,9 @@ This could also potentially speed up the conversion considerably. pd.to_datetime('12-11-2010 00:00', format='%d-%m-%Y %H:%M') -For more information on how to specify the ``format`` options, see https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior. 
+For more information on the choices available when specifying the ``format`` +option, see the Python `datetime documentation +<https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior>`__. Assembling Datetime from Multiple DataFrame Columns ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -449,12 +451,12 @@ The ``DatetimeIndex`` class contains many time series related optimizations: - A large range of dates for various offsets are pre-computed and cached under the hood in order to make generating subsequent date ranges very fast - (just have to grab a slice) - - Fast shifting using the ``shift`` and ``tshift`` method on pandas objects + (just have to grab a slice). + - Fast shifting using the ``shift`` and ``tshift`` method on pandas objects. - Unioning of overlapping ``DatetimeIndex`` objects with the same frequency is - very fast (important for fast data alignment) + very fast (important for fast data alignment). - Quick access to date fields via properties such as ``year``, ``month``, etc. - - Regularization functions like ``snap`` and very fast ``asof`` logic + - Regularization functions like ``snap`` and very fast ``asof`` logic. ``DatetimeIndex`` objects have all the basic functionality of regular ``Index`` objects, and a smorgasbord of advanced time series specific methods for easy @@ -515,25 +517,26 @@ would include matching times on an included date: dft dft['2013'] -This starts on the very first time in the month, and includes the last date & time for the month +This starts on the very first time in the month, and includes the last date and +time for the month: .. ipython:: python dft['2013-1':'2013-2'] -This specifies a stop time **that includes all of the times on the last day** +This specifies a stop time **that includes all of the times on the last day**: ..
ipython:: python dft['2013-1':'2013-2-28'] -This specifies an **exact** stop time (and is not the same as the above) +This specifies an **exact** stop time (and is not the same as the above): .. ipython:: python dft['2013-1':'2013-2-28 00:00:00'] -We are stopping on the included end-point as it is part of the index +We are stopping on the included end-point as it is part of the index: .. ipython:: python @@ -589,7 +592,8 @@ A timestamp string with minute resolution (or more accurate), gives a scalar ins series_minute['2011-12-31 23:59'] series_minute['2011-12-31 23:59:00'] -If index resolution is second, then, the minute-accurate timestamp gives a ``Series``. +If index resolution is second, then the minute-accurate timestamp gives a +``Series``. .. ipython:: python @@ -652,10 +656,10 @@ With no defaults. Truncating & Fancy Indexing ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -A ``truncate`` convenience function is provided that is similar to slicing. -Note that ``truncate`` assumes a 0 value for any unspecified date component -in a ``DatetimeIndex`` in contrast to slicing which returns any partially -matching dates: +A :meth:`~DataFrame.truncate` convenience function is provided that is similar +to slicing. Note that ``truncate`` assumes a 0 value for any unspecified date +component in a ``DatetimeIndex`` in contrast to slicing which returns any +partially matching dates: .. 
ipython:: python @@ -709,7 +713,9 @@ There are several time/date properties that one can access from ``Timestamp`` or is_year_end,"Logical indicating if last day of year (defined by frequency)" is_leap_year,"Logical indicating if the date belongs to a leap year" -Furthermore, if you have a ``Series`` with datetimelike values, then you can access these properties via the ``.dt`` accessor, see the :ref:`docs <basics.dt_accessors>` +Furthermore, if you have a ``Series`` with datetimelike values, then you can +access these properties via the ``.dt`` accessor, as detailed in the section +on :ref:`.dt accessors<basics.dt_accessors>`. .. _timeseries.offsets: @@ -718,8 +724,8 @@ DateOffset Objects In the preceding examples, we created ``DatetimeIndex`` objects at various frequencies by passing in :ref:`frequency strings <timeseries.offset_aliases>` -like 'M', 'W', and 'BM to the ``freq`` keyword. Under the hood, these frequency -strings are being translated into an instance of pandas ``DateOffset``, +like 'M', 'W', and 'BM' to the ``freq`` keyword. Under the hood, these frequency +strings are being translated into an instance of :class:`DateOffset`, which represents a regular frequency increment. Specific offset logic like "month", "business day", or "one hour" is represented in its various subclasses. @@ -761,7 +767,7 @@ which represents a regular frequency increment. Specific offset logic like Nano, "one nanosecond" The basic ``DateOffset`` takes the same arguments as -``dateutil.relativedelta``, which works like: +``dateutil.relativedelta``, which works as follows: .. 
ipython:: python @@ -777,12 +783,13 @@ We could have done the same thing with ``DateOffset``: The key features of a ``DateOffset`` object are: -- it can be added / subtracted to/from a datetime object to obtain a - shifted date -- it can be multiplied by an integer (positive or negative) so that the - increment will be applied multiple times -- it has ``rollforward`` and ``rollback`` methods for moving a date forward - or backward to the next or previous "offset date" +- It can be added / subtracted to/from a datetime object to obtain a + shifted date. +- It can be multiplied by an integer (positive or negative) so that the + increment will be applied multiple times. +- It has :meth:`~pandas.DateOffset.rollforward` and + :meth:`~pandas.DateOffset.rollback` methods for moving a date forward or + backward to the next or previous "offset date". Subclasses of ``DateOffset`` define the ``apply`` function which dictates custom date increment logic, such as adding business days: @@ -811,7 +818,10 @@ The ``rollforward`` and ``rollback`` methods do exactly what you would expect: It's definitely worth exploring the ``pandas.tseries.offsets`` module and the various docstrings for the classes. -These operations (``apply``, ``rollforward`` and ``rollback``) preserves time (hour, minute, etc) information by default. To reset time, use ``normalize=True`` keyword when creating the offset instance. If ``normalize=True``, result is normalized after the function is applied. +These operations (``apply``, ``rollforward`` and ``rollback``) preserve time +(hour, minute, etc) information by default. To reset time, use ``normalize=True`` +when creating the offset instance. If ``normalize=True``, the result is +normalized after the function is applied. .. ipython:: python @@ -847,7 +857,7 @@ particular day of the week: d - Week() -``normalize`` option will be effective for addition and subtraction. +The ``normalize`` option will be effective for addition and subtraction. .. 
ipython:: python @@ -926,7 +936,7 @@ As an interesting example, let's look at Egypt where a Friday-Saturday weekend i dt = datetime(2013, 4, 30) dt + 2 * bday_egypt -Let's map to the weekday names +Let's map to the weekday names: .. ipython:: python @@ -982,9 +992,10 @@ The ``BusinessHour`` class provides a business hour representation on ``Business allowing to use specific start and end times. By default, ``BusinessHour`` uses 9:00 - 17:00 as business hours. -Adding ``BusinessHour`` will increment ``Timestamp`` by hourly. -If target ``Timestamp`` is out of business hours, move to the next business hour then increment it. -If the result exceeds the business hours end, remaining is added to the next business day. +Adding ``BusinessHour`` will increment ``Timestamp`` by hourly frequency. +If target ``Timestamp`` is out of business hours, move to the next business hour +then increment it. If the result exceeds the business hours end, the remaining +hours are added to the next business day. .. ipython:: python @@ -1010,9 +1021,10 @@ If the result exceeds the business hours end, remaining is added to the next bus # Subtracting 3 business hours pd.Timestamp('2014-08-01 10:00') + BusinessHour(-3) -Also, you can specify ``start`` and ``end`` time by keywords. -Argument must be ``str`` which has ``hour:minute`` representation or ``datetime.time`` instance. -Specifying seconds, microseconds and nanoseconds as business hour results in ``ValueError``. +You can also specify ``start`` and ``end`` time by keywords. The argument must +be a ``str`` with an ``hour:minute`` representation or a ``datetime.time`` +instance. Specifying seconds, microseconds and nanoseconds as business hour +results in ``ValueError``. .. ipython:: python @@ -1068,8 +1080,9 @@ under the default business hours (9:00 - 17:00), there is no gap (0 minutes) bet # The result is the same as rollworward because BusinessDay never overlap. 
BusinessHour().apply(pd.Timestamp('2014-08-02')) -``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary holidays, -you can use ``CustomBusinessHour`` offset, see :ref:`Custom Business Hour <timeseries.custombusinesshour>`: +``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary +holidays, you can use ``CustomBusinessHour`` offset, as explained in the +following subsection. .. _timeseries.custombusinesshour: @@ -1212,7 +1225,7 @@ Anchored Offset Semantics ~~~~~~~~~~~~~~~~~~~~~~~~~ For those offsets that are anchored to the start or end of specific -frequency (``MonthEnd``, ``MonthBegin``, ``WeekEnd``, etc) the following +frequency (``MonthEnd``, ``MonthBegin``, ``WeekEnd``, etc), the following rules apply to rolling forward and backwards. When ``n`` is not 0, if the given date is not on an anchor point, it snapped to the next(previous) @@ -1263,7 +1276,7 @@ Holidays and calendars provide a simple way to define holiday rules to be used with ``CustomBusinessDay`` or in other analysis that requires a predefined set of holidays. The ``AbstractHolidayCalendar`` class provides all the necessary methods to return a list of holidays and only ``rules`` need to be defined -in a specific holiday calendar class. Further, ``start_date`` and ``end_date`` +in a specific holiday calendar class. Furthermore, the ``start_date`` and ``end_date`` class attributes determine over what date range holidays are generated. These should be overwritten on the ``AbstractHolidayCalendar`` class to have the range apply to all calendar subclasses. ``USFederalHolidayCalendar`` is the @@ -1318,7 +1331,7 @@ or ``Timestamp`` objects. datetime(2012, 7, 6) + offset Ranges are defined by the ``start_date`` and ``end_date`` class attributes -of ``AbstractHolidayCalendar``. The defaults are below. +of ``AbstractHolidayCalendar``. The defaults are shown below. .. 
ipython:: python @@ -1358,16 +1371,17 @@ Shifting / Lagging ~~~~~~~~~~~~~~~~~~ One may want to *shift* or *lag* the values in a time series back and forward in -time. The method for this is ``shift``, which is available on all of the pandas -objects. +time. The method for this is :meth:`~Series.shift`, which is available on all of +the pandas objects. .. ipython:: python ts = ts[:5] ts.shift(1) -The shift method accepts an ``freq`` argument which can accept a -``DateOffset`` class or other ``timedelta``-like object or also a :ref:`offset alias <timeseries.offset_aliases>`: +The ``shift`` method accepts an ``freq`` argument which can accept a +``DateOffset`` class or other ``timedelta``-like object or also an +:ref:`offset alias <timeseries.offset_aliases>`: .. ipython:: python @@ -1375,8 +1389,8 @@ The shift method accepts an ``freq`` argument which can accept a ts.shift(5, freq='BM') Rather than changing the alignment of the data and the index, ``DataFrame`` and -``Series`` objects also have a ``tshift`` convenience method that changes -all the dates in the index by a specified number of offsets: +``Series`` objects also have a :meth:`~Series.tshift` convenience method that +changes all the dates in the index by a specified number of offsets: .. ipython:: python @@ -1388,9 +1402,10 @@ is not being realigned. Frequency Conversion ~~~~~~~~~~~~~~~~~~~~ -The primary function for changing frequencies is the ``asfreq`` function. -For a ``DatetimeIndex``, this is basically just a thin, but convenient wrapper -around ``reindex`` which generates a ``date_range`` and calls ``reindex``. +The primary function for changing frequencies is the :meth:`~Series.asfreq` +method. For a ``DatetimeIndex``, this is basically just a thin, but convenient +wrapper around :meth:`~Series.reindex` which generates a ``date_range`` and +calls ``reindex``. .. ipython:: python @@ -1400,7 +1415,7 @@ around ``reindex`` which generates a ``date_range`` and calls ``reindex``. 
ts.asfreq(BDay()) ``asfreq`` provides a further convenience so you can specify an interpolation -method for any gaps that may appear after the frequency conversion +method for any gaps that may appear after the frequency conversion. .. ipython:: python @@ -1409,14 +1424,14 @@ method for any gaps that may appear after the frequency conversion Filling Forward / Backward ~~~~~~~~~~~~~~~~~~~~~~~~~~ -Related to ``asfreq`` and ``reindex`` is the ``fillna`` function documented in -the :ref:`missing data section <missing_data.fillna>`. +Related to ``asfreq`` and ``reindex`` is :meth:`~Series.fillna`, which is +documented in the :ref:`missing data section <missing_data.fillna>`. Converting to Python Datetimes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``DatetimeIndex`` can be converted to an array of Python native datetime.datetime objects using the -``to_pydatetime`` method. +``DatetimeIndex`` can be converted to an array of Python native +:py:class:`datetime.datetime` objects using the ``to_pydatetime`` method. .. _timeseries.resampling: @@ -1428,20 +1443,22 @@ Resampling The interface to ``.resample`` has changed in 0.18.0 to be more groupby-like and hence more flexible. See the :ref:`whatsnew docs <whatsnew_0180.breaking.resample>` for a comparison with prior versions. -Pandas has a simple, powerful, and efficient functionality for -performing resampling operations during frequency conversion (e.g., converting -secondly data into 5-minutely data). This is extremely common in, but not -limited to, financial applications. +Pandas has a simple, powerful, and efficient functionality for performing +resampling operations during frequency conversion (e.g., converting secondly +data into 5-minutely data). This is extremely common in, but not limited to, +financial applications. -``.resample()`` is a time-based groupby, followed by a reduction method on each of its groups. 
-See some :ref:`cookbook examples <cookbook.resample>` for some advanced strategies +:meth:`~Series.resample` is a time-based groupby, followed by a reduction method +on each of its groups. See some :ref:`cookbook examples <cookbook.resample>` for +some advanced strategies. Starting in version 0.18.1, the ``resample()`` function can be used directly from ``DataFrameGroupBy`` objects, see the :ref:`groupby docs <groupby.transform.window_resample>`. .. note:: - ``.resample()`` is similar to using a ``.rolling()`` operation with a time-based offset, see a discussion :ref:`here <stats.moments.ts-versus-resampling>` + ``.resample()`` is similar to using a :meth:`~Series.rolling` operation with + a time-based offset, see a discussion :ref:`here <stats.moments.ts-versus-resampling>`. Basics ~~~~~~ @@ -1542,20 +1559,21 @@ For upsampling, you can specify a way to upsample and the ``limit`` parameter to Sparse Resampling ~~~~~~~~~~~~~~~~~ -Sparse timeseries are ones where you have a lot fewer points relative -to the amount of time you are looking to resample. Naively upsampling a sparse series can potentially -generate lots of intermediate values. When you don't want to use a method to fill these values, e.g. ``fill_method`` is ``None``, -then intermediate values will be filled with ``NaN``. +Sparse timeseries are the ones where you have a lot fewer points relative +to the amount of time you are looking to resample. Naively upsampling a sparse +series can potentially generate lots of intermediate values. When you don't want +to use a method to fill these values, e.g. ``fill_method`` is ``None``, then +intermediate values will be filled with ``NaN``. Since ``resample`` is a time-based groupby, the following is a method to efficiently -resample only the groups that are not all ``NaN`` +resample only the groups that are not all ``NaN``. .. 
ipython:: python rng = pd.date_range('2014-1-1', periods=100, freq='D') + pd.Timedelta('1s') ts = pd.Series(range(100), index=rng) -If we want to resample to the full range of the series +If we want to resample to the full range of the series: .. ipython:: python @@ -1624,7 +1642,7 @@ columns of a ``DataFrame``: 'B' : lambda x: np.std(x, ddof=1)}) The function names can also be strings. In order for a string to be valid it -must be implemented on the Resampled object +must be implemented on the resampled object: .. ipython:: python @@ -2000,7 +2018,7 @@ To convert from an ``int64`` based YYYYMMDD representation. s.apply(conv) s.apply(conv)[2] -These can easily be converted to a ``PeriodIndex`` +These can easily be converted to a ``PeriodIndex``: .. ipython:: python @@ -2278,7 +2296,7 @@ a convert on an aware stamp. pd.Series(s_aware.values) - However, these can be easily converted + However, these can be easily converted: .. ipython:: python diff --git a/doc/source/tutorials.rst b/doc/source/tutorials.rst index 0b8a2cb89b45e..43ccd372d9d5b 100644 --- a/doc/source/tutorials.rst +++ b/doc/source/tutorials.rst @@ -174,7 +174,7 @@ Various Tutorials - `Wes McKinney's (pandas BDFL) blog <http://blog.wesmckinney.com/>`_ - `Statistical analysis made easy in Python with SciPy and pandas DataFrames, by Randal Olson <http://www.randalolson.com/2012/08/06/statistical-analysis-made-easy-in-python/>`_ - `Statistical Data Analysis in Python, tutorial videos, by Christopher Fonnesbeck from SciPy 2013 <http://conference.scipy.org/scipy2013/tutorial_detail.php?id=109>`_ -- `Financial analysis in python, by Thomas Wiecki <http://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_ +- `Financial analysis in Python, by Thomas Wiecki <http://nbviewer.ipython.org/github/twiecki/financial-analysis-python-tutorial/blob/master/1.%20Pandas%20Basics.ipynb>`_ - `Intro to pandas data structures, by Greg Reda 
<http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/>`_ - `Pandas and Python: Top 10, by Manish Amde <http://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/>`_ - `Pandas Tutorial, by Mikhail Semeniuk <http://www.bearrelroll.com/2013/05/python-pandas-tutorial>`_
This PR is a continuation of my reading through the docs, see #19017, #18973, #18948, #18941. The changes are: * In `merging.rst`, `reshaping.rst` and `timeseries.rst`: * Function references as links * Backticks ` `` ` around Series, DataFrame * Minor rephrasing of sentences, spelling, etc * Replaced some instances of `python` with `Python` and `numpy` with `NumPy` globally in `*.rst` files. I hope to add a lint rule in a future PR. Comments/reviews are very welcome.
https://api.github.com/repos/pandas-dev/pandas/pulls/19081
2018-01-04T18:57:32Z
2018-01-05T00:32:05Z
2018-01-05T00:32:04Z
2018-01-05T00:33:03Z
STYLE: deprecation linting
diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 1937a57939c87..1c401c4854306 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -34,7 +34,7 @@ from pandas.core.ops import _op_descriptions from pandas.core.series import Series from pandas.core.reshape.util import cartesian_product -from pandas.util._decorators import (deprecate, Appender) +from pandas.util._decorators import Appender from pandas.util._validators import validate_axis_style_args _shared_doc_kwargs = dict(
xref #19067
https://api.github.com/repos/pandas-dev/pandas/pulls/19079
2018-01-04T16:47:02Z
2018-01-04T17:25:44Z
2018-01-04T17:25:44Z
2018-01-04T17:25:45Z
str.extractall with no match returns appropriate MultIndex
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6a48abb6c6592..a692d1e60fd24 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -325,7 +325,7 @@ Indexing - Bug in indexing non-scalar value from ``Series`` having non-unique ``Index`` will return value flattened (:issue:`17610`) - Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) - Bug in ``__setitem__`` when indexing a :class:`DataFrame` with a 2-d boolean ndarray (:issue:`18582`) - +- Bug in ``str.extractall`` when there were no matches empty :class:`Index` was returned instead of appropriate :class:`MultiIndex` (:issue:`19034`) I/O ^^^ diff --git a/pandas/core/strings.py b/pandas/core/strings.py index fab4e77ce4467..3b7ec2ad8a508 100644 --- a/pandas/core/strings.py +++ b/pandas/core/strings.py @@ -794,12 +794,10 @@ def str_extractall(arr, pat, flags=0): result_key = tuple(subject_key + (match_i, )) index_list.append(result_key) - if 0 < len(index_list): - from pandas import MultiIndex - index = MultiIndex.from_tuples( - index_list, names=arr.index.names + ["match"]) - else: - index = None + from pandas import MultiIndex + index = MultiIndex.from_tuples( + index_list, names=arr.index.names + ["match"]) + result = arr._constructor_expanddim(match_list, index=index, columns=columns) return result diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py index 8aa69bcbfdf7f..973fe74429551 100644 --- a/pandas/tests/test_strings.py +++ b/pandas/tests/test_strings.py @@ -1072,28 +1072,50 @@ def test_extractall_single_group_with_quantifier(self): e = DataFrame(['ab', 'abc', 'd', 'cd'], i) tm.assert_frame_equal(r, e) - def test_extractall_no_matches(self): - s = Series(['a3', 'b3', 'd4c2'], name='series_name') + @pytest.mark.parametrize('data, names', [ + ([], (None, )), + ([], ('i1', )), + ([], (None, 'i2')), + ([], ('i1', 'i2')), + (['a3', 'b3', 'd4c2'], (None, 
)), + (['a3', 'b3', 'd4c2'], ('i1', 'i2')), + (['a3', 'b3', 'd4c2'], (None, 'i2')), + (['a3', 'b3', 'd4c2'], ('i1', 'i2')), + ]) + def test_extractall_no_matches(self, data, names): + # GH19075 extractall with no matches should return a valid MultiIndex + n = len(data) + if len(names) == 1: + i = Index(range(n), name=names[0]) + else: + a = (tuple([i] * (n - 1)) for i in range(n)) + i = MultiIndex.from_tuples(a, names=names) + s = Series(data, name='series_name', index=i, dtype='object') + ei = MultiIndex.from_tuples([], names=(names + ('match',))) + # one un-named group. r = s.str.extractall('(z)') - e = DataFrame(columns=[0]) + e = DataFrame(columns=[0], index=ei) tm.assert_frame_equal(r, e) + # two un-named groups. r = s.str.extractall('(z)(z)') - e = DataFrame(columns=[0, 1]) + e = DataFrame(columns=[0, 1], index=ei) tm.assert_frame_equal(r, e) + # one named group. r = s.str.extractall('(?P<first>z)') - e = DataFrame(columns=["first"]) + e = DataFrame(columns=["first"], index=ei) tm.assert_frame_equal(r, e) + # two named groups. r = s.str.extractall('(?P<first>z)(?P<second>z)') - e = DataFrame(columns=["first", "second"]) + e = DataFrame(columns=["first", "second"], index=ei) tm.assert_frame_equal(r, e) + # one named, one un-named. r = s.str.extractall('(z)(?P<second>z)') - e = DataFrame(columns=[0, - "second"]) + e = DataFrame(columns=[0, "second"], index=ei) tm.assert_frame_equal(r, e) def test_extractall_stringindex(self):
Relates and fixes issue #19034. It's a small fix which creates empty MultiIndex instead of Index, with appropriate names, when `str.extractall` doesn't find any matches. Also, a related test in `pandas.tests.test_strings` has been updated to reflect the expected result.
https://api.github.com/repos/pandas-dev/pandas/pulls/19075
2018-01-04T13:16:39Z
2018-01-05T19:13:44Z
2018-01-05T19:13:44Z
2018-01-05T21:30:29Z
REF: codes-based MultiIndex engine
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 246eab386b2ab..efb4707649f08 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -380,6 +380,7 @@ Performance Improvements - Improved performance of ``DatetimeIndex`` and ``Series`` arithmetic operations with Business-Month and Business-Quarter frequencies (:issue:`18489`) - :func:`Series` / :func:`DataFrame` tab completion limits to 100 values, for better performance. (:issue:`18587`) - Improved performance of :func:`DataFrame.median` with ``axis=1`` when bottleneck is not installed (:issue:`16468`) +- Improved performance of :func:`MultiIndex.get_loc` for large indexes, at the cost of a reduction in performance for small ones (:issue:`18519`) .. _whatsnew_0230.docs: @@ -476,7 +477,11 @@ MultiIndex - Bug in :func:`MultiIndex.get_level_values` which would return an invalid index on level of ints with missing values (:issue:`17924`) - Bug in :func:`MultiIndex.remove_unused_levels` which would fill nan values (:issue:`18417`) - Bug in :func:`MultiIndex.from_tuples`` which would fail to take zipped tuples in python3 (:issue:`18434`) -- +- Bug in :func:`MultiIndex.get_loc`` which would fail to automatically cast values between float and int (:issue:`18818`, :issue:`15994`) +- Bug in :func:`MultiIndex.get_loc`` which would cast boolean to integer labels (:issue:`19086`) +- Bug in :func:`MultiIndex.get_loc`` which would fail to locate keys containing ``NaN`` (:issue:`18485`) +- Bug in :func:`MultiIndex.get_loc`` in large :class:`MultiIndex`, would fail when levels had different dtypes (:issue:`18520`) + I/O ^^^ diff --git a/pandas/_libs/hashtable.pxd b/pandas/_libs/hashtable.pxd index 014da22df3382..d735b3c0673b2 100644 --- a/pandas/_libs/hashtable.pxd +++ b/pandas/_libs/hashtable.pxd @@ -31,15 +31,6 @@ cdef class PyObjectHashTable(HashTable): cpdef get_item(self, object val) cpdef set_item(self, object key, Py_ssize_t val) -cdef class 
MultiIndexHashTable(HashTable): - cdef: - kh_uint64_t *table - object mi - - cpdef get_item(self, object val) - cpdef set_item(self, object key, Py_ssize_t val) - cdef inline void _check_for_collision(self, Py_ssize_t loc, object label) - cdef class StringHashTable(HashTable): cdef kh_str_t *table diff --git a/pandas/_libs/hashtable_class_helper.pxi.in b/pandas/_libs/hashtable_class_helper.pxi.in index bd9dd1f9bae37..bca4e388f3279 100644 --- a/pandas/_libs/hashtable_class_helper.pxi.in +++ b/pandas/_libs/hashtable_class_helper.pxi.in @@ -899,139 +899,3 @@ cdef class PyObjectHashTable(HashTable): count += 1 return np.asarray(labels) - - -cdef class MultiIndexHashTable(HashTable): - - def __init__(self, size_hint=1): - self.table = kh_init_uint64() - self.mi = None - kh_resize_uint64(self.table, size_hint) - - def __dealloc__(self): - if self.table is not NULL: - kh_destroy_uint64(self.table) - self.table = NULL - - def __len__(self): - return self.table.size - - def sizeof(self, deep=False): - """ return the size of my table in bytes """ - return self.table.n_buckets * (sizeof(uint64_t) + # keys - sizeof(size_t) + # vals - sizeof(uint32_t)) # flags - - def _check_for_collisions(self, int64_t[:] locs, object mi): - # validate that the locs map to the actual values - # provided in the mi - # we can only check if we *don't* have any missing values - # :< - cdef: - ndarray[int64_t] alocs - - alocs = np.asarray(locs) - if (alocs != -1).all(): - - result = self.mi.take(locs) - if isinstance(mi, tuple): - from pandas import Index - mi = Index([mi]) - if not result.equals(mi): - raise AssertionError( - "hash collision\nlocs:\n{}\n" - "result:\n{}\nmi:\n{}".format(alocs, result, mi)) - - cdef inline void _check_for_collision(self, Py_ssize_t loc, object label): - # validate that the loc maps to the actual value - # version of _check_for_collisions above for single label (tuple) - - result = self.mi[loc] - - if not all(l == r or (is_null_datetimelike(l) - and 
is_null_datetimelike(r)) - for l, r in zip(result, label)): - raise AssertionError( - "hash collision\nloc:\n{}\n" - "result:\n{}\nmi:\n{}".format(loc, result, label)) - - def __contains__(self, object key): - try: - self.get_item(key) - return True - except (KeyError, ValueError, TypeError): - return False - - cpdef get_item(self, object key): - cdef: - khiter_t k - uint64_t value - int64_t[:] locs - Py_ssize_t loc - - value = self.mi._hashed_indexing_key(key) - k = kh_get_uint64(self.table, value) - if k != self.table.n_buckets: - loc = self.table.vals[k] - self._check_for_collision(loc, key) - return loc - else: - raise KeyError(key) - - cpdef set_item(self, object key, Py_ssize_t val): - raise NotImplementedError - - @cython.boundscheck(False) - def map_locations(self, object mi): - cdef: - Py_ssize_t i, n - ndarray[uint64_t] values - uint64_t val - int ret = 0 - khiter_t k - - self.mi = mi - n = len(mi) - values = mi._hashed_values - - with nogil: - for i in range(n): - val = values[i] - k = kh_put_uint64(self.table, val, &ret) - self.table.vals[k] = i - - @cython.boundscheck(False) - def lookup(self, object mi): - # look up with a target mi - cdef: - Py_ssize_t i, n - ndarray[uint64_t] values - int ret = 0 - uint64_t val - khiter_t k - int64_t[:] locs - - n = len(mi) - values = mi._hashed_values - - locs = np.empty(n, dtype=np.int64) - - with nogil: - for i in range(n): - val = values[i] - k = kh_get_uint64(self.table, val) - if k != self.table.n_buckets: - locs[i] = self.table.vals[k] - else: - locs[i] = -1 - - self._check_for_collisions(locs, mi) - return np.asarray(locs) - - def unique(self, object mi): - raise NotImplementedError - - def get_labels(self, object mi, ObjectVector uniques, - Py_ssize_t count_prior, int64_t na_sentinel, - bint check_null=True): - raise NotImplementedError diff --git a/pandas/_libs/index.pyx b/pandas/_libs/index.pyx index bfea4ff9915ac..6b23e487aad3a 100644 --- a/pandas/_libs/index.pyx +++ b/pandas/_libs/index.pyx @@ -26,11 
+26,12 @@ from hashtable cimport HashTable from pandas._libs import algos, hashtable as _hash from pandas._libs.tslibs import period as periodlib from pandas._libs.tslib import Timestamp, Timedelta +from pandas._libs.missing import checknull cdef int64_t iNaT = util.get_nat() -cdef inline is_definitely_invalid_key(object val): +cdef inline bint is_definitely_invalid_key(object val): if PyTuple_Check(val): try: hash(val) @@ -585,70 +586,137 @@ cpdef convert_scalar(ndarray arr, object value): return value -cdef class MultiIndexObjectEngine(ObjectEngine): +cdef class BaseMultiIndexCodesEngine: """ - provide the same interface as the MultiIndexEngine - but use the IndexEngine for computation - - This provides good performance with samller MI's + Base class for MultiIndexUIntEngine and MultiIndexPyIntEngine, which + represent each label in a MultiIndex as an integer, by juxtaposing the bits + encoding each level, with appropriate offsets. + + For instance: if 3 levels have respectively 3, 6 and 1 possible values, + then their labels can be represented using respectively 2, 3 and 1 bits, + as follows: + _ _ _ _____ _ __ __ __ + |0|0|0| ... |0| 0|a1|a0| -> offset 0 (first level) + — — — ————— — —— —— —— + |0|0|0| ... |0|b2|b1|b0| -> offset 2 (bits required for first level) + — — — ————— — —— —— —— + |0|0|0| ... |0| 0| 0|c0| -> offset 5 (bits required for first two levels) + ‾ ‾ ‾ ‾‾‾‾‾ ‾ ‾‾ ‾‾ ‾‾ + and the resulting unsigned integer representation will be: + _ _ _ _____ _ __ __ __ __ __ __ + |0|0|0| ... |0|c0|b2|b1|b0|a1|a0| + ‾ ‾ ‾ ‾‾‾‾‾ ‾ ‾‾ ‾‾ ‾‾ ‾‾ ‾‾ ‾‾ + + Offsets are calculated at initialization, labels are transformed by method + _codes_to_ints. + + Keys are located by first locating each component against the respective + level, then locating (the integer representation of) codes. 
""" - def get_indexer(self, values): - # convert a MI to an ndarray - if hasattr(values, 'values'): - values = values.values - return super(MultiIndexObjectEngine, self).get_indexer(values) + def __init__(self, object levels, object labels, + ndarray[uint64_t, ndim=1] offsets): + """ + Parameters + ---------- + levels : list-like of numpy arrays + Levels of the MultiIndex + labels : list-like of numpy arrays of integer dtype + Labels of the MultiIndex + offsets : numpy array of uint64 dtype + Pre-calculated offsets, one for each level of the index + """ - cpdef get_loc(self, object val): + self.levels = levels + self.offsets = offsets - # convert a MI to an ndarray - if hasattr(val, 'values'): - val = val.values - return super(MultiIndexObjectEngine, self).get_loc(val) + # Transform labels in a single array, and add 1 so that we are working + # with positive integers (-1 for NaN becomes 0): + codes = (np.array(labels, dtype='int64').T + 1).astype('uint64', + copy=False) + # Map each codes combination in the index to an integer unambiguously + # (no collisions possible), based on the "offsets", which describe the + # number of bits to switch labels for each level: + lab_ints = self._codes_to_ints(codes) -cdef class MultiIndexHashEngine(ObjectEngine): - """ - Use a hashing based MultiIndex impl - but use the IndexEngine for computation + # Initialize underlying index (e.g. libindex.UInt64Engine) with + # integers representing labels: we will use its get_loc and get_indexer + self._base.__init__(self, lambda: lab_ints, len(lab_ints)) - This provides good performance with larger MI's - """ + def _extract_level_codes(self, object target, object method=None): + """ + Map the requested list of (tuple) keys to their integer representations + for searching in the underlying integer index. + + Parameters + ---------- + target : list-like of keys + Each key is a tuple, with a label for each level of the index. 
+ + Returns + ------ + int_keys : 1-dimensional array of dtype uint64 or object + Integers representing one combination each + """ - def _call_monotonic(self, object mi): - # defer these back to the mi iteself - return (mi.is_monotonic_increasing, - mi.is_monotonic_decreasing, - mi.is_unique) + level_codes = [lev.get_indexer(codes) + 1 for lev, codes + in zip(self.levels, zip(*target))] + return self._codes_to_ints(np.array(level_codes, dtype='uint64').T) + + def get_indexer(self, object target, object method=None, + object limit=None): + lab_ints = self._extract_level_codes(target) + + # All methods (exact, backfill, pad) directly map to the respective + # methods of the underlying (integers) index... + if method is not None: + # but underlying backfill and pad methods require index and keys + # to be sorted. The index already is (checked in + # Index._get_fill_indexer), sort (integer representations of) keys: + order = np.argsort(lab_ints) + lab_ints = lab_ints[order] + indexer = (getattr(self._base, 'get_{}_indexer'.format(method)) + (self, lab_ints, limit=limit)) + indexer = indexer[order] + else: + indexer = self._base.get_indexer(self, lab_ints) - def get_backfill_indexer(self, other, limit=None): - # we coerce to ndarray-of-tuples - values = np.array(self._get_index_values()) - return algos.backfill_object(values, other, limit=limit) + return indexer - def get_pad_indexer(self, other, limit=None): - # we coerce to ndarray-of-tuples - values = np.array(self._get_index_values()) - return algos.pad_object(values, other, limit=limit) + def get_loc(self, object key): + if is_definitely_invalid_key(key): + raise TypeError("'{key}' is an invalid key".format(key=key)) + if not PyTuple_Check(key): + raise KeyError(key) + try: + indices = [0 if checknull(v) else lev.get_loc(v) + 1 + for lev, v in zip(self.levels, key)] + except KeyError: + raise KeyError(key) - cpdef get_loc(self, object val): - if is_definitely_invalid_key(val): - raise TypeError("'{val}' is an 
invalid key".format(val=val)) + # Transform indices into single integer: + lab_int = self._codes_to_ints(np.array(indices, dtype='uint64')) - self._ensure_mapping_populated() - if not self.unique: - return self._get_loc_duplicates(val) + return self._base.get_loc(self, lab_int) - try: - return self.mapping.get_item(val) - except TypeError: - raise KeyError(val) + def get_indexer_non_unique(self, object target): + # This needs to be overridden just because the default one works on + # target._values, and target can be itself a MultiIndex. - def get_indexer(self, values): - self._ensure_mapping_populated() - return self.mapping.lookup(values) + lab_ints = self._extract_level_codes(target) + indexer = self._base.get_indexer_non_unique(self, lab_ints) + + return indexer + + def __contains__(self, object val): + # Default __contains__ looks in the underlying mapping, which in this + # case only contains integer representations. + try: + self.get_loc(val) + return True + except (KeyError, TypeError, ValueError): + return False - cdef _make_hash_table(self, n): - return _hash.MultiIndexHashTable(n) # Generated from template. include "index_class_helper.pxi" diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 797774832aaa5..510f7245cebd8 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -45,6 +45,87 @@ target_klass='MultiIndex or list of tuples')) +class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine, + libindex.UInt64Engine): + """ + This class manages a MultiIndex by mapping label combinations to positive + integers. + """ + _base = libindex.UInt64Engine + + def _codes_to_ints(self, codes): + """ + Transform combination(s) of uint64 in one uint64 (each), in a strictly + monotonic way (i.e. respecting the lexicographic order of integer + combinations): see BaseMultiIndexCodesEngine documentation. 
+ + Parameters + ---------- + codes : 1- or 2-dimensional array of dtype uint64 + Combinations of integers (one per row) + + Returns + ------ + int_keys : scalar or 1-dimensional array, of dtype uint64 + Integer(s) representing one combination (each) + """ + # Shift the representation of each level by the pre-calculated number + # of bits: + codes <<= self.offsets + + # Now sum and OR are in fact interchangeable. This is a simple + # composition of the (disjunct) significant bits of each level (i.e. + # each column in "codes") in a single positive integer: + if codes.ndim == 1: + # Single key + return np.bitwise_or.reduce(codes) + + # Multiple keys + return np.bitwise_or.reduce(codes, axis=1) + + +class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, + libindex.ObjectEngine): + """ + This class manages those (extreme) cases in which the number of possible + label combinations overflows the 64 bits integers, and uses an ObjectEngine + containing Python integers. + """ + _base = libindex.ObjectEngine + + def _codes_to_ints(self, codes): + """ + Transform combination(s) of uint64 in one Python integer (each), in a + strictly monotonic way (i.e. respecting the lexicographic order of + integer combinations): see BaseMultiIndexCodesEngine documentation. + + Parameters + ---------- + codes : 1- or 2-dimensional array of dtype uint64 + Combinations of integers (one per row) + + Returns + ------ + int_keys : int, or 1-dimensional array of dtype object + Integer(s) representing one combination (each) + """ + + # Shift the representation of each level by the pre-calculated number + # of bits. Since this can overflow uint64, first make sure we are + # working with Python integers: + codes = codes.astype('object') << self.offsets + + # Now sum and OR are in fact interchangeable. This is a simple + # composition of the (disjunct) significant bits of each level (i.e. 
+ # each column in "codes") in a single positive integer (per row): + if codes.ndim == 1: + # Single key + return np.bitwise_or.reduce(codes) + + # Multiple keys + return np.bitwise_or.reduce(codes, axis=1) + + class MultiIndex(Index): """ A multi-level, or hierarchical, index object for pandas objects @@ -687,16 +768,25 @@ def _get_level_number(self, level): @cache_readonly def _engine(self): - - # choose our engine based on our size - # the hashing based MultiIndex for larger - # sizes, and the MultiIndexOjbect for smaller - # xref: https://github.com/pandas-dev/pandas/pull/16324 - l = len(self) - if l > 10000: - return libindex.MultiIndexHashEngine(lambda: self, l) - - return libindex.MultiIndexObjectEngine(lambda: self.values, l) + # Calculate the number of bits needed to represent labels in each + # level, as log2 of their sizes (including -1 for NaN): + sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels])) + + # Sum bit counts, starting from the _right_.... + lev_bits = np.cumsum(sizes[::-1])[::-1] + + # ... in order to obtain offsets such that sorting the combination of + # shifted codes (one for each level, resulting in a unique integer) is + # equivalent to sorting lexicographically the codes themselves. 
Notice + # that each level needs to be shifted by the number of bits needed to + # represent the _previous_ ones: + offsets = np.concatenate([lev_bits[1:], [0]]).astype('uint64') + + # Check the total number of bits needed for our representation: + if lev_bits[0] > 64: + # The levels would overflow a 64 bit uint - use Python integers: + return MultiIndexPyIntEngine(self.levels, self.labels, offsets) + return MultiIndexUIntEngine(self.levels, self.labels, offsets) @property def values(self): @@ -1885,16 +1975,11 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None): if tolerance is not None: raise NotImplementedError("tolerance not implemented yet " 'for MultiIndex') - indexer = self._get_fill_indexer(target, method, limit) + indexer = self._engine.get_indexer(target, method, limit) elif method == 'nearest': raise NotImplementedError("method='nearest' not implemented yet " 'for MultiIndex; see GitHub issue 9365') else: - # we may not compare equally because of hashing if we - # don't have the same dtypes - if self._inferred_type_levels != target._inferred_type_levels: - return Index(self.values).get_indexer(target.values) - indexer = self._engine.get_indexer(target) return _ensure_platform_int(indexer) @@ -2131,17 +2216,6 @@ def _maybe_to_slice(loc): ''.format(keylen, self.nlevels)) if keylen == self.nlevels and self.is_unique: - - def _maybe_str_to_time_stamp(key, lev): - if lev.is_all_dates and not isinstance(key, Timestamp): - try: - return Timestamp(key, tz=getattr(lev, 'tz', None)) - except Exception: - pass - return key - - key = com._values_from_object(key) - key = tuple(map(_maybe_str_to_time_stamp, key, self.levels)) return self._engine.get_loc(key) # -- partial selection or non-unique index @@ -2274,34 +2348,9 @@ def partial_selection(key, indexer=None): return indexer, maybe_droplevels(indexer, ilevels, drop_level) - if len(key) == self.nlevels: - - if self.is_unique: - - # here we have a completely specified key, but are - # using 
some partial string matching here - # GH4758 - all_dates = ((l.is_all_dates and - not isinstance(k, compat.string_types)) - for k, l in zip(key, self.levels)) - can_index_exactly = any(all_dates) - if (any(l.is_all_dates - for k, l in zip(key, self.levels)) and - not can_index_exactly): - indexer = self.get_loc(key) - - # we have a multiple selection here - if (not isinstance(indexer, slice) or - indexer.stop - indexer.start != 1): - return partial_selection(key, indexer) - - key = tuple(self[indexer].tolist()[0]) - - return (self._engine.get_loc( - com._values_from_object(key)), None) - - else: - return partial_selection(key) + if len(key) == self.nlevels and self.is_unique: + # Complete key in unique index -> standard get_loc + return (self._engine.get_loc(key), None) else: return partial_selection(key) else: diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 9664d73651185..aedc957ec67da 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -1258,6 +1258,17 @@ def test_get_loc_level(self): assert result == expected assert new_index.equals(index.droplevel(0)) + @pytest.mark.parametrize('level', [0, 1]) + @pytest.mark.parametrize('null_val', [np.nan, pd.NaT, None]) + def test_get_loc_nan(self, level, null_val): + # GH 18485 : NaN in MultiIndex + levels = [['a', 'b'], ['c', 'd']] + key = ['b', 'd'] + levels[level] = np.array([0, null_val], dtype=type(null_val)) + key[level] = null_val + idx = MultiIndex.from_product(levels) + assert idx.get_loc(tuple(key)) == 3 + def test_get_loc_missing_nan(self): # GH 8569 idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]]) @@ -1266,6 +1277,38 @@ def test_get_loc_missing_nan(self): pytest.raises(KeyError, idx.get_loc, np.nan) pytest.raises(KeyError, idx.get_loc, [np.nan]) + @pytest.mark.parametrize('dtype1', [int, float, bool, str]) + @pytest.mark.parametrize('dtype2', [int, float, bool, str]) + def test_get_loc_multiple_dtypes(self, dtype1, dtype2): 
+ # GH 18520 + levels = [np.array([0, 1]).astype(dtype1), + np.array([0, 1]).astype(dtype2)] + idx = pd.MultiIndex.from_product(levels) + assert idx.get_loc(idx[2]) == 2 + + @pytest.mark.parametrize('level', [0, 1]) + @pytest.mark.parametrize('dtypes', [[int, float], [float, int]]) + def test_get_loc_implicit_cast(self, level, dtypes): + # GH 18818, GH 15994 : as flat index, cast int to float and vice-versa + levels = [['a', 'b'], ['c', 'd']] + key = ['b', 'd'] + lev_dtype, key_dtype = dtypes + levels[level] = np.array([0, 1], dtype=lev_dtype) + key[level] = key_dtype(1) + idx = MultiIndex.from_product(levels) + assert idx.get_loc(tuple(key)) == 3 + + def test_get_loc_cast_bool(self): + # GH 19086 : int is casted to bool, but not vice-versa + levels = [[False, True], np.arange(2, dtype='int64')] + idx = MultiIndex.from_product(levels) + + assert idx.get_loc((0, 1)) == 1 + assert idx.get_loc((1, 0)) == 2 + + pytest.raises(KeyError, idx.get_loc, (False, True)) + pytest.raises(KeyError, idx.get_loc, (True, False)) + def test_slice_locs(self): df = tm.makeTimeDataFrame() stacked = df.stack() diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py index 424ba6aab9a56..9582264a8c716 100644 --- a/pandas/tests/test_multilevel.py +++ b/pandas/tests/test_multilevel.py @@ -1590,6 +1590,38 @@ def test_unstack_group_index_overflow(self): result = s.unstack(4) assert result.shape == (500, 2) + def test_pyint_engine(self): + # GH 18519 : when combinations of codes cannot be represented in 64 + # bits, the index underlying the MultiIndex engine works with Python + # integers, rather than uint64. + N = 5 + keys = [tuple(l) for l in [[0] * 10 * N, + [1] * 10 * N, + [2] * 10 * N, + [np.nan] * N + [2] * 9 * N, + [0] * N + [2] * 9 * N, + [np.nan] * N + [2] * 8 * N + [0] * N]] + # Each level contains 4 elements (including NaN), so it is represented + # in 2 bits, for a total of 2*N*10 = 100 > 64 bits. 
If we were using a + # 64 bit engine and truncating the first levels, the fourth and fifth + # keys would collide; if truncating the last levels, the fifth and + # sixth; if rotating bits rather than shifting, the third and fifth. + + for idx in range(len(keys)): + index = MultiIndex.from_tuples(keys) + assert index.get_loc(keys[idx]) == idx + + expected = np.arange(idx + 1, dtype='int64') + result = index.get_indexer([keys[i] for i in expected]) + tm.assert_numpy_array_equal(result, expected) + + # With missing key: + idces = range(len(keys)) + expected = np.array([-1] + list(idces), dtype='int64') + missing = tuple([0, 1] * 5 * N) + result = index.get_indexer([missing] + [keys[i] for i in idces]) + tm.assert_numpy_array_equal(result, expected) + def test_getitem_lowerdim_corner(self): pytest.raises(KeyError, self.frame.loc.__getitem__, (('bar', 'three'), 'B'))
closes #18519 closes #18818 closes #18520 closes #18485 closes #15994 closes #19086 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This PR provides a cleaner and more robust ``MultiIndex`` engine. asv benchmarks ``` before after ratio [93033151] [6e1ecec1] + 4.26±0.03ms 20.8±0.08ms 4.87 multiindex_object.GetLoc.time_med_get_loc_warm + 4.28±0.02μs 19.6±0.1μs 4.58 multiindex_object.GetLoc.time_string_get_loc + 4.12±0.02ms 18.1±0.09ms 4.40 multiindex_object.GetLoc.time_small_get_loc_warm + 4.55±0.1μs 18.8±0.3μs 4.13 multiindex_object.GetLoc.time_med_get_loc - 178±4ms 148±0.7ms 0.83 multiindex_object.GetLoc.time_large_get_loc - 163±1ms 120±0.5ms 0.73 multiindex_object.Integer.time_get_indexer - 336±0.6ms 167±1ms 0.50 multiindex_object.GetLoc.time_large_get_loc_warm SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. ``` ... clearly show that the engine increases performance for large indexes, and significantly decreases performance for ``get_loc`` on small/medium indexes. For reference, on the ``small`` test case (10 elements), 16 μs. are lost on each ``get_loc`` call, while on the ``large`` test case (1 million elements), 146 μs. are gained on each ``get_loc`` call. I think the tradeoff is acceptable, also considering that with this approach, any improvement to flat indexes automatically transfers to ``MultiIndex``. But most importantly, I think we have no other options to fix several annoying open issues (and more bugs - I'm sure - which were never reported) about incoherence between flat indexes and ``MultiIndex``. Notice that moving the engine into ``index.pyx`` is problematic because cython does not support multiple inheritance. Anyway, trying to cythonize a couple of methods brought no gain at all (as the overhead is mostly due to the multiple ``get_loc(val)`` calls on the different levels).
https://api.github.com/repos/pandas-dev/pandas/pulls/19074
2018-01-04T10:41:25Z
2018-01-28T11:53:06Z
2018-01-28T11:53:05Z
2018-01-29T02:51:24Z
BUG: Allow merging on Index vectors
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index d53de30187156..22f9ebd8aab98 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -407,7 +407,7 @@ Reshaping - Bug in :func:`Series.rank` where ``Series`` containing ``NaT`` modifies the ``Series`` inplace (:issue:`18521`) - Bug in :func:`cut` which fails when using readonly arrays (:issue:`18773`) - Bug in :func:`Dataframe.pivot_table` which fails when the ``aggfunc`` arg is of type string. The behavior is now consistent with other methods like ``agg`` and ``apply`` (:issue:`18713`) - +- Bug in :func:`DataFrame.merge` in which merging using ``Index`` objects as vectors raised an Exception (:issue:`19038`) Numeric ^^^^^^^ diff --git a/pandas/core/dtypes/api.py b/pandas/core/dtypes/api.py index a2180ecc4632f..738e1ea9062f6 100644 --- a/pandas/core/dtypes/api.py +++ b/pandas/core/dtypes/api.py @@ -55,6 +55,7 @@ is_dict_like, is_iterator, is_file_like, + is_array_like, is_list_like, is_hashable, is_named_tuple) diff --git a/pandas/core/dtypes/inference.py b/pandas/core/dtypes/inference.py index 8010a213efaf0..6fed25a0012f2 100644 --- a/pandas/core/dtypes/inference.py +++ b/pandas/core/dtypes/inference.py @@ -267,6 +267,39 @@ def is_list_like(obj): not isinstance(obj, string_and_binary_types)) +def is_array_like(obj): + """ + Check if the object is array-like. + + For an object to be considered array-like, it must be list-like and + have a `dtype` attribute. + + Parameters + ---------- + obj : The object to check. + + Returns + ------- + is_array_like : bool + Whether `obj` has array-like properties. 
+ + Examples + -------- + >>> is_array_like(np.array([1, 2, 3])) + True + >>> is_array_like(pd.Series(["a", "b"])) + True + >>> is_array_like(pd.Index(["2016-01-01"])) + True + >>> is_array_like([1, 2, 3]) + False + >>> is_array_like(("a", "b")) + False + """ + + return is_list_like(obj) and hasattr(obj, "dtype") + + def is_nested_list_like(obj): """ Check if the object is list-like, and that all of its elements diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py index ad2a433b5632b..8ee30bf72d313 100644 --- a/pandas/core/reshape/merge.py +++ b/pandas/core/reshape/merge.py @@ -10,7 +10,7 @@ from pandas.compat import range, lzip, zip, map, filter import pandas.compat as compat -from pandas import (Categorical, Series, DataFrame, +from pandas import (Categorical, DataFrame, Index, MultiIndex, Timedelta) from pandas.core.frame import _merge_doc from pandas.core.dtypes.common import ( @@ -18,6 +18,7 @@ is_datetime64_dtype, needs_i8_conversion, is_int64_dtype, + is_array_like, is_categorical_dtype, is_integer_dtype, is_float_dtype, @@ -814,13 +815,12 @@ def _get_merge_keys(self): join_names = [] right_drop = [] left_drop = [] + left, right = self.left, self.right stacklevel = 5 # Number of stack levels from df.merge - is_lkey = lambda x: isinstance( - x, (np.ndarray, Series)) and len(x) == len(left) - is_rkey = lambda x: isinstance( - x, (np.ndarray, Series)) and len(x) == len(right) + is_lkey = lambda x: is_array_like(x) and len(x) == len(left) + is_rkey = lambda x: is_array_like(x) and len(x) == len(right) # Note that pd.merge_asof() has separate 'on' and 'by' parameters. A # user could, for example, request 'left_index' and 'left_by'. 
In a diff --git a/pandas/tests/api/test_types.py b/pandas/tests/api/test_types.py index 1cbcf3f9109a4..7e6430accc546 100644 --- a/pandas/tests/api/test_types.py +++ b/pandas/tests/api/test_types.py @@ -30,7 +30,7 @@ class TestTypes(Base): 'is_period_dtype', 'is_interval', 'is_interval_dtype', 'is_re', 'is_re_compilable', 'is_dict_like', 'is_iterator', 'is_file_like', - 'is_list_like', 'is_hashable', + 'is_list_like', 'is_hashable', 'is_array_like', 'is_named_tuple', 'pandas_dtype', 'union_categoricals', 'infer_dtype'] deprecated = ['is_any_int_dtype', 'is_floating_dtype', 'is_sequence'] diff --git a/pandas/tests/dtypes/test_inference.py b/pandas/tests/dtypes/test_inference.py index 33c570a814e7d..b4f5d67530fbd 100644 --- a/pandas/tests/dtypes/test_inference.py +++ b/pandas/tests/dtypes/test_inference.py @@ -78,6 +78,23 @@ def test_is_list_like_fails(ll): assert not inference.is_list_like(ll) +def test_is_array_like(): + assert inference.is_array_like(Series([])) + assert inference.is_array_like(Series([1, 2])) + assert inference.is_array_like(np.array(["a", "b"])) + assert inference.is_array_like(Index(["2016-01-01"])) + + class DtypeList(list): + dtype = "special" + + assert inference.is_array_like(DtypeList()) + + assert not inference.is_array_like([1, 2, 3]) + assert not inference.is_array_like(tuple()) + assert not inference.is_array_like("foo") + assert not inference.is_array_like(123) + + @pytest.mark.parametrize('inner', [ [], [1], (1, ), (1, 2), {'a': 1}, set([1, 'a']), Series([1]), Series([]), Series(['a']).str, (x for x in range(5)) diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py index 70b84f7a6225b..b9a667499b7a0 100644 --- a/pandas/tests/reshape/merge/test_merge.py +++ b/pandas/tests/reshape/merge/test_merge.py @@ -1370,6 +1370,32 @@ def f(): household.join(log_return, how='outer') pytest.raises(NotImplementedError, f) + @pytest.mark.parametrize("klass", [None, np.asarray, Series, Index]) + def 
test_merge_datetime_index(self, klass): + # see gh-19038 + df = DataFrame([1, 2, 3], + ["2016-01-01", "2017-01-01", "2018-01-01"], + columns=["a"]) + df.index = pd.to_datetime(df.index) + on_vector = df.index.year + + if klass is not None: + on_vector = klass(on_vector) + + expected = DataFrame({"a": [1, 2, 3]}) + + if klass == np.asarray: + # The join key is added for ndarray. + expected["key_1"] = [2016, 2017, 2018] + + result = df.merge(df, on=["a", on_vector], how="inner") + tm.assert_frame_equal(result, expected) + + expected = DataFrame({"a_x": [1, 2, 3], + "a_y": [1, 2, 3]}) + result = df.merge(df, on=[df.index.year], how="inner") + tm.assert_frame_equal(result, expected) + class TestMergeDtypes(object):
This behavior used to work in v0.19.0 and is consistent with the documentation. Closes #19038
https://api.github.com/repos/pandas-dev/pandas/pulls/19073
2018-01-04T07:46:36Z
2018-01-06T17:11:29Z
2018-01-06T17:11:29Z
2018-01-06T19:26:35Z
Tests for fixed issues in Series[datetime64]
diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index c1e9a62d98fd3..2350477c2302a 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1195,6 +1195,18 @@ def test_sub_single_tz(self): expected = Series([Timedelta('-2days')]) assert_series_equal(result, expected) + def test_dt64tz_series_sub_dtitz(self): + # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series + # (with same tz) raises, fixed by #19024 + dti = pd.date_range('1999-09-30', periods=10, tz='US/Pacific') + ser = pd.Series(dti) + expected = pd.Series(pd.TimedeltaIndex(['0days'] * 10)) + + res = dti - ser + tm.assert_series_equal(res, expected) + res = ser - dti + tm.assert_series_equal(res, expected) + def test_sub_datetime_compat(self): # see gh-14088 s = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), pd.NaT]) @@ -1317,6 +1329,37 @@ def test_datetime64_ops_nat(self): with pytest.raises(TypeError): nat_series_dtype_timestamp / 1 + def test_dt64series_arith_overflow(self): + # GH#12534, fixed by #19024 + dt = pd.Timestamp('1700-01-31') + td = pd.Timedelta('20000 Days') + dti = pd.date_range('1949-09-30', freq='100Y', periods=4) + ser = pd.Series(dti) + with pytest.raises(OverflowError): + ser - dt + with pytest.raises(OverflowError): + dt - ser + with pytest.raises(OverflowError): + ser + td + with pytest.raises(OverflowError): + td + ser + + ser.iloc[-1] = pd.NaT + expected = pd.Series(['2004-10-03', '2104-10-04', '2204-10-04', 'NaT'], + dtype='datetime64[ns]') + res = ser + td + tm.assert_series_equal(res, expected) + res = td + ser + tm.assert_series_equal(res, expected) + + ser.iloc[1:] = pd.NaT + expected = pd.Series(['91279 Days', 'NaT', 'NaT', 'NaT'], + dtype='timedelta64[ns]') + res = ser - dt + tm.assert_series_equal(res, expected) + res = dt - ser + tm.assert_series_equal(res, -expected) + class TestSeriesOperators(TestData): def test_op_method(self):
- [x] closes #12534, closes #19071 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19072
2018-01-04T06:59:31Z
2018-01-05T00:45:31Z
2018-01-05T00:45:31Z
2018-01-05T01:59:09Z
CLN: ASV string
diff --git a/asv_bench/benchmarks/pandas_vb_common.py b/asv_bench/benchmarks/pandas_vb_common.py index 7b4fec0090701..c0d24afae4219 100644 --- a/asv_bench/benchmarks/pandas_vb_common.py +++ b/asv_bench/benchmarks/pandas_vb_common.py @@ -1,27 +1,29 @@ import os -from pandas import * -import pandas as pd -from numpy.random import randn -from numpy.random import randint -import pandas.util.testing as tm -import random -import numpy as np -import threading from importlib import import_module +import numpy as np try: - from pandas.compat import range + from pandas import Panel except ImportError: - pass + from pandas import WidePanel as Panel # noqa + +# Compatibility import for lib +for imp in ['pandas._libs.lib', 'pandas.lib']: + try: + lib = import_module(imp) + break + except: + pass numeric_dtypes = [np.int64, np.int32, np.uint32, np.uint64, np.float32, np.float64, np.int16, np.int8, np.uint16, np.uint8] datetime_dtypes = [np.datetime64, np.timedelta64] -# This function just needs to be imported into each benchmark file in order to -# sets up the random seed before each function. -# http://asv.readthedocs.io/en/latest/writing_benchmarks.html + def setup(*args, **kwargs): + # This function just needs to be imported into each benchmark file to + # set up the random seed before each function. 
+ # http://asv.readthedocs.io/en/latest/writing_benchmarks.html np.random.seed(1234) @@ -42,22 +44,3 @@ def remove(self, f): def teardown(self, *args, **kwargs): self.remove(self.fname) - -# Compatibility import for lib -for imp in ['pandas._libs.lib', 'pandas.lib', 'pandas_tseries']: - try: - lib = import_module(imp) - break - except: - pass - -try: - Panel = Panel -except Exception: - Panel = WidePanel - -# didn't add to namespace until later -try: - from pandas.core.index import MultiIndex -except ImportError: - pass diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py index 948d4b92a5a57..4435327e1eb38 100644 --- a/asv_bench/benchmarks/strings.py +++ b/asv_bench/benchmarks/strings.py @@ -1,119 +1,144 @@ -from .pandas_vb_common import * -import string -import itertools as IT -import pandas.util.testing as testing +import numpy as np +from pandas import Series +import pandas.util.testing as tm -class StringMethods(object): - goal_time = 0.2 +class Methods(object): - def make_series(self, letters, strlen, size): - return Series([str(x) for x in np.fromiter(IT.cycle(letters), count=(size * strlen), dtype='|S1').view('|S{}'.format(strlen))]) + goal_time = 0.2 def setup(self): - self.many = self.make_series(('matchthis' + string.ascii_uppercase), strlen=19, size=10000) - self.few = self.make_series(('matchthis' + (string.ascii_uppercase * 42)), strlen=19, size=10000) - self.s = self.make_series(string.ascii_uppercase, strlen=10, size=10000).str.join('|') + self.s = Series(tm.makeStringIndex(10**5)) def time_cat(self): - self.many.str.cat(sep=',') + self.s.str.cat(sep=',') def time_center(self): - self.many.str.center(100) - - def time_contains_few(self): - self.few.str.contains('matchthis') - - def time_contains_few_noregex(self): - self.few.str.contains('matchthis', regex=False) - - def time_contains_many(self): - self.many.str.contains('matchthis') - - def time_contains_many_noregex(self): - self.many.str.contains('matchthis', regex=False) 
+ self.s.str.center(100) def time_count(self): - self.many.str.count('matchthis') + self.s.str.count('A') def time_endswith(self): - self.many.str.endswith('matchthis') + self.s.str.endswith('A') def time_extract(self): - self.many.str.extract('(\\w*)matchthis(\\w*)') + self.s.str.extract('(\\w*)A(\\w*)') def time_findall(self): - self.many.str.findall('[A-Z]+') + self.s.str.findall('[A-Z]+') def time_get(self): - self.many.str.get(0) - - def time_join_split(self): - self.many.str.join('--').str.split('--') - - def time_join_split_expand(self): - self.many.str.join('--').str.split('--', expand=True) + self.s.str.get(0) def time_len(self): - self.many.str.len() + self.s.str.len() def time_match(self): - self.many.str.match('mat..this') + self.s.str.match('A') def time_pad(self): - self.many.str.pad(100, side='both') - - def time_repeat(self): - self.many.str.repeat(list(IT.islice(IT.cycle(range(1, 4)), len(self.many)))) + self.s.str.pad(100, side='both') def time_replace(self): - self.many.str.replace('(matchthis)', '\x01\x01') + self.s.str.replace('A', '\x01\x01') def time_slice(self): - self.many.str.slice(5, 15, 2) + self.s.str.slice(5, 15, 2) def time_startswith(self): - self.many.str.startswith('matchthis') + self.s.str.startswith('A') def time_strip(self): - self.many.str.strip('matchthis') + self.s.str.strip('A') def time_rstrip(self): - self.many.str.rstrip('matchthis') + self.s.str.rstrip('A') def time_lstrip(self): - self.many.str.lstrip('matchthis') + self.s.str.lstrip('A') def time_title(self): - self.many.str.title() + self.s.str.title() def time_upper(self): - self.many.str.upper() + self.s.str.upper() def time_lower(self): - self.many.str.lower() + self.s.str.lower() + + +class Repeat(object): + + goal_time = 0.2 + params = ['int', 'array'] + param_names = ['repeats'] + + def setup(self, repeats): + N = 10**5 + self.s = Series(tm.makeStringIndex(N)) + repeat = {'int': 1, 'array': np.random.randint(1, 3, N)} + self.repeat = repeat[repeats] + + def 
time_repeat(self, repeats): + self.s.str.repeat(self.repeat) + + +class Contains(object): + + goal_time = 0.2 + params = [True, False] + param_names = ['regex'] + + def setup(self, regex): + self.s = Series(tm.makeStringIndex(10**5)) + + def time_contains(self, regex): + self.s.str.contains('A', regex=regex) + + +class Split(object): + + goal_time = 0.2 + params = [True, False] + param_names = ['expand'] + + def setup(self, expand): + self.s = Series(tm.makeStringIndex(10**5)).str.join('--') + + def time_split(self, expand): + self.s.str.split('--', expand=expand) + + +class Dummies(object): + + goal_time = 0.2 + + def setup(self): + self.s = Series(tm.makeStringIndex(10**5)).str.join('|') def time_get_dummies(self): self.s.str.get_dummies('|') -class StringEncode(object): +class Encode(object): + goal_time = 0.2 def setup(self): - self.ser = Series(testing.makeUnicodeIndex()) + self.ser = Series(tm.makeUnicodeIndex()) def time_encode_decode(self): self.ser.str.encode('utf-8').str.decode('utf-8') -class StringSlice(object): +class Slice(object): goal_time = 0.2 def setup(self): self.s = Series(['abcdefg', np.nan] * 500000) - def time_series_string_vector_slice(self): + def time_vector_slice(self): # GH 2602 self.s.str[:5] diff --git a/ci/lint.sh b/ci/lint.sh index b4eafcaf28e39..2031eaa8a1d5d 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -24,7 +24,7 @@ if [ "$LINT" ]; then echo "Linting setup.py DONE" echo "Linting asv_bench/benchmarks/" - flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ps]*.py --ignore=F811 + flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/*.py --ignore=F811 if [ $? -ne "0" ]; then RET=1 fi
Cleaned up the `strings.py` benchmarks, `pandas_vb_common.py` of all the unnecessary imports, and linting all the `.py` files in `asv_bench` now. ``` asv dev -b ^strings · Discovering benchmarks · Running 25 total benchmarks (1 commits * 1 environments * 25 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 4.00%] ··· Running strings.Contains.time_contains ok [ 4.00%] ···· ======= ======== regex ------- -------- True 107ms False 38.4ms ======= ======== [ 8.00%] ··· Running strings.Dummies.time_get_dummies 7.80s [ 12.00%] ··· Running strings.Encode.time_encode_decode 697μs [ 16.00%] ··· Running strings.Methods.time_cat 24.1ms [ 20.00%] ··· Running strings.Methods.time_center 84.2ms [ 24.00%] ··· Running strings.Methods.time_count 119ms [ 28.00%] ··· Running strings.Methods.time_endswith 68.0ms [ 32.00%] ··· Running strings.Methods.time_extract 477ms [ 32.00%] ····· /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/strings.py:26: FutureWarning: currently extract(expand=None) means expand=False (return Index/Series/DataFrame) but in a future version of pandas this will be changed to expand=True (return DataFrame) self.s.str.extract('(\\w*)A(\\w*)') [ 36.00%] ··· Running strings.Methods.time_findall 173ms [ 40.00%] ··· Running strings.Methods.time_get 59.9ms [ 44.00%] ··· Running strings.Methods.time_len 41.1ms [ 48.00%] ··· Running strings.Methods.time_lower 56.3ms [ 52.00%] ··· Running strings.Methods.time_lstrip 56.3ms [ 56.00%] ··· Running strings.Methods.time_match 143ms [ 60.00%] ··· Running strings.Methods.time_pad 85.6ms [ 64.00%] ··· Running strings.Methods.time_replace 73.2ms [ 68.00%] ··· Running strings.Methods.time_rstrip 59.1ms [ 72.00%] ··· Running strings.Methods.time_slice 51.9ms [ 76.00%] ··· Running strings.Methods.time_startswith 67.2ms [ 80.00%] ··· Running strings.Methods.time_strip 56.0ms [ 84.00%] ··· Running 
strings.Methods.time_title 59.3ms [ 88.00%] ··· Running strings.Methods.time_upper 55.8ms [ 92.00%] ··· Running strings.Repeat.time_repeat ok [ 92.00%] ···· ========= ======== repeats --------- -------- int 77.7ms array 70.2ms ========= ======== [ 96.00%] ··· Running strings.Slice.time_vector_slice 271ms [100.00%] ··· Running strings.Split.time_split ok [100.00%] ···· ======== ======= expand -------- ------- True 773ms False 352ms ======== ======= ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19069
2018-01-04T04:31:52Z
2018-01-06T17:19:45Z
2018-01-06T17:19:45Z
2018-01-07T04:59:32Z
TST: catch warnings on Panel
diff --git a/pandas/tests/generic/test_panel.py b/pandas/tests/generic/test_panel.py index 720f471a0ebd4..4cbd5cb2aa69f 100644 --- a/pandas/tests/generic/test_panel.py +++ b/pandas/tests/generic/test_panel.py @@ -32,3 +32,26 @@ def test_to_xarray(self): # idempotency assert_panel_equal(result.to_pandas(), p) + + +# run all the tests, but wrap each in a warning catcher +for t in ['test_rename', 'test_get_numeric_data', + 'test_get_default', 'test_nonzero', + 'test_downcast', 'test_constructor_compound_dtypes', + 'test_head_tail', + 'test_size_compat', 'test_split_compat', + 'test_unexpected_keyword', + 'test_stat_unexpected_keyword', 'test_api_compat', + 'test_stat_non_defaults_args', + 'test_truncate_out_of_bounds', + 'test_metadata_propagation', 'test_copy_and_deepcopy', + 'test_sample']: + + def f(): + def tester(self): + f = getattr(super(TestPanel, self), t) + with catch_warnings(record=True): + f() + return tester + + setattr(TestPanel, t, f()) diff --git a/pandas/util/testing.py b/pandas/util/testing.py index 2278d1fe25c7c..b99f019a8e98f 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -13,7 +13,7 @@ import traceback from datetime import datetime -from functools import wraps, partial +from functools import wraps from contextlib import contextmanager from numpy.random import randn, rand @@ -1266,14 +1266,13 @@ def assert_frame_equal(left, right, check_dtype=True, obj='DataFrame.iloc[:, {idx}]'.format(idx=i)) -def assert_panelnd_equal(left, right, - check_dtype=True, - check_panel_type=False, - check_less_precise=False, - assert_func=assert_frame_equal, - check_names=False, - by_blocks=False, - obj='Panel'): +def assert_panel_equal(left, right, + check_dtype=True, + check_panel_type=False, + check_less_precise=False, + check_names=False, + by_blocks=False, + obj='Panel'): """Check that left and right Panels are equal. Parameters @@ -1288,7 +1287,6 @@ def assert_panelnd_equal(left, right, Specify comparison precision. 
Only used when check_exact is False. 5 digits (False) or 3 digits (True) after decimal points are compared. If int, then specify the digits to compare - assert_func : function for comparing data check_names : bool, default True Whether to check the Index names attribute. by_blocks : bool, default False @@ -1322,19 +1320,15 @@ def assert_panelnd_equal(left, right, assert item in right, msg litem = left.iloc[i] ritem = right.iloc[i] - assert_func(litem, ritem, check_less_precise=check_less_precise) + assert_frame_equal(litem, ritem, + check_less_precise=check_less_precise, + check_names=check_names) for i, item in enumerate(right._get_axis(0)): msg = "non-matching item (left) '{item}'".format(item=item) assert item in left, msg -# TODO: strangely check_names fails in py3 ? -_panel_frame_equal = partial(assert_frame_equal, check_names=False) -assert_panel_equal = partial(assert_panelnd_equal, - assert_func=_panel_frame_equal) - - # ----------------------------------------------------------------------------- # Sparse
xref #19059
https://api.github.com/repos/pandas-dev/pandas/pulls/19068
2018-01-04T02:15:47Z
2018-01-04T05:52:50Z
2018-01-04T05:52:50Z
2018-01-04T05:52:50Z
DEPR: removing to_long and toLong, deprecated in 0.7.0
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ea4245cb3281e..c8fe00eb5521f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -253,6 +253,7 @@ Removal of prior version deprecations/changes - ``pandas.tseries.frequencies.get_standard_freq`` has been removed in favor of ``pandas.tseries.frequencies.to_offset(freq).rule_code`` (:issue:`13874`) - The ``freqstr`` keyword has been removed from ``pandas.tseries.frequencies.to_offset`` in favor of ``freq`` (:issue:`13874`) - The ``Panel4D`` and ``PanelND`` classes have been removed (:issue:`13776`) +- The ``Panel``class has dropped the ``to_long``and ``toLong`` methods (:issue:`19077`) .. _whatsnew_0230.performance: diff --git a/pandas/core/panel.py b/pandas/core/panel.py index b7111a6d0d5bf..1937a57939c87 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -996,9 +996,6 @@ def construct_index_parts(idx, major=True): return DataFrame(data, index=index, columns=self.items) - to_long = deprecate('to_long', to_frame) - toLong = deprecate('toLong', to_frame) - def apply(self, func, axis='major', **kwargs): """ Applies function along axis (or axes) of the Panel
- [X] closes #19077 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19067
2018-01-04T01:34:09Z
2018-01-04T15:25:02Z
2018-01-04T15:25:02Z
2018-01-04T15:25:04Z
DOC: Add clear and other parameter docs to assert_produces_warning
diff --git a/pandas/util/testing.py b/pandas/util/testing.py index b99f019a8e98f..cd9ebd3017256 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -2339,12 +2339,44 @@ def exception_matches(self, exc_type, exc_value, trace_back): def assert_produces_warning(expected_warning=Warning, filter_level="always", clear=None, check_stacklevel=True): """ - Context manager for running code that expects to raise (or not raise) - warnings. Checks that code raises the expected warning and only the - expected warning. Pass ``False`` or ``None`` to check that it does *not* - raise a warning. Defaults to ``exception.Warning``, baseclass of all - Warnings. (basically a wrapper around ``warnings.catch_warnings``). + Context manager for running code expected to either raise a specific + warning, or not raise any warnings. Verifies that the code raises the + expected warning, and that it does not raise any other unexpected + warnings. It is basically a wrapper around ``warnings.catch_warnings``. + Parameters + ---------- + expected_warning : {Warning, False, None}, default Warning + The type of Exception raised. ``exception.Warning`` is the base + class for all warnings. To check that no warning is returned, + specify ``False`` or ``None``. + filter_level : str, default "always" + Specifies whether warnings are ignored, displayed, or turned + into errors. + Valid values are: + + * "error" - turns matching warnings into exeptions + * "ignore" - discard the warning + * "always" - always emit a warning + * "default" - print the warning the first time it is generated + from each location + * "module" - print the warning the first time it is generated + from each module + * "once" - print the warning the first time it is generated + + clear : str, default None + If not ``None`` then remove any previously raised warnings from + the ``__warningsregistry__`` to ensure that no warning messages are + suppressed by this context manager. 
If ``None`` is specified, + the ``__warningsregistry__`` keeps track of which warnings have been + shown, and does not show them again. + check_stacklevel : bool, default True + If True, displays the line that called the function containing + the warning to show were the function is called. Otherwise, the + line that implements the function is displayed. + + Examples + -------- >>> import warnings >>> with assert_produces_warning(): ... warnings.warn(UserWarning())
- [GH18996 ] closes #18996 - Added information on `clear` parameter - Added information on other undocumented parameters (expected_warning, filter_level, check_stacklevel)
https://api.github.com/repos/pandas-dev/pandas/pulls/19066
2018-01-03T23:22:42Z
2018-01-10T00:26:31Z
2018-01-10T00:26:31Z
2018-01-10T16:29:51Z
Added ISO 8601 Duration string constructor for Timedelta
diff --git a/asv_bench/benchmarks/timedelta.py b/asv_bench/benchmarks/timedelta.py index f99f95678a0b7..1897b0287ed19 100644 --- a/asv_bench/benchmarks/timedelta.py +++ b/asv_bench/benchmarks/timedelta.py @@ -1,9 +1,40 @@ +import datetime + import numpy as np import pandas as pd from pandas import to_timedelta, Timestamp, Timedelta +class TimedeltaConstructor(object): + goal_time = 0.2 + + def time_from_int(self): + Timedelta(123456789) + + def time_from_unit(self): + Timedelta(1, unit='d') + + def time_from_components(self): + Timedelta(days=1, hours=2, minutes=3, seconds=4, milliseconds=5, + microseconds=6, nanoseconds=7) + + def time_from_datetime_timedelta(self): + Timedelta(datetime.timedelta(days=1, seconds=1)) + + def time_from_np_timedelta(self): + Timedelta(np.timedelta64(1, 'ms')) + + def time_from_string(self): + Timedelta('1 days') + + def time_from_iso_format(self): + Timedelta('P4DT12H30M5S') + + def time_from_missing(self): + Timedelta('nat') + + class ToTimedelta(object): goal_time = 0.2 diff --git a/doc/source/timedeltas.rst b/doc/source/timedeltas.rst index 6bbfb54629c4d..50cff4c7bbdfb 100644 --- a/doc/source/timedeltas.rst +++ b/doc/source/timedeltas.rst @@ -62,6 +62,14 @@ You can construct a ``Timedelta`` scalar through various arguments: pd.Timedelta('nan') pd.Timedelta('nat') + # ISO 8601 Duration strings + pd.Timedelta('P0DT0H1M0S') + pd.Timedelta('P0DT0H0M0.000000123S') + +.. versionadded:: 0.23.0 + + Added constructor for `ISO 8601 Duration`_ strings + :ref:`DateOffsets<timeseries.offsets>` (``Day, Hour, Minute, Second, Milli, Micro, Nano``) can also be used in construction. .. 
ipython:: python diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c7b1cb4379700..4d806f1f05a16 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -211,6 +211,7 @@ Other API Changes - Subtracting ``NaT`` from a :class:`Series` with ``dtype='datetime64[ns]'`` returns a ``Series`` with ``dtype='timedelta64[ns]'`` instead of ``dtype='datetime64[ns]'``(:issue:`18808`) - Operations between a :class:`Series` with dtype ``dtype='datetime64[ns]'`` and a :class:`PeriodIndex` will correctly raises ``TypeError`` (:issue:`18850`) - Subtraction of :class:`Series` with timezone-aware ``dtype='datetime64[ns]'`` with mis-matched timezones will raise ``TypeError`` instead of ``ValueError`` (issue:`18817`) +- The default ``Timedelta`` constructor now accepts an ``ISO 8601 Duration`` string as an argument (:issue:`19040`) .. _whatsnew_0230.deprecations: diff --git a/pandas/_libs/tslibs/timedeltas.pyx b/pandas/_libs/tslibs/timedeltas.pyx index b37e5dc620260..af3fa738fad14 100644 --- a/pandas/_libs/tslibs/timedeltas.pyx +++ b/pandas/_libs/tslibs/timedeltas.pyx @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # cython: profile=False import collections +import re import sys cdef bint PY3 = (sys.version_info[0] >= 3) @@ -506,6 +507,57 @@ def _binary_op_method_timedeltalike(op, name): # ---------------------------------------------------------------------- # Timedelta Construction +iso_pater = re.compile(r"""P + (?P<days>-?[0-9]*)DT + (?P<hours>[0-9]{1,2})H + (?P<minutes>[0-9]{1,2})M + (?P<seconds>[0-9]{0,2}) + (\. + (?P<milliseconds>[0-9]{1,3}) + (?P<microseconds>[0-9]{0,3}) + (?P<nanoseconds>[0-9]{0,3}) + )?S""", re.VERBOSE) + + +cdef int64_t parse_iso_format_string(object iso_fmt) except? 
-1: + """ + Extracts and cleanses the appropriate values from a match object with + groups for each component of an ISO 8601 duration + + Parameters + ---------- + iso_fmt: + ISO 8601 Duration formatted string + + Returns + ------- + ns: int64_t + Precision in nanoseconds of matched ISO 8601 duration + + Raises + ------ + ValueError + If ``iso_fmt`` cannot be parsed + """ + + cdef int64_t ns = 0 + + match = re.match(iso_pater, iso_fmt) + if match: + match_dict = match.groupdict(default='0') + for comp in ['milliseconds', 'microseconds', 'nanoseconds']: + match_dict[comp] = '{:0<3}'.format(match_dict[comp]) + + for k, v in match_dict.items(): + ns += timedelta_from_spec(v, '0', k) + + else: + raise ValueError("Invalid ISO 8601 Duration format - " + "{}".format(iso_fmt)) + + return ns + + cdef _to_py_int_float(v): # Note: This used to be defined inside Timedelta.__new__ # but cython will not allow `cdef` functions to be defined dynamically. @@ -825,7 +877,11 @@ class Timedelta(_Timedelta): if isinstance(value, Timedelta): value = value.value elif is_string_object(value): - value = np.timedelta64(parse_timedelta_string(value)) + if len(value) > 0 and value[0] == 'P': + value = parse_iso_format_string(value) + else: + value = parse_timedelta_string(value) + value = np.timedelta64(value) elif PyDelta_Check(value): value = convert_to_timedelta64(value, 'ns') elif is_timedelta64_object(value): diff --git a/pandas/tests/scalar/test_timedelta.py b/pandas/tests/scalar/test_timedelta.py index c260700c9473b..310555c19ea99 100644 --- a/pandas/tests/scalar/test_timedelta.py +++ b/pandas/tests/scalar/test_timedelta.py @@ -853,3 +853,29 @@ def test_isoformat(self): result = Timedelta(minutes=1).isoformat() expected = 'P0DT0H1M0S' assert result == expected + + @pytest.mark.parametrize('fmt,exp', [ + ('P6DT0H50M3.010010012S', Timedelta(days=6, minutes=50, seconds=3, + milliseconds=10, microseconds=10, + nanoseconds=12)), + ('P-6DT0H50M3.010010012S', Timedelta(days=-6, minutes=50, 
seconds=3, + milliseconds=10, microseconds=10, + nanoseconds=12)), + ('P4DT12H30M5S', Timedelta(days=4, hours=12, minutes=30, seconds=5)), + ('P0DT0H0M0.000000123S', Timedelta(nanoseconds=123)), + ('P0DT0H0M0.00001S', Timedelta(microseconds=10)), + ('P0DT0H0M0.001S', Timedelta(milliseconds=1)), + ('P0DT0H1M0S', Timedelta(minutes=1)), + ('P1DT25H61M61S', Timedelta(days=1, hours=25, minutes=61, seconds=61)) + ]) + def test_iso_constructor(self, fmt, exp): + assert Timedelta(fmt) == exp + + @pytest.mark.parametrize('fmt', [ + 'PPPPPPPPPPPP', 'PDTHMS', 'P0DT999H999M999S', + 'P1DT0H0M0.0000000000000S', 'P1DT0H0M00000000000S', + 'P1DT0H0M0.S']) + def test_iso_constructor_raises(self, fmt): + with tm.assert_raises_regex(ValueError, 'Invalid ISO 8601 Duration ' + 'format - {}'.format(fmt)): + Timedelta(fmt)
- [X] closes #19040 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry ASV results below ``` · Running 16 total benchmarks (2 commits * 1 environments * 8 benchmarks) [ 0.00%] · For pandas commit hash 6eda5188: [ 0.00%] ·· Building for conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt... [ 0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 6.25%] ··· Running timedelta.TimedeltaConstructor.time_from_components 14.6±0.2μs [ 12.50%] ··· Running timedelta.TimedeltaConstructor.time_from_datetime_timedelta 7.17±0.1μs [ 18.75%] ··· Running timedelta.TimedeltaConstructor.time_from_int 5.86±0.08μs [ 25.00%] ··· Running timedelta.TimedeltaConstructor.time_from_iso_format 20.7±0.4μs [ 31.25%] ··· Running timedelta.TimedeltaConstructor.time_from_missing 2.11±0.03μs [ 37.50%] ··· Running timedelta.TimedeltaConstructor.time_from_np_timedelta 4.79±0.1μs [ 43.75%] ··· Running timedelta.TimedeltaConstructor.time_from_string 6.32±0.08μs [ 50.00%] ··· Running timedelta.TimedeltaConstructor.time_from_unit 6.22±0.1μs [ 50.00%] · For pandas commit hash 6552718d: [ 50.00%] ·· Building for conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt... 
[ 50.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt [ 56.25%] ··· Running timedelta.TimedeltaConstructor.time_from_components 14.8±0.2μs [ 62.50%] ··· Running timedelta.TimedeltaConstructor.time_from_datetime_timedelta 7.28±0.09μs [ 68.75%] ··· Running timedelta.TimedeltaConstructor.time_from_int 5.67±0.1μs [ 75.00%] ··· Running timedelta.TimedeltaConstructor.time_from_iso_format failed [ 81.25%] ··· Running timedelta.TimedeltaConstructor.time_from_missing 2.09±0.05μs [ 87.50%] ··· Running timedelta.TimedeltaConstructor.time_from_np_timedelta 4.74±0.04μs [ 93.75%] ··· Running timedelta.TimedeltaConstructor.time_from_string 5.97±0.1μs [100.00%] ··· Running timedelta.TimedeltaConstructor.time_from_unit 6.29±0.1μs SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY. ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19065
2018-01-03T22:58:39Z
2018-01-05T14:12:59Z
2018-01-05T14:12:58Z
2018-01-05T14:42:14Z
Revert "CI: fix conda version (#19025)"
diff --git a/ci/install_travis.sh b/ci/install_travis.sh index c92da8d4774e1..272e7f2e05d14 100755 --- a/ci/install_travis.sh +++ b/ci/install_travis.sh @@ -48,12 +48,7 @@ echo echo "[update conda]" conda config --set ssl_verify false || exit 1 conda config --set quiet true --set always_yes true --set changeps1 false || exit 1 - -# TODO(jreback), fix conoda version -echo -echo "[conda version]" -conda install conda=4.4.4 -# conda update -q conda +conda update -q conda if [ "$CONDA_BUILD_TEST" ]; then echo
This reverts commit c19bdc98fa3f856e2e6b3495bbab88b587801987.
https://api.github.com/repos/pandas-dev/pandas/pulls/19062
2018-01-03T20:38:15Z
2018-01-03T23:54:51Z
2018-01-03T23:54:51Z
2018-01-03T23:55:12Z
CLN: Remove the Panel4D and PanelND classes
diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst index da54a6a5f5c02..b6690eff89836 100644 --- a/doc/source/cookbook.rst +++ b/doc/source/cookbook.rst @@ -411,16 +411,6 @@ Levels `Flatten Hierarchical columns <http://stackoverflow.com/questions/14507794/python-pandas-how-to-flatten-a-hierarchical-index-in-columns>`__ -panelnd -******* - -The :ref:`panelnd<dsintro.panelnd>` docs. - -`Construct a 5D panelnd -<http://stackoverflow.com/questions/18748598/why-my-panelnd-factory-throwing-a-keyerror>`__ - -.. _cookbook.missing_data: - Missing Data ------------ diff --git a/doc/source/dsintro.rst b/doc/source/dsintro.rst index 7237dc5f1200b..d7650b6b0938f 100644 --- a/doc/source/dsintro.rst +++ b/doc/source/dsintro.rst @@ -982,21 +982,3 @@ Alternatively, one can convert to an xarray ``DataArray``. p.to_xarray() You can see the full-documentation for the `xarray package <http://xarray.pydata.org/en/stable/>`__. - -.. _dsintro.panelnd: -.. _dsintro.panel4d: - -Panel4D and PanelND (Deprecated) --------------------------------- - -.. warning:: - - In 0.19.0 ``Panel4D`` and ``PanelND`` are deprecated and will be removed in - a future version. The recommended way to represent these types of - n-dimensional data are with the - `xarray package <http://xarray.pydata.org/en/stable/>`__. - Pandas provides a :meth:`~Panel4D.to_xarray` method to automate - this conversion. - -See the `docs of a previous version <http://pandas.pydata.org/pandas-docs/version/0.18.1/dsintro.html#panel4d-experimental>`__ -for documentation on these objects. 
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 6a48abb6c6592..8008d935c30f1 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -249,6 +249,7 @@ Removal of prior version deprecations/changes - The ``Series``, ``Categorical``, and ``Index`` classes have dropped the ``reshape`` method (:issue:`13012`) - ``pandas.tseries.frequencies.get_standard_freq`` has been removed in favor of ``pandas.tseries.frequencies.to_offset(freq).rule_code`` (:issue:`13874`) - The ``freqstr`` keyword has been removed from ``pandas.tseries.frequencies.to_offset`` in favor of ``freq`` (:issue:`13874`) +- The ``Panel4D`` and ``PanelND`` classes have been removed (:issue:`13776`) .. _whatsnew_0230.performance: diff --git a/pandas/core/api.py b/pandas/core/api.py index bff42090d689e..b228a97c99074 100644 --- a/pandas/core/api.py +++ b/pandas/core/api.py @@ -22,7 +22,6 @@ from pandas.core.series import Series from pandas.core.frame import DataFrame from pandas.core.panel import Panel, WidePanel -from pandas.core.panel4d import Panel4D # TODO: Remove import when statsmodels updates #18264 from pandas.core.reshape.reshape import get_dummies diff --git a/pandas/core/dtypes/generic.py b/pandas/core/dtypes/generic.py index 618bcf6495155..629d88aa7f086 100644 --- a/pandas/core/dtypes/generic.py +++ b/pandas/core/dtypes/generic.py @@ -43,7 +43,7 @@ def _check(cls, inst): ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series", )) ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe", )) -ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel", "panel4d")) +ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",)) ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp", ('sparse_series', 'sparse_time_series')) diff --git a/pandas/core/generic.py b/pandas/core/generic.py index 84799d12df0c4..1ab7c50d86c98 100644 --- a/pandas/core/generic.py +++ b/pandas/core/generic.py @@ 
-7146,7 +7146,7 @@ def describe(self, percentiles=None, include=None, exclude=None): DataFrame.select_dtypes """ if self.ndim >= 3: - msg = "describe is not implemented on Panel or PanelND objects." + msg = "describe is not implemented on Panel objects." raise NotImplementedError(msg) elif self.ndim == 2 and self.columns.size == 0: raise ValueError("Cannot describe a DataFrame without columns") diff --git a/pandas/core/panel.py b/pandas/core/panel.py index 7ec177b03aeb1..b7111a6d0d5bf 100644 --- a/pandas/core/panel.py +++ b/pandas/core/panel.py @@ -1224,9 +1224,6 @@ def reindex(self, *args, **kwargs): kwargs['minor_axis'] = minor axes = validate_axis_style_args(self, args, kwargs, 'labels', 'reindex') - if self.ndim >= 4: - # Hack for PanelND - axes = {} kwargs.update(axes) kwargs.pop('axis', None) kwargs.pop('labels', None) diff --git a/pandas/core/panel4d.py b/pandas/core/panel4d.py deleted file mode 100644 index 0fac720302cfb..0000000000000 --- a/pandas/core/panel4d.py +++ /dev/null @@ -1,99 +0,0 @@ -""" Panel4D: a 4-d dict like collection of panels """ - -import warnings -from pandas.core.generic import NDFrame -from pandas.core.panelnd import create_nd_panel_factory -from pandas.core.panel import Panel -from pandas.util._validators import validate_axis_style_args - - -Panel4D = create_nd_panel_factory(klass_name='Panel4D', - orders=['labels', 'items', 'major_axis', - 'minor_axis'], - slices={'labels': 'labels', - 'items': 'items', - 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, - slicer=Panel, - aliases={'major': 'major_axis', - 'minor': 'minor_axis'}, stat_axis=2, - ns=dict(__doc__=""" - Panel4D is a 4-Dimensional named container very much like a Panel, but - having 4 named dimensions. It is intended as a test bed for more - N-Dimensional named containers. - - .. deprecated:: 0.19.0 - The recommended way to represent these types of n-dimensional data - are with the `xarray package <http://xarray.pydata.org/en/stable/>`__. 
- Pandas provides a `.to_xarray()` method to automate this conversion. - - Parameters - ---------- - data : ndarray (labels x items x major x minor), or dict of Panels - - labels : Index or array-like : axis=0 - items : Index or array-like : axis=1 - major_axis : Index or array-like: axis=2 - minor_axis : Index or array-like: axis=3 - - dtype : dtype, default None - Data type to force, otherwise infer - copy : boolean, default False - Copy data from inputs. Only affects DataFrame / 2d ndarray input - """)) - - -def panel4d_init(self, data=None, labels=None, items=None, major_axis=None, - minor_axis=None, copy=False, dtype=None): - - # deprecation GH13564 - warnings.warn("\nPanel4D is deprecated and will be removed in a " - "future version.\nThe recommended way to represent " - "these types of n-dimensional data are with\n" - "the `xarray package " - "<http://xarray.pydata.org/en/stable/>`__.\n" - "Pandas provides a `.to_xarray()` method to help " - "automate this conversion.\n", - FutureWarning, stacklevel=2) - self._init_data(data=data, labels=labels, items=items, - major_axis=major_axis, minor_axis=minor_axis, copy=copy, - dtype=dtype) - - -def panel4d_reindex(self, labs=None, labels=None, items=None, major_axis=None, - minor_axis=None, axis=None, **kwargs): - # Hack for reindex_axis deprecation - # Ha, we used labels for two different things - # I think this will work still. 
- if labs is None: - args = () - else: - args = (labs,) - kwargs_ = dict(labels=labels, - items=items, - major_axis=major_axis, - minor_axis=minor_axis, - axis=axis) - kwargs_ = {k: v for k, v in kwargs_.items() if v is not None} - # major = kwargs.pop("major", None) - # minor = kwargs.pop('minor', None) - - # if major is not None: - # if kwargs.get("major_axis"): - # raise TypeError("Cannot specify both 'major' and 'major_axis'") - # kwargs_['major_axis'] = major - # if minor is not None: - # if kwargs.get("minor_axis"): - # raise TypeError("Cannot specify both 'minor' and 'minor_axis'") - # kwargs_['minor_axis'] = minor - - if axis is not None: - kwargs_['axis'] = axis - - axes = validate_axis_style_args(self, args, kwargs_, 'labs', 'reindex') - kwargs.update(axes) - return NDFrame.reindex(self, **kwargs) - - -Panel4D.__init__ = panel4d_init -Panel4D.reindex = panel4d_reindex diff --git a/pandas/core/panelnd.py b/pandas/core/panelnd.py deleted file mode 100644 index 80ee680d2b9d2..0000000000000 --- a/pandas/core/panelnd.py +++ /dev/null @@ -1,132 +0,0 @@ -""" Factory methods to create N-D panels """ - -import warnings -from pandas.compat import zip -import pandas.compat as compat - - -def create_nd_panel_factory(klass_name, orders, slices, slicer, aliases=None, - stat_axis=2, info_axis=0, ns=None): - """ manufacture a n-d class: - - .. deprecated:: 0.19.0 - The recommended way to represent these types of n-dimensional data - are with the `xarray package <http://xarray.pydata.org/en/stable/>`__. - Pandas provides a `.to_xarray()` method to automate this conversion. 
- - Parameters - ---------- - klass_name : the klass name - orders : the names of the axes in order (highest to lowest) - slices : a dictionary that defines how the axes map to the slice axis - slicer : the class representing a slice of this panel - aliases : a dictionary defining aliases for various axes - default = { major : major_axis, minor : minor_axis } - stat_axis : the default statistic axis default = 2 - info_axis : the info axis - - Returns - ------- - a class object representing this panel - """ - - # if slicer is a name, get the object - if isinstance(slicer, compat.string_types): - import pandas - try: - slicer = getattr(pandas, slicer) - except: - raise Exception("cannot create this slicer [%s]" % slicer) - - # build the klass - ns = {} if not ns else ns - klass = type(klass_name, (slicer, ), ns) - - # setup the axes - klass._setup_axes(axes=orders, info_axis=info_axis, stat_axis=stat_axis, - aliases=aliases, slicers=slices) - - klass._constructor_sliced = slicer - - # define the methods #### - def __init__(self, *args, **kwargs): - - # deprecation GH13564 - warnings.warn("\n{klass} is deprecated and will be removed in a " - "future version.\nThe recommended way to represent " - "these types of n-dimensional data are with the\n" - "`xarray package " - "<http://xarray.pydata.org/en/stable/>`__.\n" - "Pandas provides a `.to_xarray()` method to help " - "automate this conversion.\n".format( - klass=self.__class__.__name__), - FutureWarning, stacklevel=2) - - if not (kwargs.get('data') or len(args)): - raise Exception("must supply at least a data argument to [%s]" % - klass_name) - if 'copy' not in kwargs: - kwargs['copy'] = False - if 'dtype' not in kwargs: - kwargs['dtype'] = None - self._init_data(*args, **kwargs) - - klass.__init__ = __init__ - - def _get_plane_axes_index(self, axis): - """ return the sliced index for this object """ - - # TODO: axis_name is not used, remove? 
- axis_name = self._get_axis_name(axis) # noqa - index = self._AXIS_ORDERS.index(axis) - - planes = [] - if index: - planes.extend(self._AXIS_ORDERS[0:index]) - if index != self._AXIS_LEN: - planes.extend(self._AXIS_ORDERS[index + 1:]) - - return planes - - klass._get_plane_axes_index = _get_plane_axes_index - - def _combine(self, other, func, axis=0): - if isinstance(other, klass): - return self._combine_with_constructor(other, func) - return super(klass, self)._combine(other, func, axis=axis) - - klass._combine = _combine - - def _combine_with_constructor(self, other, func): - - # combine labels to form new axes - new_axes = [] - for a in self._AXIS_ORDERS: - new_axes.append(getattr(self, a).union(getattr(other, a))) - - # reindex: could check that everything's the same size, but forget it - d = {a: ax for a, ax in zip(self._AXIS_ORDERS, new_axes)} - d['copy'] = False - this = self.reindex(**d) - other = other.reindex(**d) - - result_values = func(this.values, other.values) - - return self._constructor(result_values, **d) - - klass._combine_with_constructor = _combine_with_constructor - - # set as NonImplemented operations which we don't support - for f in ['to_frame', 'to_excel', 'to_sparse', 'groupby', 'join', 'filter', - 'dropna', 'shift']: - - def func(self, *args, **kwargs): - raise NotImplementedError("this operation is not supported") - - setattr(klass, f, func) - - # add the aggregate operations - klass._add_aggregate_operations() - klass._add_numeric_operations() - - return klass diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py index efe6ab6c18868..72543bb6f825e 100644 --- a/pandas/io/pytables.py +++ b/pandas/io/pytables.py @@ -24,7 +24,7 @@ from pandas.core.dtypes.missing import array_equivalent import numpy as np -from pandas import (Series, DataFrame, Panel, Panel4D, Index, +from pandas import (Series, DataFrame, Panel, Index, MultiIndex, Int64Index, isna, concat, to_datetime, SparseSeries, SparseDataFrame, PeriodIndex, DatetimeIndex, 
TimedeltaIndex) @@ -180,7 +180,6 @@ class DuplicateWarning(Warning): DataFrame: u('frame'), SparseDataFrame: u('sparse_frame'), Panel: u('wide'), - Panel4D: u('ndim'), } # storer class map @@ -203,7 +202,6 @@ class DuplicateWarning(Warning): u('appendable_frame'): 'AppendableFrameTable', u('appendable_multiframe'): 'AppendableMultiFrameTable', u('appendable_panel'): 'AppendablePanelTable', - u('appendable_ndim'): 'AppendableNDimTable', u('worm'): 'WORMTable', u('legacy_frame'): 'LegacyFrameTable', u('legacy_panel'): 'LegacyPanelTable', @@ -212,8 +210,7 @@ class DuplicateWarning(Warning): # axes map _AXES_MAP = { DataFrame: [0], - Panel: [1, 2], - Panel4D: [1, 2, 3], + Panel: [1, 2] } # register our configuration options @@ -924,7 +921,7 @@ def append(self, key, value, format=None, append=True, columns=None, Parameters ---------- key : object - value : {Series, DataFrame, Panel, Panel4D} + value : {Series, DataFrame, Panel} format: 'table' is the default table(t) : table format Write as a PyTables Table structure which may perform @@ -4346,14 +4343,6 @@ def is_transposed(self): return self.data_orientation != tuple(range(self.ndim)) -class AppendableNDimTable(AppendablePanelTable): - - """ suppor the new appendable table formats """ - table_type = u('appendable_ndim') - ndim = 4 - obj_type = Panel4D - - def _reindex_axis(obj, axis, labels, other=None): ax = obj._get_axis(axis) labels = _ensure_index(labels) diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py index e47f1919faaf5..8962eb90be828 100644 --- a/pandas/tests/api/test_api.py +++ b/pandas/tests/api/test_api.py @@ -51,8 +51,7 @@ class TestPDApi(Base): 'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex'] # these are already deprecated; awaiting removal - deprecated_classes = ['WidePanel', 'Panel4D', 'TimeGrouper', - 'Expr', 'Term'] + deprecated_classes = ['WidePanel', 'TimeGrouper', 'Expr', 'Term'] # these should be deprecated in the future deprecated_classes_in_future = ['Panel'] 
diff --git a/pandas/tests/dtypes/test_missing.py b/pandas/tests/dtypes/test_missing.py index fd2c63ef5b37e..4f208bc352c70 100644 --- a/pandas/tests/dtypes/test_missing.py +++ b/pandas/tests/dtypes/test_missing.py @@ -99,13 +99,6 @@ def test_isna_isnull(self, isna_f): expected = p.apply(isna_f) tm.assert_panel_equal(result, expected) - # panel 4d - with catch_warnings(record=True): - for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]: - result = isna_f(p) - expected = p.apply(isna_f) - tm.assert_panel4d_equal(result, expected) - def test_isna_lists(self): result = isna([[False]]) exp = np.array([[False]]) diff --git a/pandas/tests/frame/test_query_eval.py b/pandas/tests/frame/test_query_eval.py index 55aeaf6e77be1..a226f8de3c8bd 100644 --- a/pandas/tests/frame/test_query_eval.py +++ b/pandas/tests/frame/test_query_eval.py @@ -366,12 +366,6 @@ def test_raise_on_panel_with_multiindex(self, parser, engine): with pytest.raises(NotImplementedError): pd.eval('p + 1', parser=parser, engine=engine) - def test_raise_on_panel4d_with_multiindex(self, parser, engine): - p4d = tm.makePanel4D(7) - p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2) - with pytest.raises(NotImplementedError): - pd.eval('p4d + 1', parser=parser, engine=engine) - @td.skip_if_no_ne class TestDataFrameQueryNumExprPandas(object): diff --git a/pandas/tests/generic/test_generic.py b/pandas/tests/generic/test_generic.py index a37c1649e5677..3868bdf7d4620 100644 --- a/pandas/tests/generic/test_generic.py +++ b/pandas/tests/generic/test_generic.py @@ -10,8 +10,7 @@ from pandas.core.dtypes.common import is_scalar from pandas import (Series, DataFrame, Panel, - date_range, Panel4D, - MultiIndex) + date_range, MultiIndex) import pandas.io.formats.printing as printing @@ -726,9 +725,6 @@ def test_squeeze(self): with catch_warnings(record=True): for p in [tm.makePanel()]: tm.assert_panel_equal(p.squeeze(), p) - with catch_warnings(record=True): - for p4d in [tm.makePanel4D()]: - 
tm.assert_panel4d_equal(p4d.squeeze(), p4d) # squeezing df = tm.makeTimeDataFrame().reindex(columns=['A']) @@ -741,14 +737,6 @@ def test_squeeze(self): p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A']) tm.assert_series_equal(p.squeeze(), p.loc['ItemA', :, 'A']) - with catch_warnings(record=True): - p4d = tm.makePanel4D().reindex(labels=['label1']) - tm.assert_panel_equal(p4d.squeeze(), p4d['label1']) - - with catch_warnings(record=True): - p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA']) - tm.assert_frame_equal(p4d.squeeze(), p4d.loc['label1', 'ItemA']) - # don't fail with 0 length dimensions GH11229 & GH8999 empty_series = Series([], name='five') empty_frame = DataFrame([empty_series]) @@ -796,13 +784,6 @@ def test_transpose(self): tm.assert_raises_regex(TypeError, msg, p.transpose, 2, 0, 1, axes=(2, 0, 1)) - with catch_warnings(record=True): - for p4d in [tm.makePanel4D()]: - tm.assert_panel4d_equal(p4d.transpose(2, 0, 3, 1) - .transpose(1, 3, 0, 2), p4d) - tm.assert_raises_regex(TypeError, msg, p4d.transpose, - 2, 0, 3, 1, axes=(2, 0, 3, 1)) - def test_numpy_transpose(self): msg = "the 'axes' parameter is not supported" @@ -824,12 +805,6 @@ def test_numpy_transpose(self): np.transpose(p, axes=(2, 0, 1)), axes=(1, 2, 0)), p) - with catch_warnings(record=True): - p4d = tm.makePanel4D() - tm.assert_panel4d_equal(np.transpose( - np.transpose(p4d, axes=(2, 0, 3, 1)), - axes=(1, 3, 0, 2)), p4d) - def test_take(self): indices = [1, 5, -2, 6, 3, -1] for s in [tm.makeFloatSeries(), tm.makeStringSeries(), @@ -855,16 +830,6 @@ def test_take(self): minor_axis=p.minor_axis) tm.assert_panel_equal(out, expected) - with catch_warnings(record=True): - for p4d in [tm.makePanel4D()]: - out = p4d.take(indices) - expected = Panel4D(data=p4d.values.take(indices, axis=0), - labels=p4d.labels.take(indices), - major_axis=p4d.major_axis, - minor_axis=p4d.minor_axis, - items=p4d.items) - tm.assert_panel4d_equal(out, expected) - def 
test_take_invalid_kwargs(self): indices = [-3, 2, 0, 1] s = tm.makeFloatSeries() @@ -872,9 +837,8 @@ def test_take_invalid_kwargs(self): with catch_warnings(record=True): p = tm.makePanel() - p4d = tm.makePanel4D() - for obj in (s, df, p, p4d): + for obj in (s, df, p): msg = r"take\(\) got an unexpected keyword argument 'foo'" tm.assert_raises_regex(TypeError, msg, obj.take, indices, foo=2) diff --git a/pandas/tests/generic/test_panel.py b/pandas/tests/generic/test_panel.py index 1c8be94d6eac3..720f471a0ebd4 100644 --- a/pandas/tests/generic/test_panel.py +++ b/pandas/tests/generic/test_panel.py @@ -3,11 +3,8 @@ from warnings import catch_warnings -import pytest - -from pandas import Panel, Panel4D +from pandas import Panel from pandas.util.testing import (assert_panel_equal, - assert_panel4d_equal, assert_almost_equal) import pandas.util.testing as tm @@ -35,61 +32,3 @@ def test_to_xarray(self): # idempotency assert_panel_equal(result.to_pandas(), p) - - -class TestPanel4D(Generic): - _typ = Panel4D - _comparator = lambda self, x, y: assert_panel4d_equal(x, y, by_blocks=True) - - def test_sample(self): - pytest.skip("sample on Panel4D") - - @td.skip_if_no('xarray', min_version='0.7.0') - def test_to_xarray(self): - from xarray import DataArray - - with catch_warnings(record=True): - p = tm.makePanel4D() - - result = p.to_xarray() - assert isinstance(result, DataArray) - assert len(result.coords) == 4 - assert_almost_equal(list(result.coords.keys()), - ['labels', 'items', 'major_axis', - 'minor_axis']) - assert len(result.dims) == 4 - - # non-convertible - pytest.raises(ValueError, lambda: result.to_pandas()) - - -# run all the tests, but wrap each in a warning catcher -for t in ['test_rename', 'test_get_numeric_data', - 'test_get_default', 'test_nonzero', - 'test_downcast', 'test_constructor_compound_dtypes', - 'test_head_tail', - 'test_size_compat', 'test_split_compat', - 'test_unexpected_keyword', - 'test_stat_unexpected_keyword', 'test_api_compat', - 
'test_stat_non_defaults_args', - 'test_truncate_out_of_bounds', - 'test_metadata_propagation', 'test_copy_and_deepcopy', - 'test_sample']: - - def f(): - def tester(self): - f = getattr(super(TestPanel, self), t) - with catch_warnings(record=True): - f() - return tester - - setattr(TestPanel, t, f()) - - def f(): - def tester(self): - f = getattr(super(TestPanel4D, self), t) - with catch_warnings(record=True): - f() - return tester - - setattr(TestPanel4D, t, f()) diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py index b40350ada546c..04da6da74059b 100644 --- a/pandas/tests/io/test_pytables.py +++ b/pandas/tests/io/test_pytables.py @@ -11,15 +11,14 @@ import numpy as np import pandas as pd -from pandas import (Series, DataFrame, Panel, Panel4D, MultiIndex, Int64Index, +from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index, RangeIndex, Categorical, bdate_range, date_range, timedelta_range, Index, DatetimeIndex, isna, compat, concat, Timestamp) import pandas.util.testing as tm import pandas.util._test_decorators as td -from pandas.util.testing import (assert_panel4d_equal, - assert_panel_equal, +from pandas.util.testing import (assert_panel_equal, assert_frame_equal, assert_series_equal, set_timezone) @@ -888,30 +887,6 @@ def test_append(self): store.append('wp1', wp.iloc[:, 10:, :]) assert_panel_equal(store['wp1'], wp) - # ndim - p4d = tm.makePanel4D() - _maybe_remove(store, 'p4d') - store.append('p4d', p4d.iloc[:, :, :10, :]) - store.append('p4d', p4d.iloc[:, :, 10:, :]) - assert_panel4d_equal(store['p4d'], p4d) - - # test using axis labels - _maybe_remove(store, 'p4d') - store.append('p4d', p4d.iloc[:, :, :10, :], axes=[ - 'items', 'major_axis', 'minor_axis']) - store.append('p4d', p4d.iloc[:, :, 10:, :], axes=[ - 'items', 'major_axis', 'minor_axis']) - assert_panel4d_equal(store['p4d'], p4d) - - # test using different number of items on each axis - p4d2 = p4d.copy() - p4d2['l4'] = p4d['l1'] - p4d2['l5'] = p4d['l1'] - 
_maybe_remove(store, 'p4d2') - store.append( - 'p4d2', p4d2, axes=['items', 'major_axis', 'minor_axis']) - assert_panel4d_equal(store['p4d2'], p4d2) - # test using differt order of items on the non-index axes _maybe_remove(store, 'wp1') wp_append1 = wp.iloc[:, :10, :] @@ -1308,76 +1283,6 @@ def test_append_with_different_block_ordering(self): df['float_3'] = Series([1.] * len(df), dtype='float64') pytest.raises(ValueError, store.append, 'df', df) - def test_ndim_indexables(self): - # test using ndim tables in new ways - - with catch_warnings(record=True): - with ensure_clean_store(self.path) as store: - - p4d = tm.makePanel4D() - - def check_indexers(key, indexers): - for i, idx in enumerate(indexers): - descr = getattr(store.root, key).table.description - assert getattr(descr, idx)._v_pos == i - - # append then change (will take existing schema) - indexers = ['items', 'major_axis', 'minor_axis'] - - _maybe_remove(store, 'p4d') - store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers) - store.append('p4d', p4d.iloc[:, :, 10:, :]) - assert_panel4d_equal(store.select('p4d'), p4d) - check_indexers('p4d', indexers) - - # same as above, but try to append with different axes - _maybe_remove(store, 'p4d') - store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers) - store.append('p4d', p4d.iloc[:, :, 10:, :], axes=[ - 'labels', 'items', 'major_axis']) - assert_panel4d_equal(store.select('p4d'), p4d) - check_indexers('p4d', indexers) - - # pass incorrect number of axes - _maybe_remove(store, 'p4d') - pytest.raises(ValueError, store.append, 'p4d', p4d.iloc[ - :, :, :10, :], axes=['major_axis', 'minor_axis']) - - # different than default indexables #1 - indexers = ['labels', 'major_axis', 'minor_axis'] - _maybe_remove(store, 'p4d') - store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers) - store.append('p4d', p4d.iloc[:, :, 10:, :]) - assert_panel4d_equal(store['p4d'], p4d) - check_indexers('p4d', indexers) - - # different than default indexables #2 - indexers = 
['major_axis', 'labels', 'minor_axis'] - _maybe_remove(store, 'p4d') - store.append('p4d', p4d.iloc[:, :, :10, :], axes=indexers) - store.append('p4d', p4d.iloc[:, :, 10:, :]) - assert_panel4d_equal(store['p4d'], p4d) - check_indexers('p4d', indexers) - - # partial selection - result = store.select('p4d', ['labels=l1']) - expected = p4d.reindex(labels=['l1']) - assert_panel4d_equal(result, expected) - - # partial selection2 - result = store.select( - 'p4d', "labels='l1' and items='ItemA' and minor_axis='B'") - expected = p4d.reindex( - labels=['l1'], items=['ItemA'], minor_axis=['B']) - assert_panel4d_equal(result, expected) - - # non-existent partial selection - result = store.select( - 'p4d', "labels='l1' and items='Item1' and minor_axis='B'") - expected = p4d.reindex(labels=['l1'], items=[], - minor_axis=['B']) - assert_panel4d_equal(result, expected) - def test_append_with_strings(self): with ensure_clean_store(self.path) as store: @@ -1972,27 +1877,14 @@ def test_pass_spec_to_storer(self): def test_append_misc(self): with ensure_clean_store(self.path) as store: + df = tm.makeDataFrame() + store.append('df', df, chunksize=1) + result = store.select('df') + tm.assert_frame_equal(result, df) - with catch_warnings(record=True): - - # unsupported data types for non-tables - p4d = tm.makePanel4D() - pytest.raises(TypeError, store.put, 'p4d', p4d) - - # unsupported data types - pytest.raises(TypeError, store.put, 'abc', None) - pytest.raises(TypeError, store.put, 'abc', '123') - pytest.raises(TypeError, store.put, 'abc', 123) - pytest.raises(TypeError, store.put, 'abc', np.arange(5)) - - df = tm.makeDataFrame() - store.append('df', df, chunksize=1) - result = store.select('df') - tm.assert_frame_equal(result, df) - - store.append('df1', df, expectedrows=10) - result = store.select('df1') - tm.assert_frame_equal(result, df) + store.append('df1', df, expectedrows=10) + result = store.select('df1') + tm.assert_frame_equal(result, df) # more chunksize in append tests def 
check(obj, comparator): @@ -2015,10 +1907,6 @@ def check(obj, comparator): p = tm.makePanel() check(p, assert_panel_equal) - with catch_warnings(record=True): - p4d = tm.makePanel4D() - check(p4d, assert_panel4d_equal) - # empty frame, GH4273 with ensure_clean_store(self.path) as store: @@ -2189,21 +2077,6 @@ def test_table_mixed_dtypes(self): store.append('p1_mixed', wp) assert_panel_equal(store.select('p1_mixed'), wp) - with catch_warnings(record=True): - # ndim - wp = tm.makePanel4D() - wp['obj1'] = 'foo' - wp['obj2'] = 'bar' - wp['bool1'] = wp['l1'] > 0 - wp['bool2'] = wp['l2'] > 0 - wp['int1'] = 1 - wp['int2'] = 2 - wp = wp._consolidate() - - with ensure_clean_store(self.path) as store: - store.append('p4d_mixed', wp) - assert_panel4d_equal(store.select('p4d_mixed'), wp) - def test_unimplemented_dtypes_table_columns(self): with ensure_clean_store(self.path) as store: @@ -2545,10 +2418,8 @@ def test_invalid_terms(self): df.loc[0:4, 'string'] = 'bar' wp = tm.makePanel() - p4d = tm.makePanel4D() store.put('df', df, format='table') store.put('wp', wp, format='table') - store.put('p4d', p4d, format='table') # some invalid terms pytest.raises(ValueError, store.select, @@ -2597,8 +2468,7 @@ def test_terms(self): wpneg = Panel.fromDict({-1: tm.makeDataFrame(), 0: tm.makeDataFrame(), 1: tm.makeDataFrame()}) - p4d = tm.makePanel4D() - store.put('p4d', p4d, format='table') + store.put('wp', wp, format='table') store.put('wpneg', wpneg, format='table') @@ -2618,17 +2488,6 @@ def test_terms(self): after='20000108').reindex(minor=['A', 'B']) tm.assert_panel_equal(result, expected) - # p4d - with catch_warnings(record=True): - - result = store.select('p4d', - ("major_axis<'20000108' and " - "minor_axis=['A', 'B'] and " - "items=['ItemA', 'ItemB']")) - expected = p4d.truncate(after='20000108').reindex( - minor=['A', 'B'], items=['ItemA', 'ItemB']) - assert_panel4d_equal(result, expected) - with catch_warnings(record=True): # valid terms @@ -2648,12 +2507,6 @@ def 
test_terms(self): for t in terms: store.select('wp', t) - store.select('p4d', t) - - # valid for p4d only - terms = ["labels=['l1', 'l2']"] - for t in terms: - store.select('p4d', t) with tm.assert_raises_regex( TypeError, 'Only named functions are supported'): @@ -5353,11 +5206,9 @@ def test_complex_across_dimensions(self): with catch_warnings(record=True): p = Panel({'One': df, 'Two': df}) - p4d = Panel4D({'i': p, 'ii': p}) - objs = [df, p, p4d] - comps = [tm.assert_frame_equal, tm.assert_panel_equal, - tm.assert_panel4d_equal] + objs = [df, p] + comps = [tm.assert_frame_equal, tm.assert_panel_equal] for obj, comp in zip(objs, comps): with ensure_clean_path(self.path) as path: obj.to_hdf(path, 'obj', format='table') diff --git a/pandas/tests/reshape/test_concat.py b/pandas/tests/reshape/test_concat.py index f66cb12b11210..85e3115e96f83 100644 --- a/pandas/tests/reshape/test_concat.py +++ b/pandas/tests/reshape/test_concat.py @@ -1402,39 +1402,6 @@ def df(): # it works! concat([panel1, panel3], axis=1, verify_integrity=True) - def test_panel4d_concat(self): - with catch_warnings(record=True): - p4d = tm.makePanel4D() - - p1 = p4d.iloc[:, :, :5, :] - p2 = p4d.iloc[:, :, 5:, :] - - result = concat([p1, p2], axis=2) - tm.assert_panel4d_equal(result, p4d) - - p1 = p4d.iloc[:, :, :, :2] - p2 = p4d.iloc[:, :, :, 2:] - - result = concat([p1, p2], axis=3) - tm.assert_panel4d_equal(result, p4d) - - def test_panel4d_concat_mixed_type(self): - with catch_warnings(record=True): - p4d = tm.makePanel4D() - - # if things are a bit misbehaved - p1 = p4d.iloc[:, :2, :, :2] - p2 = p4d.iloc[:, :, :, 2:] - p1['L5'] = 'baz' - - result = concat([p1, p2], axis=3) - - p2['L5'] = np.nan - expected = concat([p1, p2], axis=3) - expected = expected.loc[result.labels] - - tm.assert_panel4d_equal(result, expected) - def test_concat_series(self): ts = tm.makeTimeSeries() diff --git a/pandas/tests/test_expressions.py b/pandas/tests/test_expressions.py index aebc9cd3deaac..56e00fa8af23d 100644 
--- a/pandas/tests/test_expressions.py +++ b/pandas/tests/test_expressions.py @@ -15,8 +15,7 @@ from pandas.core.computation import expressions as expr from pandas import compat, _np_version_under1p11, _np_version_under1p13 from pandas.util.testing import (assert_almost_equal, assert_series_equal, - assert_frame_equal, assert_panel_equal, - assert_panel4d_equal) + assert_frame_equal, assert_panel_equal) from pandas.io.formats.printing import pprint_thing import pandas.util.testing as tm @@ -205,12 +204,6 @@ def test_float_arithmetic_series(self): def test_float_panel(self): self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8) - @pytest.mark.slow - def test_panel4d(self): - with catch_warnings(record=True): - self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5, - assert_func=assert_panel4d_equal, binary_comp=3) - def test_mixed_arithmetic_frame(self): # TODO: FIGURE OUT HOW TO GET IT TO WORK... # can't do arithmetic because comparison methods try to do *entire* diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py deleted file mode 100644 index e429403bbc919..0000000000000 --- a/pandas/tests/test_panel4d.py +++ /dev/null @@ -1,944 +0,0 @@ -# -*- coding: utf-8 -*- -from datetime import datetime -from pandas.compat import range, lrange -import operator -import pytest -from warnings import catch_warnings -import numpy as np - -from pandas import Series, Index, isna, notna -from pandas.core.dtypes.common import is_float_dtype -from pandas.core.panel import Panel -from pandas.core.panel4d import Panel4D -from pandas.tseries.offsets import BDay - -from pandas.util.testing import (assert_frame_equal, assert_series_equal, - assert_almost_equal) -import pandas.util.testing as tm -import pandas.util._test_decorators as td - - -def add_nans(panel4d): - for l, label in enumerate(panel4d.labels): - panel = panel4d[label] - tm.add_nans(panel) - - -class SafeForLongAndSparse(object): - - def test_repr(self): - repr(self.panel4d) - - def 
test_iter(self): - tm.equalContents(list(self.panel4d), self.panel4d.labels) - - def test_count(self): - f = lambda s: notna(s).sum() - self._check_stat_op('count', f, obj=self.panel4d, has_skipna=False) - - def test_sum(self): - self._check_stat_op('sum', np.sum, skipna_alternative=np.nansum) - - def test_mean(self): - self._check_stat_op('mean', np.mean) - - @td.skip_if_no("numpy", min_version="1.10.0") - def test_prod(self): - self._check_stat_op('prod', np.prod, skipna_alternative=np.nanprod) - - def test_median(self): - def wrapper(x): - if isna(x).any(): - return np.nan - return np.median(x) - - self._check_stat_op('median', wrapper) - - def test_min(self): - self._check_stat_op('min', np.min) - - def test_max(self): - self._check_stat_op('max', np.max) - - @td.skip_if_no_scipy - def test_skew(self): - from scipy.stats import skew - - def this_skew(x): - if len(x) < 3: - return np.nan - return skew(x, bias=False) - self._check_stat_op('skew', this_skew) - - # def test_mad(self): - # f = lambda x: np.abs(x - x.mean()).mean() - # self._check_stat_op('mad', f) - - def test_var(self): - def alt(x): - if len(x) < 2: - return np.nan - return np.var(x, ddof=1) - self._check_stat_op('var', alt) - - def test_std(self): - def alt(x): - if len(x) < 2: - return np.nan - return np.std(x, ddof=1) - self._check_stat_op('std', alt) - - def test_sem(self): - def alt(x): - if len(x) < 2: - return np.nan - return np.std(x, ddof=1) / np.sqrt(len(x)) - self._check_stat_op('sem', alt) - - # def test_skew(self): - # from scipy.stats import skew - - # def alt(x): - # if len(x) < 3: - # return np.nan - # return skew(x, bias=False) - - # self._check_stat_op('skew', alt) - - def _check_stat_op(self, name, alternative, obj=None, has_skipna=True, - skipna_alternative=None): - if obj is None: - obj = self.panel4d - - # # set some NAs - # obj.loc[5:10] = np.nan - # obj.loc[15:20, -2:] = np.nan - - f = getattr(obj, name) - - if has_skipna: - - skipna_wrapper = 
tm._make_skipna_wrapper(alternative, - skipna_alternative) - - def wrapper(x): - return alternative(np.asarray(x)) - - with catch_warnings(record=True): - for i in range(obj.ndim): - result = f(axis=i, skipna=False) - expected = obj.apply(wrapper, axis=i) - tm.assert_panel_equal(result, expected) - else: - skipna_wrapper = alternative - wrapper = alternative - - with catch_warnings(record=True): - for i in range(obj.ndim): - result = f(axis=i) - if name in ['sum', 'prod']: - expected = obj.apply(skipna_wrapper, axis=i) - tm.assert_panel_equal(result, expected) - - pytest.raises(Exception, f, axis=obj.ndim) - - -class SafeForSparse(object): - - def test_get_axis(self): - assert self.panel4d._get_axis(0) is self.panel4d.labels - assert self.panel4d._get_axis(1) is self.panel4d.items - assert self.panel4d._get_axis(2) is self.panel4d.major_axis - assert self.panel4d._get_axis(3) is self.panel4d.minor_axis - - def test_set_axis(self): - with catch_warnings(record=True): - new_labels = Index(np.arange(len(self.panel4d.labels))) - - # TODO: unused? - # new_items = Index(np.arange(len(self.panel4d.items))) - - new_major = Index(np.arange(len(self.panel4d.major_axis))) - new_minor = Index(np.arange(len(self.panel4d.minor_axis))) - - # ensure propagate to potentially prior-cached items too - - # TODO: unused? 
- # label = self.panel4d['l1'] - - self.panel4d.labels = new_labels - - if hasattr(self.panel4d, '_item_cache'): - assert 'l1' not in self.panel4d._item_cache - assert self.panel4d.labels is new_labels - - self.panel4d.major_axis = new_major - assert self.panel4d[0].major_axis is new_major - assert self.panel4d.major_axis is new_major - - self.panel4d.minor_axis = new_minor - assert self.panel4d[0].minor_axis is new_minor - assert self.panel4d.minor_axis is new_minor - - def test_get_axis_number(self): - assert self.panel4d._get_axis_number('labels') == 0 - assert self.panel4d._get_axis_number('items') == 1 - assert self.panel4d._get_axis_number('major') == 2 - assert self.panel4d._get_axis_number('minor') == 3 - - def test_get_axis_name(self): - assert self.panel4d._get_axis_name(0) == 'labels' - assert self.panel4d._get_axis_name(1) == 'items' - assert self.panel4d._get_axis_name(2) == 'major_axis' - assert self.panel4d._get_axis_name(3) == 'minor_axis' - - def test_arith(self): - with catch_warnings(record=True): - self._test_op(self.panel4d, operator.add) - self._test_op(self.panel4d, operator.sub) - self._test_op(self.panel4d, operator.mul) - self._test_op(self.panel4d, operator.truediv) - self._test_op(self.panel4d, operator.floordiv) - self._test_op(self.panel4d, operator.pow) - - self._test_op(self.panel4d, lambda x, y: y + x) - self._test_op(self.panel4d, lambda x, y: y - x) - self._test_op(self.panel4d, lambda x, y: y * x) - self._test_op(self.panel4d, lambda x, y: y / x) - self._test_op(self.panel4d, lambda x, y: y ** x) - - pytest.raises(Exception, self.panel4d.__add__, - self.panel4d['l1']) - - @staticmethod - def _test_op(panel4d, op): - result = op(panel4d, 1) - tm.assert_panel_equal(result['l1'], op(panel4d['l1'], 1)) - - def test_keys(self): - tm.equalContents(list(self.panel4d.keys()), self.panel4d.labels) - - def test_iteritems(self): - """Test panel4d.iteritems()""" - - assert (len(list(self.panel4d.iteritems())) == - len(self.panel4d.labels)) - 
- def test_combinePanel4d(self): - with catch_warnings(record=True): - result = self.panel4d.add(self.panel4d) - tm.assert_panel4d_equal(result, self.panel4d * 2) - - def test_neg(self): - with catch_warnings(record=True): - tm.assert_panel4d_equal(-self.panel4d, self.panel4d * -1) - - def test_select(self): - with catch_warnings(record=True): - - p = self.panel4d - - # select labels - result = p.select(lambda x: x in ('l1', 'l3'), axis='labels') - expected = p.reindex(labels=['l1', 'l3']) - tm.assert_panel4d_equal(result, expected) - - # select items - result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items') - expected = p.reindex(items=['ItemA', 'ItemC']) - tm.assert_panel4d_equal(result, expected) - - # select major_axis - result = p.select(lambda x: x >= datetime(2000, 1, 15), - axis='major') - new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)] - expected = p.reindex(major=new_major) - tm.assert_panel4d_equal(result, expected) - - # select minor_axis - result = p.select(lambda x: x in ('D', 'A'), axis=3) - expected = p.reindex(minor=['A', 'D']) - tm.assert_panel4d_equal(result, expected) - - # corner case, empty thing - result = p.select(lambda x: x in ('foo',), axis='items') - tm.assert_panel4d_equal(result, p.reindex(items=[])) - - def test_get_value(self): - - for item in self.panel.items: - for mjr in self.panel.major_axis[::2]: - for mnr in self.panel.minor_axis: - result = self.panel.get_value(item, mjr, mnr) - expected = self.panel[item][mnr][mjr] - assert_almost_equal(result, expected) - - def test_abs(self): - - with catch_warnings(record=True): - result = self.panel4d.abs() - expected = np.abs(self.panel4d) - tm.assert_panel4d_equal(result, expected) - - p = self.panel4d['l1'] - result = p.abs() - expected = np.abs(p) - tm.assert_panel_equal(result, expected) - - df = p['ItemA'] - result = df.abs() - expected = np.abs(df) - assert_frame_equal(result, expected) - - -class CheckIndexing(object): - - def test_getitem(self): - 
pytest.raises(Exception, self.panel4d.__getitem__, 'ItemQ') - - def test_delitem_and_pop(self): - - with catch_warnings(record=True): - expected = self.panel4d['l2'] - result = self.panel4d.pop('l2') - tm.assert_panel_equal(expected, result) - assert 'l2' not in self.panel4d.labels - - del self.panel4d['l3'] - assert 'l3' not in self.panel4d.labels - pytest.raises(Exception, self.panel4d.__delitem__, 'l3') - - values = np.empty((4, 4, 4, 4)) - values[0] = 0 - values[1] = 1 - values[2] = 2 - values[3] = 3 - - panel4d = Panel4D(values, lrange(4), lrange(4), - lrange(4), lrange(4)) - - # did we delete the right row? - panel4dc = panel4d.copy() - del panel4dc[0] - tm.assert_panel_equal(panel4dc[1], panel4d[1]) - tm.assert_panel_equal(panel4dc[2], panel4d[2]) - tm.assert_panel_equal(panel4dc[3], panel4d[3]) - - panel4dc = panel4d.copy() - del panel4dc[1] - tm.assert_panel_equal(panel4dc[0], panel4d[0]) - tm.assert_panel_equal(panel4dc[2], panel4d[2]) - tm.assert_panel_equal(panel4dc[3], panel4d[3]) - - panel4dc = panel4d.copy() - del panel4dc[2] - tm.assert_panel_equal(panel4dc[1], panel4d[1]) - tm.assert_panel_equal(panel4dc[0], panel4d[0]) - tm.assert_panel_equal(panel4dc[3], panel4d[3]) - - panel4dc = panel4d.copy() - del panel4dc[3] - tm.assert_panel_equal(panel4dc[1], panel4d[1]) - tm.assert_panel_equal(panel4dc[2], panel4d[2]) - tm.assert_panel_equal(panel4dc[0], panel4d[0]) - - def test_setitem(self): - with catch_warnings(record=True): - - # Panel - p = Panel(dict( - ItemA=self.panel4d['l1']['ItemA'][2:].filter( - items=['A', 'B']))) - self.panel4d['l4'] = p - self.panel4d['l5'] = p - - p2 = self.panel4d['l4'] - - tm.assert_panel_equal(p, p2.reindex(items=p.items, - major_axis=p.major_axis, - minor_axis=p.minor_axis)) - - # scalar - self.panel4d['lG'] = 1 - self.panel4d['lE'] = True - assert self.panel4d['lG'].values.dtype == np.int64 - assert self.panel4d['lE'].values.dtype == np.bool_ - - # object dtype - self.panel4d['lQ'] = 'foo' - assert 
self.panel4d['lQ'].values.dtype == np.object_ - - # boolean dtype - self.panel4d['lP'] = self.panel4d['l1'] > 0 - assert self.panel4d['lP'].values.dtype == np.bool_ - - def test_setitem_by_indexer(self): - - with catch_warnings(record=True): - - # Panel - panel4dc = self.panel4d.copy() - p = panel4dc.iloc[0] - - def func(): - self.panel4d.iloc[0] = p - pytest.raises(NotImplementedError, func) - - # DataFrame - panel4dc = self.panel4d.copy() - df = panel4dc.iloc[0, 0] - df.iloc[:] = 1 - panel4dc.iloc[0, 0] = df - assert (panel4dc.iloc[0, 0].values == 1).all() - - # Series - panel4dc = self.panel4d.copy() - s = panel4dc.iloc[0, 0, :, 0] - s.iloc[:] = 1 - panel4dc.iloc[0, 0, :, 0] = s - assert (panel4dc.iloc[0, 0, :, 0].values == 1).all() - - # scalar - panel4dc = self.panel4d.copy() - panel4dc.iloc[0] = 1 - panel4dc.iloc[1] = True - panel4dc.iloc[2] = 'foo' - assert (panel4dc.iloc[0].values == 1).all() - assert panel4dc.iloc[1].values.all() - assert (panel4dc.iloc[2].values == 'foo').all() - - def test_setitem_by_indexer_mixed_type(self): - - with catch_warnings(record=True): - # GH 8702 - self.panel4d['foo'] = 'bar' - - # scalar - panel4dc = self.panel4d.copy() - panel4dc.iloc[0] = 1 - panel4dc.iloc[1] = True - panel4dc.iloc[2] = 'foo' - assert (panel4dc.iloc[0].values == 1).all() - assert panel4dc.iloc[1].values.all() - assert (panel4dc.iloc[2].values == 'foo').all() - - def test_comparisons(self): - with catch_warnings(record=True): - p1 = tm.makePanel4D() - p2 = tm.makePanel4D() - - tp = p1.reindex(labels=p1.labels.tolist() + ['foo']) - p = p1[p1.labels[0]] - - def test_comp(func): - result = func(p1, p2) - tm.assert_numpy_array_equal(result.values, - func(p1.values, p2.values)) - - # versus non-indexed same objs - pytest.raises(Exception, func, p1, tp) - - # versus different objs - pytest.raises(Exception, func, p1, p) - - result3 = func(self.panel4d, 0) - tm.assert_numpy_array_equal(result3.values, - func(self.panel4d.values, 0)) - - with 
np.errstate(invalid='ignore'): - test_comp(operator.eq) - test_comp(operator.ne) - test_comp(operator.lt) - test_comp(operator.gt) - test_comp(operator.ge) - test_comp(operator.le) - - def test_major_xs(self): - ref = self.panel4d['l1']['ItemA'] - - idx = self.panel4d.major_axis[5] - with catch_warnings(record=True): - xs = self.panel4d.major_xs(idx) - - assert_series_equal(xs['l1'].T['ItemA'], - ref.xs(idx), check_names=False) - - # not contained - idx = self.panel4d.major_axis[0] - BDay() - pytest.raises(Exception, self.panel4d.major_xs, idx) - - def test_major_xs_mixed(self): - self.panel4d['l4'] = 'foo' - with catch_warnings(record=True): - xs = self.panel4d.major_xs(self.panel4d.major_axis[0]) - assert xs['l1']['A'].dtype == np.float64 - assert xs['l4']['A'].dtype == np.object_ - - def test_minor_xs(self): - ref = self.panel4d['l1']['ItemA'] - - with catch_warnings(record=True): - idx = self.panel4d.minor_axis[1] - xs = self.panel4d.minor_xs(idx) - - assert_series_equal(xs['l1'].T['ItemA'], ref[idx], check_names=False) - - # not contained - pytest.raises(Exception, self.panel4d.minor_xs, 'E') - - def test_minor_xs_mixed(self): - self.panel4d['l4'] = 'foo' - - with catch_warnings(record=True): - xs = self.panel4d.minor_xs('D') - assert xs['l1'].T['ItemA'].dtype == np.float64 - assert xs['l4'].T['ItemA'].dtype == np.object_ - - def test_xs(self): - l1 = self.panel4d.xs('l1', axis=0) - expected = self.panel4d['l1'] - tm.assert_panel_equal(l1, expected) - - # View if possible - l1_view = self.panel4d.xs('l1', axis=0) - l1_view.values[:] = np.nan - assert np.isnan(self.panel4d['l1'].values).all() - - # Mixed-type - self.panel4d['strings'] = 'foo' - with catch_warnings(record=True): - result = self.panel4d.xs('D', axis=3) - - assert result._is_copy is not None - - def test_getitem_fancy_labels(self): - with catch_warnings(record=True): - panel4d = self.panel4d - - labels = panel4d.labels[[1, 0]] - items = panel4d.items[[1, 0]] - dates = panel4d.major_axis[::2] - 
cols = ['D', 'C', 'F'] - - # all 4 specified - tm.assert_panel4d_equal(panel4d.loc[labels, items, dates, cols], - panel4d.reindex(labels=labels, items=items, - major=dates, minor=cols)) - - # 3 specified - tm.assert_panel4d_equal(panel4d.loc[:, items, dates, cols], - panel4d.reindex(items=items, major=dates, - minor=cols)) - - # 2 specified - tm.assert_panel4d_equal(panel4d.loc[:, :, dates, cols], - panel4d.reindex(major=dates, minor=cols)) - - tm.assert_panel4d_equal(panel4d.loc[:, items, :, cols], - panel4d.reindex(items=items, minor=cols)) - - tm.assert_panel4d_equal(panel4d.loc[:, items, dates, :], - panel4d.reindex(items=items, major=dates)) - - # only 1 - tm.assert_panel4d_equal(panel4d.loc[:, items, :, :], - panel4d.reindex(items=items)) - - tm.assert_panel4d_equal(panel4d.loc[:, :, dates, :], - panel4d.reindex(major=dates)) - - tm.assert_panel4d_equal(panel4d.loc[:, :, :, cols], - panel4d.reindex(minor=cols)) - - def test_getitem_fancy_slice(self): - pass - - def test_getitem_fancy_ints(self): - pass - - def test_get_value(self): - for label in self.panel4d.labels: - for item in self.panel4d.items: - for mjr in self.panel4d.major_axis[::2]: - for mnr in self.panel4d.minor_axis: - result = self.panel4d.loc[ - label, item, mjr, mnr] - expected = self.panel4d[label][item][mnr][mjr] - assert_almost_equal(result, expected) - - def test_set_value(self): - - with catch_warnings(record=True): - - for label in self.panel4d.labels: - for item in self.panel4d.items: - for mjr in self.panel4d.major_axis[::2]: - for mnr in self.panel4d.minor_axis: - self.panel4d.set_value(label, item, mjr, mnr, 1.) - tm.assert_almost_equal( - self.panel4d[label][item][mnr][mjr], 1.) 
- - res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5) - assert is_float_dtype(res3['l4'].values) - - # resize - res = self.panel4d.set_value('l4', 'ItemE', 'foo', 'bar', 1.5) - assert isinstance(res, Panel4D) - assert res is not self.panel4d - assert res.get_value('l4', 'ItemE', 'foo', 'bar') == 1.5 - - res3 = self.panel4d.set_value('l4', 'ItemE', 'foobar', 'baz', 5) - assert is_float_dtype(res3['l4'].values) - - -class TestPanel4d(CheckIndexing, SafeForSparse, - SafeForLongAndSparse): - - def setup_method(self, method): - with catch_warnings(record=True): - self.panel4d = tm.makePanel4D(nper=8) - add_nans(self.panel4d) - - def test_constructor(self): - - with catch_warnings(record=True): - panel4d = Panel4D(self.panel4d._data) - assert panel4d._data is self.panel4d._data - - panel4d = Panel4D(self.panel4d._data, copy=True) - assert panel4d._data is not self.panel4d._data - tm.assert_panel4d_equal(panel4d, self.panel4d) - - vals = self.panel4d.values - - # no copy - panel4d = Panel4D(vals) - assert panel4d.values is vals - - # copy - panel4d = Panel4D(vals, copy=True) - assert panel4d.values is not vals - - # GH #8285, test when scalar data is used to construct a Panel4D - # if dtype is not passed, it should be inferred - value_and_dtype = [(1, 'int64'), (3.14, 'float64'), - ('foo', np.object_)] - for (val, dtype) in value_and_dtype: - panel4d = Panel4D(val, labels=range(2), items=range( - 3), major_axis=range(4), minor_axis=range(5)) - vals = np.empty((2, 3, 4, 5), dtype=dtype) - vals.fill(val) - expected = Panel4D(vals, dtype=dtype) - tm.assert_panel4d_equal(panel4d, expected) - - # test the case when dtype is passed - panel4d = Panel4D(1, labels=range(2), items=range( - 3), major_axis=range(4), minor_axis=range(5), dtype='float32') - vals = np.empty((2, 3, 4, 5), dtype='float32') - vals.fill(1) - - expected = Panel4D(vals, dtype='float32') - tm.assert_panel4d_equal(panel4d, expected) - - def test_constructor_cast(self): - with 
catch_warnings(record=True): - zero_filled = self.panel4d.fillna(0) - - casted = Panel4D(zero_filled._data, dtype=int) - casted2 = Panel4D(zero_filled.values, dtype=int) - - exp_values = zero_filled.values.astype(int) - assert_almost_equal(casted.values, exp_values) - assert_almost_equal(casted2.values, exp_values) - - casted = Panel4D(zero_filled._data, dtype=np.int32) - casted2 = Panel4D(zero_filled.values, dtype=np.int32) - - exp_values = zero_filled.values.astype(np.int32) - assert_almost_equal(casted.values, exp_values) - assert_almost_equal(casted2.values, exp_values) - - # can't cast - data = [[['foo', 'bar', 'baz']]] - pytest.raises(ValueError, Panel, data, dtype=float) - - def test_consolidate(self): - with catch_warnings(record=True): - assert self.panel4d._data.is_consolidated() - - self.panel4d['foo'] = 1. - assert not self.panel4d._data.is_consolidated() - - panel4d = self.panel4d._consolidate() - assert panel4d._data.is_consolidated() - - def test_ctor_dict(self): - with catch_warnings(record=True): - l1 = self.panel4d['l1'] - l2 = self.panel4d['l2'] - - d = {'A': l1, 'B': l2.loc[['ItemB'], :, :]} - panel4d = Panel4D(d) - - tm.assert_panel_equal(panel4d['A'], self.panel4d['l1']) - tm.assert_frame_equal(panel4d.loc['B', 'ItemB', :, :], - self.panel4d.loc['l2', ['ItemB'], - :, :]['ItemB']) - - def test_constructor_dict_mixed(self): - with catch_warnings(record=True): - data = {k: v.values for k, v in self.panel4d.iteritems()} - result = Panel4D(data) - - exp_major = Index(np.arange(len(self.panel4d.major_axis))) - tm.assert_index_equal(result.major_axis, exp_major) - - result = Panel4D(data, - labels=self.panel4d.labels, - items=self.panel4d.items, - major_axis=self.panel4d.major_axis, - minor_axis=self.panel4d.minor_axis) - tm.assert_panel4d_equal(result, self.panel4d) - - data['l2'] = self.panel4d['l2'] - - result = Panel4D(data) - tm.assert_panel4d_equal(result, self.panel4d) - - # corner, blow up - data['l2'] = data['l2']['ItemB'] - 
pytest.raises(Exception, Panel4D, data) - - data['l2'] = self.panel4d['l2'].values[:, :, :-1] - pytest.raises(Exception, Panel4D, data) - - def test_constructor_resize(self): - with catch_warnings(record=True): - data = self.panel4d._data - labels = self.panel4d.labels[:-1] - items = self.panel4d.items[:-1] - major = self.panel4d.major_axis[:-1] - minor = self.panel4d.minor_axis[:-1] - - result = Panel4D(data, labels=labels, items=items, - major_axis=major, minor_axis=minor) - expected = self.panel4d.reindex( - labels=labels, items=items, major=major, minor=minor) - tm.assert_panel4d_equal(result, expected) - - result = Panel4D(data, items=items, major_axis=major) - expected = self.panel4d.reindex(items=items, major=major) - tm.assert_panel4d_equal(result, expected) - - result = Panel4D(data, items=items) - expected = self.panel4d.reindex(items=items) - tm.assert_panel4d_equal(result, expected) - - result = Panel4D(data, minor_axis=minor) - expected = self.panel4d.reindex(minor=minor) - tm.assert_panel4d_equal(result, expected) - - def test_conform(self): - with catch_warnings(record=True): - - p = self.panel4d['l1'].filter(items=['ItemA', 'ItemB']) - conformed = self.panel4d.conform(p) - - tm.assert_index_equal(conformed.items, self.panel4d.labels) - tm.assert_index_equal(conformed.major_axis, - self.panel4d.major_axis) - tm.assert_index_equal(conformed.minor_axis, - self.panel4d.minor_axis) - - def test_reindex(self): - with catch_warnings(record=True): - ref = self.panel4d['l2'] - - # labels - result = self.panel4d.reindex(labels=['l1', 'l2']) - tm.assert_panel_equal(result['l2'], ref) - - # items - result = self.panel4d.reindex(items=['ItemA', 'ItemB']) - assert_frame_equal(result['l2']['ItemB'], ref['ItemB']) - - # major - new_major = list(self.panel4d.major_axis[:10]) - result = self.panel4d.reindex(major=new_major) - assert_frame_equal( - result['l2']['ItemB'], ref['ItemB'].reindex(index=new_major)) - - # raise exception put both major and major_axis - 
pytest.raises(Exception, self.panel4d.reindex, - major_axis=new_major, major=new_major) - - # minor - new_minor = list(self.panel4d.minor_axis[:2]) - result = self.panel4d.reindex(minor=new_minor) - assert_frame_equal( - result['l2']['ItemB'], ref['ItemB'].reindex(columns=new_minor)) - - result = self.panel4d.reindex(labels=self.panel4d.labels, - items=self.panel4d.items, - major=self.panel4d.major_axis, - minor=self.panel4d.minor_axis) - - # don't necessarily copy - result = self.panel4d.reindex() - tm.assert_panel4d_equal(result, self.panel4d) - assert result is not self.panel4d - - # with filling - smaller_major = self.panel4d.major_axis[::5] - smaller = self.panel4d.reindex(major=smaller_major) - - larger = smaller.reindex(major=self.panel4d.major_axis, - method='pad') - - tm.assert_panel_equal(larger.loc[:, :, - self.panel4d.major_axis[1], :], - smaller.loc[:, :, smaller_major[0], :]) - - # don't necessarily copy - result = self.panel4d.reindex( - major=self.panel4d.major_axis, copy=False) - tm.assert_panel4d_equal(result, self.panel4d) - assert result is self.panel4d - - def test_not_hashable(self): - with catch_warnings(record=True): - p4D_empty = Panel4D() - pytest.raises(TypeError, hash, p4D_empty) - pytest.raises(TypeError, hash, self.panel4d) - - def test_reindex_like(self): - # reindex_like - with catch_warnings(record=True): - smaller = self.panel4d.reindex(labels=self.panel4d.labels[:-1], - items=self.panel4d.items[:-1], - major=self.panel4d.major_axis[:-1], - minor=self.panel4d.minor_axis[:-1]) - smaller_like = self.panel4d.reindex_like(smaller) - tm.assert_panel4d_equal(smaller, smaller_like) - - def test_sort_index(self): - with catch_warnings(record=True): - import random - - rlabels = list(self.panel4d.labels) - ritems = list(self.panel4d.items) - rmajor = list(self.panel4d.major_axis) - rminor = list(self.panel4d.minor_axis) - random.shuffle(rlabels) - random.shuffle(ritems) - random.shuffle(rmajor) - random.shuffle(rminor) - - random_order = 
self.panel4d.reindex(labels=rlabels) - sorted_panel4d = random_order.sort_index(axis=0) - tm.assert_panel4d_equal(sorted_panel4d, self.panel4d) - - def test_fillna(self): - - with catch_warnings(record=True): - assert not np.isfinite(self.panel4d.values).all() - filled = self.panel4d.fillna(0) - assert np.isfinite(filled.values).all() - - pytest.raises(NotImplementedError, - self.panel4d.fillna, method='pad') - - def test_swapaxes(self): - with catch_warnings(record=True): - result = self.panel4d.swapaxes('labels', 'items') - assert result.items is self.panel4d.labels - - result = self.panel4d.swapaxes('labels', 'minor') - assert result.labels is self.panel4d.minor_axis - - result = self.panel4d.swapaxes('items', 'minor') - assert result.items is self.panel4d.minor_axis - - result = self.panel4d.swapaxes('items', 'major') - assert result.items is self.panel4d.major_axis - - result = self.panel4d.swapaxes('major', 'minor') - assert result.major_axis is self.panel4d.minor_axis - - # this should also work - result = self.panel4d.swapaxes(0, 1) - assert result.labels is self.panel4d.items - - # this works, but return a copy - result = self.panel4d.swapaxes('items', 'items') - tm.assert_panel4d_equal(self.panel4d, result) - assert id(self.panel4d) != id(result) - - def test_update(self): - - with catch_warnings(record=True): - p4d = Panel4D([[[[1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]], - [[1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]]]) - - other = Panel4D([[[[3.6, 2., np.nan]], - [[np.nan, np.nan, 7]]]]) - - p4d.update(other) - - expected = Panel4D([[[[3.6, 2, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]], - [[1.5, np.nan, 7], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.], - [1.5, np.nan, 3.]]]]) - - tm.assert_panel4d_equal(p4d, expected) - - def test_dtypes(self): - - result = self.panel4d.dtypes - expected = Series(np.dtype('float64'), index=self.panel4d.labels) - 
assert_series_equal(result, expected) - - def test_repr_empty(self): - with catch_warnings(record=True): - empty = Panel4D() - repr(empty) - - def test_rename(self): - with catch_warnings(record=True): - - mapper = {'l1': 'foo', - 'l2': 'bar', - 'l3': 'baz'} - - renamed = self.panel4d.rename_axis(mapper, axis=0) - exp = Index(['foo', 'bar', 'baz']) - tm.assert_index_equal(renamed.labels, exp) - - renamed = self.panel4d.rename_axis(str.lower, axis=3) - exp = Index(['a', 'b', 'c', 'd']) - tm.assert_index_equal(renamed.minor_axis, exp) - - # don't copy - renamed_nocopy = self.panel4d.rename_axis(mapper, - axis=0, - copy=False) - renamed_nocopy['foo'] = 3. - assert (self.panel4d['l1'].values == 3).all() - - def test_get_attr(self): - tm.assert_panel_equal(self.panel4d['l1'], self.panel4d.l1) - - # GH issue 15960 - def test_sort_values(self): - pytest.raises(NotImplementedError, self.panel4d.sort_values) - pytest.raises(NotImplementedError, self.panel4d.sort_values, 'ItemA') diff --git a/pandas/tests/test_panelnd.py b/pandas/tests/test_panelnd.py deleted file mode 100644 index c473e3c09cc74..0000000000000 --- a/pandas/tests/test_panelnd.py +++ /dev/null @@ -1,104 +0,0 @@ -# -*- coding: utf-8 -*- -import pytest - -from warnings import catch_warnings -from pandas.core import panelnd -from pandas.core.panel import Panel - -from pandas.util.testing import assert_panel_equal -import pandas.util.testing as tm - - -class TestPanelnd(object): - - def setup_method(self, method): - pass - - def test_4d_construction(self): - - with catch_warnings(record=True): - - # create a 4D - Panel4D = panelnd.create_nd_panel_factory( - klass_name='Panel4D', - orders=['labels', 'items', 'major_axis', 'minor_axis'], - slices={'items': 'items', 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, - slicer=Panel, - aliases={'major': 'major_axis', 'minor': 'minor_axis'}, - stat_axis=2) - - p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) # noqa - - def 
test_4d_construction_alt(self): - - with catch_warnings(record=True): - - # create a 4D - Panel4D = panelnd.create_nd_panel_factory( - klass_name='Panel4D', - orders=['labels', 'items', 'major_axis', 'minor_axis'], - slices={'items': 'items', 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, - slicer='Panel', - aliases={'major': 'major_axis', 'minor': 'minor_axis'}, - stat_axis=2) - - p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) # noqa - - def test_4d_construction_error(self): - - # create a 4D - pytest.raises(Exception, - panelnd.create_nd_panel_factory, - klass_name='Panel4D', - orders=['labels', 'items', 'major_axis', - 'minor_axis'], - slices={'items': 'items', - 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, - slicer='foo', - aliases={'major': 'major_axis', - 'minor': 'minor_axis'}, - stat_axis=2) - - def test_5d_construction(self): - - with catch_warnings(record=True): - - # create a 4D - Panel4D = panelnd.create_nd_panel_factory( - klass_name='Panel4D', - orders=['labels1', 'items', 'major_axis', 'minor_axis'], - slices={'items': 'items', 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, - slicer=Panel, - aliases={'major': 'major_axis', 'minor': 'minor_axis'}, - stat_axis=2) - - # deprecation GH13564 - p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel())) - - # create a 5D - Panel5D = panelnd.create_nd_panel_factory( - klass_name='Panel5D', - orders=['cool1', 'labels1', 'items', 'major_axis', - 'minor_axis'], - slices={'labels1': 'labels1', 'items': 'items', - 'major_axis': 'major_axis', - 'minor_axis': 'minor_axis'}, - slicer=Panel4D, - aliases={'major': 'major_axis', 'minor': 'minor_axis'}, - stat_axis=2) - - # deprecation GH13564 - p5d = Panel5D(dict(C1=p4d)) - - # slice back to 4d - results = p5d.iloc[p5d.cool1.get_loc('C1'), :, :, 0:3, :] - expected = p4d.iloc[:, :, 0:3, :] - assert_panel_equal(results['L1'], expected['L1']) - - # test a transpose - # results = p5d.transpose(1,2,3,4,0) - # expected = diff 
--git a/pandas/util/testing.py b/pandas/util/testing.py index 8dc0aa1e85ef4..2278d1fe25c7c 100644 --- a/pandas/util/testing.py +++ b/pandas/util/testing.py @@ -42,7 +42,7 @@ from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex, DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex, Index, MultiIndex, - Series, DataFrame, Panel, Panel4D) + Series, DataFrame, Panel) from pandas._libs import testing as _testing from pandas.io.common import urlopen @@ -1333,8 +1333,6 @@ def assert_panelnd_equal(left, right, _panel_frame_equal = partial(assert_frame_equal, check_names=False) assert_panel_equal = partial(assert_panelnd_equal, assert_func=_panel_frame_equal) -assert_panel4d_equal = partial(assert_panelnd_equal, - assert_func=assert_panel_equal) # ----------------------------------------------------------------------------- @@ -1674,13 +1672,6 @@ def makePeriodPanel(nper=None): return Panel.fromDict(data) -def makePanel4D(nper=None): - with warnings.catch_warnings(record=True): - d = dict(l1=makePanel(nper), l2=makePanel(nper), - l3=makePanel(nper)) - return Panel4D(d) - - def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None, idx_type=None): """Create an index/multindex with given dimensions, levels, names, etc'
Deprecated back in v0.19.0 xref #13147
https://api.github.com/repos/pandas-dev/pandas/pulls/19059
2018-01-03T17:08:13Z
2018-01-03T23:56:32Z
2018-01-03T23:56:32Z
2018-01-04T05:15:43Z
BUG: incorrect set_labels in MI
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 77de1851490b2..57aa947d27e3a 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -363,6 +363,7 @@ Indexing - Bug in indexing non-scalar value from ``Series`` having non-unique ``Index`` will return value flattened (:issue:`17610`) - Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) - Bug in ``__setitem__`` when indexing a :class:`DataFrame` with a 2-d boolean ndarray (:issue:`18582`) +- Bug in :func:`MultiIndex.set_labels` which would cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... ] (:issue:`19057`) - Bug in ``str.extractall`` when there were no matches empty :class:`Index` was returned instead of appropriate :class:`MultiIndex` (:issue:`19034`) I/O diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 7107378671ba5..398ee7d0aef3c 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -328,8 +328,9 @@ def _set_labels(self, labels, level=None, copy=False, validate=True, else: level = [self._get_level_number(l) for l in level] new_labels = list(self._labels) - for l, lev, lab in zip(level, self.levels, labels): - new_labels[l] = _ensure_frozen( + for lev_idx, lab in zip(level, labels): + lev = self.levels[lev_idx] + new_labels[lev_idx] = _ensure_frozen( lab, lev, copy=copy)._shallow_copy() new_labels = FrozenList(new_labels) diff --git a/pandas/tests/indexes/test_multi.py b/pandas/tests/indexes/test_multi.py index 2a7c020f4c9e9..9664d73651185 100644 --- a/pandas/tests/indexes/test_multi.py +++ b/pandas/tests/indexes/test_multi.py @@ -327,6 +327,21 @@ def assert_matching(actual, expected): assert_matching(ind2.labels, new_labels) assert_matching(self.index.labels, labels) + # label changing for levels of different magnitude of categories + ind = 
pd.MultiIndex.from_tuples([(0, i) for i in range(130)]) + new_labels = range(129, -1, -1) + expected = pd.MultiIndex.from_tuples( + [(0, i) for i in new_labels]) + + # [w/o mutation] + result = ind.set_labels(labels=new_labels, level=1) + assert result.equals(expected) + + # [w/ mutation] + result = ind.copy() + result.set_labels(labels=new_labels, level=1, inplace=True) + assert result.equals(expected) + def test_set_levels_labels_names_bad_input(self): levels, labels = self.index.levels, self.index.labels names = self.index.names
This fixes #19057
https://api.github.com/repos/pandas-dev/pandas/pulls/19058
2018-01-03T16:57:40Z
2018-01-06T17:25:23Z
2018-01-06T17:25:23Z
2018-01-06T17:25:28Z
BUG: x in MultiIndex.drop(x)
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index c4fc222353738..23eae18c4c121 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -338,7 +338,7 @@ Conversion - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) - Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) - Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) -- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) +- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) - Bug in :class:`Series`` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` had results cast to ``dtype='int64'`` (:issue:`17250`) - Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (issue:`19042`) - Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (issue:`19043`) @@ -364,6 +364,7 @@ Indexing - Bug in indexing non-scalar value from ``Series`` having non-unique ``Index`` will return value flattened (:issue:`17610`) - Bug in :func:`DatetimeIndex.insert` where inserting ``NaT`` into a timezone-aware index incorrectly raised (:issue:`16357`) - Bug in ``__setitem__`` when indexing a :class:`DataFrame` with a 2-d boolean ndarray (:issue:`18582`) +- Bug in :func:`MultiIndex.__contains__` where non-tuple keys would return ``True`` even if they had been dropped (:issue:`19027`) - Bug in :func:`MultiIndex.set_labels` which would 
cause casting (and potentially clipping) of the new labels if the ``level`` argument is not 0 or a list like [0, 1, ... ] (:issue:`19057`) - Bug in ``str.extractall`` when there were no matches empty :class:`Index` was returned instead of appropriate :class:`MultiIndex` (:issue:`19034`) diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py index 398ee7d0aef3c..5739c8dfd8b53 100644 --- a/pandas/core/indexes/multi.py +++ b/pandas/core/indexes/multi.py @@ -2123,6 +2123,11 @@ def _maybe_to_slice(loc): if not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) + + # _get_level_indexer returns an empty slice if the key has + # been dropped from the MultiIndex + if isinstance(loc, slice) and loc.start == loc.stop: + raise KeyError(key) return _maybe_to_slice(loc) keylen = len(key) diff --git a/pandas/core/reshape/reshape.py b/pandas/core/reshape/reshape.py index 1ca014baa9ec8..d6aed064e49f8 100644 --- a/pandas/core/reshape/reshape.py +++ b/pandas/core/reshape/reshape.py @@ -628,7 +628,11 @@ def _convert_level_number(level_num, columns): levsize = len(level_labels) drop_cols = [] for key in unique_groups: - loc = this.columns.get_loc(key) + try: + loc = this.columns.get_loc(key) + except KeyError: + drop_cols.append(key) + continue # can make more efficient? 
# we almost always return a slice @@ -639,10 +643,7 @@ def _convert_level_number(level_num, columns): else: slice_len = loc.stop - loc.start - if slice_len == 0: - drop_cols.append(key) - continue - elif slice_len != levsize: + if slice_len != levsize: chunk = this.loc[:, this.columns[loc]] chunk.columns = level_vals.take(chunk.columns.labels[-1]) value_slice = chunk.reindex(columns=level_vals_used).values diff --git a/pandas/tests/frame/test_mutate_columns.py b/pandas/tests/frame/test_mutate_columns.py index 26e2b801f6460..9acdf2f17d86a 100644 --- a/pandas/tests/frame/test_mutate_columns.py +++ b/pandas/tests/frame/test_mutate_columns.py @@ -193,9 +193,10 @@ def test_delitem_multiindex(self): with pytest.raises(KeyError): del df[('A',)] - # xref: https://github.com/pandas-dev/pandas/issues/2770 - # the 'A' is STILL in the columns! - assert 'A' in df.columns + # behavior of dropped/deleted MultiIndex levels changed from + # GH 2770 to GH 19027: MultiIndex no longer '.__contains__' + # levels which are dropped/deleted + assert 'A' not in df.columns with pytest.raises(KeyError): del df['A'] diff --git a/pandas/tests/indexing/test_multiindex.py b/pandas/tests/indexing/test_multiindex.py index f69b9d98143b0..43656a392e582 100644 --- a/pandas/tests/indexing/test_multiindex.py +++ b/pandas/tests/indexing/test_multiindex.py @@ -705,6 +705,26 @@ def test_multiindex_symmetric_difference(self): result = idx ^ idx2 assert result.names == [None, None] + def test_multiindex_contains_dropped(self): + # GH 19027 + # test that dropped MultiIndex levels are not in the MultiIndex + # despite continuing to be in the MultiIndex's levels + idx = MultiIndex.from_product([[1, 2], [3, 4]]) + assert 2 in idx + idx = idx.drop(2) + + # drop implementation keeps 2 in the levels + assert 2 in idx.levels[0] + # but it should no longer be in the index itself + assert 2 not in idx + + # also applies to strings + idx = MultiIndex.from_product([['a', 'b'], ['c', 'd']]) + assert 'a' in idx + idx = 
idx.drop('a') + assert 'a' in idx.levels[0] + assert 'a' not in idx + class TestMultiIndexSlicers(object):
- [x] closes #19027 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19054
2018-01-03T12:10:06Z
2018-01-10T00:27:59Z
2018-01-10T00:27:59Z
2018-03-19T15:06:36Z
TST: Remove clipboard-excel test warnings
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py index b5d1435c29cb7..98c0effabec84 100644 --- a/pandas/tests/io/test_clipboard.py +++ b/pandas/tests/io/test_clipboard.py @@ -91,6 +91,8 @@ def test_round_trip_frame(self): self.check_round_trip_frame(dt) def test_read_clipboard_infer_excel(self): + # gh-19010: avoid warnings + clip_kwargs = dict(engine="python") text = dedent(""" John James Charlie Mingus @@ -98,7 +100,7 @@ def test_read_clipboard_infer_excel(self): 4 Harry Carney """.strip()) clipboard_set(text) - df = pd.read_clipboard() + df = pd.read_clipboard(**clip_kwargs) # excel data is parsed correctly assert df.iloc[1][1] == 'Harry Carney' @@ -110,7 +112,7 @@ def test_read_clipboard_infer_excel(self): 3 4 """.strip()) clipboard_set(text) - res = pd.read_clipboard() + res = pd.read_clipboard(**clip_kwargs) text = dedent(""" a b @@ -118,7 +120,7 @@ def test_read_clipboard_infer_excel(self): 3 4 """.strip()) clipboard_set(text) - exp = pd.read_clipboard() + exp = pd.read_clipboard(**clip_kwargs) tm.assert_frame_equal(res, exp)
Closes #19010.
https://api.github.com/repos/pandas-dev/pandas/pulls/19052
2018-01-03T10:04:41Z
2018-01-03T11:15:11Z
2018-01-03T11:15:11Z
2018-01-03T17:00:18Z
CLN: ASV stat_ops
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py index 1e1eb167b46bf..c447c78d0d070 100644 --- a/asv_bench/benchmarks/stat_ops.py +++ b/asv_bench/benchmarks/stat_ops.py @@ -1,205 +1,114 @@ -from .pandas_vb_common import * +import numpy as np +import pandas as pd +from .pandas_vb_common import setup # noqa -def _set_use_bottleneck_False(): - try: - pd.options.compute.use_bottleneck = False - except: - from pandas.core import nanops - nanops._USE_BOTTLENECK = False +ops = ['mean', 'sum', 'median', 'std', 'skew', 'kurt', 'mad', 'prod', 'sem', + 'var'] -class FrameOps(object): - goal_time = 0.2 - - param_names = ['op', 'use_bottleneck', 'dtype', 'axis'] - params = [['mean', 'sum', 'median'], - [True, False], - ['float', 'int'], - [0, 1]] - - def setup(self, op, use_bottleneck, dtype, axis): - if dtype == 'float': - self.df = DataFrame(np.random.randn(100000, 4)) - elif dtype == 'int': - self.df = DataFrame(np.random.randint(1000, size=(100000, 4))) - - if not use_bottleneck: - _set_use_bottleneck_False() - - self.func = getattr(self.df, op) - - def time_op(self, op, use_bottleneck, dtype, axis): - self.func(axis=axis) +class FrameOps(object): -class stat_ops_level_frame_sum(object): goal_time = 0.2 + params = [ops, ['float', 'int'], [0, 1], [True, False]] + param_names = ['op', 'dtype', 'axis', 'use_bottleneck'] - def setup(self): - self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)]) - random.shuffle(self.index.values) - self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index) - self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1]) - - def time_stat_ops_level_frame_sum(self): - self.df.sum(level=1) - - -class stat_ops_level_frame_sum_multiple(object): - goal_time = 0.2 + def setup(self, op, dtype, axis, use_bottleneck): + df = 
pd.DataFrame(np.random.randn(100000, 4)).astype(dtype) + try: + pd.options.compute.use_bottleneck = use_bottleneck + except: + from pandas.core import nanops + nanops._USE_BOTTLENECK = use_bottleneck + self.df_func = getattr(df, op) - def setup(self): - self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)]) - random.shuffle(self.index.values) - self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index) - self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1]) + def time_op(self, op, dtype, axis, use_bottleneck): + self.df_func(axis=axis) - def time_stat_ops_level_frame_sum_multiple(self): - self.df.sum(level=[0, 1]) +class FrameMultiIndexOps(object): -class stat_ops_level_series_sum(object): goal_time = 0.2 + params = ([0, 1, [0, 1]], ops) + param_names = ['level', 'op'] - def setup(self): - self.index = MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)]) - random.shuffle(self.index.values) - self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index) - self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1]) + def setup(self, level, op): + levels = [np.arange(10), np.arange(100), np.arange(100)] + labels = [np.arange(10).repeat(10000), + np.tile(np.arange(100).repeat(100), 10), + np.tile(np.tile(np.arange(100), 100), 10)] + index = pd.MultiIndex(levels=levels, labels=labels) + df = pd.DataFrame(np.random.randn(len(index), 4), index=index) + self.df_func = getattr(df, op) - def time_stat_ops_level_series_sum(self): - self.df[1].sum(level=1) + def time_op(self, level, op): + self.df_func(level=level) -class stat_ops_level_series_sum_multiple(object): - goal_time = 0.2 - - def setup(self): - self.index = 
MultiIndex(levels=[np.arange(10), np.arange(100), np.arange(100)], labels=[np.arange(10).repeat(10000), np.tile(np.arange(100).repeat(100), 10), np.tile(np.tile(np.arange(100), 100), 10)]) - random.shuffle(self.index.values) - self.df = DataFrame(np.random.randn(len(self.index), 4), index=self.index) - self.df_level = DataFrame(np.random.randn(100, 4), index=self.index.levels[1]) - - def time_stat_ops_level_series_sum_multiple(self): - self.df[1].sum(level=[0, 1]) +class SeriesOps(object): - -class stat_ops_series_std(object): goal_time = 0.2 + params = [ops, ['float', 'int'], [True, False]] + param_names = ['op', 'dtype', 'use_bottleneck'] - def setup(self): - self.s = Series(np.random.randn(100000), index=np.arange(100000)) - self.s[::2] = np.nan - - def time_stat_ops_series_std(self): - self.s.std() + def setup(self, op, dtype, use_bottleneck): + s = pd.Series(np.random.randn(100000)).astype(dtype) + try: + pd.options.compute.use_bottleneck = use_bottleneck + except: + from pandas.core import nanops + nanops._USE_BOTTLENECK = use_bottleneck + self.s_func = getattr(s, op) + def time_op(self, op, dtype, use_bottleneck): + self.s_func() -class stats_corr_spearman(object): - goal_time = 0.2 - def setup(self): - self.df = DataFrame(np.random.randn(1000, 30)) +class SeriesMultiIndexOps(object): - def time_stats_corr_spearman(self): - self.df.corr(method='spearman') - - -class stats_rank2d_axis0_average(object): goal_time = 0.2 + params = ([0, 1, [0, 1]], ops) + param_names = ['level', 'op'] - def setup(self): - self.df = DataFrame(np.random.randn(5000, 50)) - - def time_stats_rank2d_axis0_average(self): - self.df.rank() + def setup(self, level, op): + levels = [np.arange(10), np.arange(100), np.arange(100)] + labels = [np.arange(10).repeat(10000), + np.tile(np.arange(100).repeat(100), 10), + np.tile(np.tile(np.arange(100), 100), 10)] + index = pd.MultiIndex(levels=levels, labels=labels) + s = pd.Series(np.random.randn(len(index)), index=index) + self.s_func = 
getattr(s, op) + def time_op(self, level, op): + self.s_func(level=level) -class stats_rank2d_axis1_average(object): - goal_time = 0.2 - - def setup(self): - self.df = DataFrame(np.random.randn(5000, 50)) - def time_stats_rank2d_axis1_average(self): - self.df.rank(1) +class Rank(object): - -class stats_rank_average(object): - goal_time = 0.2 - - def setup(self): - self.values = np.concatenate([np.arange(100000), np.random.randn(100000), np.arange(100000)]) - self.s = Series(self.values) - - def time_stats_rank_average(self): - self.s.rank() - - -class stats_rank_average_int(object): - goal_time = 0.2 - - def setup(self): - self.values = np.random.randint(0, 100000, size=200000) - self.s = Series(self.values) - - def time_stats_rank_average_int(self): - self.s.rank() - - -class stats_rank_pct_average(object): goal_time = 0.2 + params = [['DataFrame', 'Series'], [True, False]] + param_names = ['constructor', 'pct'] - def setup(self): - self.values = np.concatenate([np.arange(100000), np.random.randn(100000), np.arange(100000)]) - self.s = Series(self.values) + def setup(self, constructor, pct): + values = np.random.randn(10**5) + self.data = getattr(pd, constructor)(values) - def time_stats_rank_pct_average(self): - self.s.rank(pct=True) + def time_rank(self, constructor, pct): + self.data.rank(pct=pct) + def time_average_old(self, constructor, pct): + self.data.rank(pct=pct) / len(self.data) -class stats_rank_pct_average_old(object): - goal_time = 0.2 - - def setup(self): - self.values = np.concatenate([np.arange(100000), np.random.randn(100000), np.arange(100000)]) - self.s = Series(self.values) - - def time_stats_rank_pct_average_old(self): - (self.s.rank() / len(self.s)) +class Correlation(object): -class stats_rolling_mean(object): goal_time = 0.2 + params = ['spearman', 'kendall', 'pearson'] + param_names = ['method'] - def setup(self): - self.arr = np.random.randn(100000) - self.win = 100 - - def time_rolling_mean(self): - rolling_mean(self.arr, self.win) - - 
def time_rolling_median(self): - rolling_median(self.arr, self.win) - - def time_rolling_min(self): - rolling_min(self.arr, self.win) - - def time_rolling_max(self): - rolling_max(self.arr, self.win) - - def time_rolling_sum(self): - rolling_sum(self.arr, self.win) - - def time_rolling_std(self): - rolling_std(self.arr, self.win) - - def time_rolling_var(self): - rolling_var(self.arr, self.win) - - def time_rolling_skew(self): - rolling_skew(self.arr, self.win) + def setup(self, method): + self.df = pd.DataFrame(np.random.randn(1000, 30)) - def time_rolling_kurt(self): - rolling_kurt(self.arr, self.win) + def time_corr(self, method): + self.df.corr(method=method)
There were some old `pd.rolling_*` methods being tested in `stat_ops.py` that I moved to `rolling.py` (or should they just be removed?), otherwise the usual cleanup: ``` $ asv dev -b ^stat_ops · Discovering benchmarks · Running 7 total benchmarks (1 commits * 1 environments * 7 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 14.29%] ··· Running stat_ops.Correlation.time_corr ok [ 14.29%] ···· ========== ======== method ---------- -------- spearman 118ms kendall 693ms pearson 5.80ms ========== ======== [ 28.57%] ··· Running stat_ops.FrameMultiIndexOps.time_op ok [ 28.57%] ···· ======== ======== ======== ======== -- op -------- -------------------------- level mean sum median ======== ======== ======== ======== 0 8.40ms 8.51ms 21.8ms 1 8.57ms 8.52ms 22.6ms [0, 1] 17.3ms 17.0ms 31.9ms ======== ======== ======== ======== [ 42.86%] ··· Running stat_ops.FrameOps.time_op ok [ 42.86%] ···· ======== ================ ======= ======== ======== -- axis --------------------------------- ----------------- op use_bottleneck dtype 0 1 ======== ================ ======= ======== ======== mean True float 1.16ms 2.07ms mean True int 1.27ms 2.10ms mean False float 11.8ms 12.2ms mean False int 10.2ms 11.0ms sum True float 11.6ms 11.6ms sum True int 7.41ms 8.42ms sum False float 11.7ms 11.6ms sum False int 7.41ms 8.19ms median True float 6.86ms 6.05ms median True int 4.53ms 5.67ms median False float 23.4ms 7.45s median False int 24.9ms 7.44s std True float 1.95ms 4.51ms std True int 3.42ms 6.06ms std False float 23.2ms 26.6ms std False int 24.5ms 25.8ms ======== ================ ======= ======== ======== [ 57.14%] ··· Running stat_ops.Rank.time_average_old ok [ 57.14%] ···· ============= ======= ======= -- pct ------------- --------------- constructor True False ============= ======= ======= DataFrame 435ms 432ms Series 432ms 435ms ============= ======= 
======= [ 71.43%] ··· Running stat_ops.Rank.time_rank ok [ 71.43%] ···· ============= ======== ======== -- pct ------------- ----------------- constructor True False ============= ======== ======== DataFrame 18.6ms 18.4ms Series 18.9ms 18.2ms ============= ======== ======== [ 85.71%] ··· Running stat_ops.SeriesMultiIndexOps.time_op ok [ 85.71%] ···· ======== ======== ======== ======== -- op -------- -------------------------- level mean sum median ======== ======== ======== ======== 0 21.2ms 20.3ms 23.6ms 1 21.0ms 21.0ms 25.0ms [0, 1] 15.2ms 15.6ms 18.9ms ======== ======== ======== ======== [100.00%] ··· Running stat_ops.SeriesOps.time_op ok [100.00%] ···· ======== ================ ======== ======== -- dtype ------------------------- ----------------- op use_bottleneck float int ======== ================ ======== ======== mean True 421μs 388μs mean False 2.01ms 2.20ms sum True 2.02ms 2.09ms sum False 2.01ms 2.04ms median True 2.23ms 1.30ms median False 6.25ms 6.73ms std True 603μs 951μs std False 3.26ms 3.67ms ======== ================ ======== ======== ``` ``` $ asv dev -b ^rolling.DepreciatedRolling · Discovering benchmarks · Running 1 total benchmarks (1 commits * 1 environments * 1 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [100.00%] ··· Running rolling.DepreciatedRolling.time_method ok [100.00%] ···· ================ ======== method ---------------- -------- rolling_median 88.8ms rolling_mean 11.0ms rolling_min 12.4ms rolling_max 12.1ms rolling_var 12.9ms rolling_skew 16.2ms rolling_kurt 16.0ms rolling_std 14.2ms ================ ======== [100.00%] ····· For parameters: 'rolling_median' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/rolling.py:56: FutureWarning: pd.rolling_median is deprecated for ndarrays and will be removed in a future version getattr(pd, method)(self.arr, self.win) For parameters: 'rolling_mean' 
/home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/rolling.py:56: FutureWarning: pd.rolling_mean is deprecated for ndarrays and will be removed in a future version getattr(pd, method)(self.arr, self.win) For parameters: 'rolling_min' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/rolling.py:56: FutureWarning: pd.rolling_min is deprecated for ndarrays and will be removed in a future version getattr(pd, method)(self.arr, self.win) For parameters: 'rolling_max' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/rolling.py:56: FutureWarning: pd.rolling_max is deprecated for ndarrays and will be removed in a future version getattr(pd, method)(self.arr, self.win) For parameters: 'rolling_var' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/rolling.py:56: FutureWarning: pd.rolling_var is deprecated for ndarrays and will be removed in a future version getattr(pd, method)(self.arr, self.win) For parameters: 'rolling_skew' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/rolling.py:56: FutureWarning: pd.rolling_skew is deprecated for ndarrays and will be removed in a future version getattr(pd, method)(self.arr, self.win) For parameters: 'rolling_kurt' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/rolling.py:56: FutureWarning: pd.rolling_kurt is deprecated for ndarrays and will be removed in a future version getattr(pd, method)(self.arr, self.win) For parameters: 'rolling_std' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/rolling.py:56: FutureWarning: pd.rolling_std is deprecated for ndarrays and will be removed in a future version getattr(pd, method)(self.arr, self.win) ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19049
2018-01-03T05:41:26Z
2018-01-06T17:19:13Z
2018-01-06T17:19:13Z
2018-01-07T01:22:21Z
BUG: Index constructor does not maintain CategoricalDtype
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index bd3bee507baa3..1ba92f41edc89 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -300,6 +300,7 @@ Conversion - Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) - Bug in :class:`DatetimeIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`) - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) +- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) Indexing diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py index 52c4a1ad9865a..55a26d57fa1d6 100644 --- a/pandas/core/indexes/base.py +++ b/pandas/core/indexes/base.py @@ -197,13 +197,13 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, # categorical if is_categorical_dtype(data) or is_categorical_dtype(dtype): from .category import CategoricalIndex - return CategoricalIndex(data, copy=copy, name=name, **kwargs) + return CategoricalIndex(data, dtype=dtype, copy=copy, name=name, + **kwargs) # interval - if is_interval_dtype(data): + if is_interval_dtype(data) or is_interval_dtype(dtype): from .interval import IntervalIndex - return IntervalIndex.from_intervals(data, name=name, - copy=copy) + return IntervalIndex(data, dtype=dtype, name=name, copy=copy) # index-like elif isinstance(data, (np.ndarray, Index, ABCSeries)): diff --git a/pandas/tests/indexes/test_category.py b/pandas/tests/indexes/test_category.py index f7328a99195b9..dc4f60ce5f0f1 100644 --- a/pandas/tests/indexes/test_category.py +++ b/pandas/tests/indexes/test_category.py @@ -137,18 +137,27 @@ def 
test_construction_with_categorical_dtype(self): data, cats, ordered = 'a a b b'.split(), 'c b a'.split(), True dtype = CategoricalDtype(categories=cats, ordered=ordered) - result = pd.CategoricalIndex(data, dtype=dtype) - expected = pd.CategoricalIndex(data, categories=cats, - ordered=ordered) + result = CategoricalIndex(data, dtype=dtype) + expected = CategoricalIndex(data, categories=cats, ordered=ordered) tm.assert_index_equal(result, expected, exact=True) - # error to combine categories or ordered and dtype keywords args - with pytest.raises(ValueError, match="Cannot specify both `dtype` and " - "`categories` or `ordered`."): - pd.CategoricalIndex(data, categories=cats, dtype=dtype) - with pytest.raises(ValueError, match="Cannot specify both `dtype` and " - "`categories` or `ordered`."): - pd.CategoricalIndex(data, ordered=ordered, dtype=dtype) + # GH 19032 + result = Index(data, dtype=dtype) + tm.assert_index_equal(result, expected, exact=True) + + # error when combining categories/ordered and dtype kwargs + msg = 'Cannot specify both `dtype` and `categories` or `ordered`.' + with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, categories=cats, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + Index(data, categories=cats, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + CategoricalIndex(data, ordered=ordered, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + Index(data, ordered=ordered, dtype=dtype) def test_create_categorical(self): # https://github.com/pandas-dev/pandas/pull/17513
- [X] closes #19032 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [X] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19048
2018-01-03T05:36:50Z
2018-01-03T11:13:16Z
2018-01-03T11:13:16Z
2018-01-03T15:20:48Z
CLN: ASV sparse
diff --git a/asv_bench/benchmarks/sparse.py b/asv_bench/benchmarks/sparse.py index a46205026481e..dcb7694abc2ad 100644 --- a/asv_bench/benchmarks/sparse.py +++ b/asv_bench/benchmarks/sparse.py @@ -1,211 +1,162 @@ import itertools -from .pandas_vb_common import * +import numpy as np import scipy.sparse -from pandas import SparseSeries, SparseDataFrame, SparseArray +from pandas import (SparseSeries, SparseDataFrame, SparseArray, Series, + date_range, MultiIndex) +from .pandas_vb_common import setup # noqa -class sparse_series_to_frame(object): - goal_time = 0.2 - def setup(self): - self.K = 50 - self.N = 50000 - self.rng = np.asarray(date_range('1/1/2000', periods=self.N, freq='T')) - self.series = {} - for i in range(1, (self.K + 1)): - self.data = np.random.randn(self.N)[:(- i)] - self.this_rng = self.rng[:(- i)] - self.data[100:] = np.nan - self.series[i] = SparseSeries(self.data, index=self.this_rng) +def make_array(size, dense_proportion, fill_value, dtype): + dense_size = int(size * dense_proportion) + arr = np.full(size, fill_value, dtype) + indexer = np.random.choice(np.arange(size), dense_size, replace=False) + arr[indexer] = np.random.choice(np.arange(100, dtype=dtype), dense_size) + return arr - def time_sparse_series_to_frame(self): - SparseDataFrame(self.series) +class SparseSeriesToFrame(object): -class sparse_array_constructor(object): goal_time = 0.2 def setup(self): - np.random.seed(1) - self.int64_10percent = self.make_numeric_array(length=1000000, dense_size=100000, fill_value=0, dtype=np.int64) - self.int64_1percent = self.make_numeric_array(length=1000000, dense_size=10000, fill_value=0, dtype=np.int64) - - self.float64_10percent = self.make_numeric_array(length=1000000, dense_size=100000, fill_value=np.nan, dtype=np.float64) - self.float64_1percent = self.make_numeric_array(length=1000000, dense_size=10000, fill_value=np.nan, dtype=np.float64) - - self.object_nan_fill_value_10percent = self.make_object_array(length=1000000, dense_size=100000, 
fill_value=np.nan) - self.object_nan_fill_value_1percent = self.make_object_array(length=1000000, dense_size=10000, fill_value=np.nan) - - self.object_non_nan_fill_value_10percent = self.make_object_array(length=1000000, dense_size=100000, fill_value=0) - self.object_non_nan_fill_value_1percent = self.make_object_array(length=1000000, dense_size=10000, fill_value=0) - - def make_numeric_array(self, length, dense_size, fill_value, dtype): - arr = np.array([fill_value] * length, dtype=dtype) - indexer = np.unique(np.random.randint(0, length, dense_size)) - arr[indexer] = np.random.randint(0, 100, len(indexer)) - return (arr, fill_value, dtype) - - def make_object_array(self, length, dense_size, fill_value): - elems = np.array(['a', 0.0, False, 1, 2], dtype=np.object) - arr = np.array([fill_value] * length, dtype=np.object) - indexer = np.unique(np.random.randint(0, length, dense_size)) - arr[indexer] = np.random.choice(elems, len(indexer)) - return (arr, fill_value, np.object) - - def time_sparse_array_constructor_int64_10percent(self): - arr, fill_value, dtype = self.int64_10percent - SparseArray(arr, fill_value=fill_value, dtype=dtype) - - def time_sparse_array_constructor_int64_1percent(self): - arr, fill_value, dtype = self.int64_1percent - SparseArray(arr, fill_value=fill_value, dtype=dtype) - - def time_sparse_array_constructor_float64_10percent(self): - arr, fill_value, dtype = self.float64_10percent - SparseArray(arr, fill_value=fill_value, dtype=dtype) - - def time_sparse_array_constructor_float64_1percent(self): - arr, fill_value, dtype = self.float64_1percent - SparseArray(arr, fill_value=fill_value, dtype=dtype) - - def time_sparse_array_constructor_object_nan_fill_value_10percent(self): - arr, fill_value, dtype = self.object_nan_fill_value_10percent - SparseArray(arr, fill_value=fill_value, dtype=dtype) - - def time_sparse_array_constructor_object_nan_fill_value_1percent(self): - arr, fill_value, dtype = self.object_nan_fill_value_1percent - 
SparseArray(arr, fill_value=fill_value, dtype=dtype) + K = 50 + N = 50001 + rng = date_range('1/1/2000', periods=N, freq='T') + self.series = {} + for i in range(1, K): + data = np.random.randn(N)[:-i] + idx = rng[:-i] + data[100:] = np.nan + self.series[i] = SparseSeries(data, index=idx) - def time_sparse_array_constructor_object_non_nan_fill_value_10percent(self): - arr, fill_value, dtype = self.object_non_nan_fill_value_10percent - SparseArray(arr, fill_value=fill_value, dtype=dtype) + def time_series_to_frame(self): + SparseDataFrame(self.series) - def time_sparse_array_constructor_object_non_nan_fill_value_1percent(self): - arr, fill_value, dtype = self.object_non_nan_fill_value_1percent - SparseArray(arr, fill_value=fill_value, dtype=dtype) +class SparseArrayConstructor(object): -class sparse_frame_constructor(object): goal_time = 0.2 + params = ([0.1, 0.01], [0, np.nan], + [np.int64, np.float64, np.object]) + param_names = ['dense_proportion', 'fill_value', 'dtype'] - def time_sparse_frame_constructor(self): - SparseDataFrame(columns=np.arange(100), index=np.arange(1000)) + def setup(self, dense_proportion, fill_value, dtype): + N = 10**6 + self.array = make_array(N, dense_proportion, fill_value, dtype) - def time_sparse_from_scipy(self): - SparseDataFrame(scipy.sparse.rand(1000, 1000, 0.005)) + def time_sparse_array(self, dense_proportion, fill_value, dtype): + SparseArray(self.array, fill_value=fill_value, dtype=dtype) - def time_sparse_from_dict(self): - SparseDataFrame(dict(zip(range(1000), itertools.repeat([0])))) +class SparseDataFrameConstructor(object): -class sparse_series_from_coo(object): goal_time = 0.2 def setup(self): - self.A = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(100, 100)) + N = 1000 + self.arr = np.arange(N) + self.sparse = scipy.sparse.rand(N, N, 0.005) + self.dict = dict(zip(range(N), itertools.repeat([0]))) - def time_sparse_series_from_coo(self): - self.ss = SparseSeries.from_coo(self.A) + def 
time_constructor(self): + SparseDataFrame(columns=self.arr, index=self.arr) + def time_from_scipy(self): + SparseDataFrame(self.sparse) -class sparse_series_to_coo(object): - goal_time = 0.2 + def time_from_dict(self): + SparseDataFrame(self.dict) - def setup(self): - self.s = pd.Series(([np.nan] * 10000)) - self.s[0] = 3.0 - self.s[100] = (-1.0) - self.s[999] = 12.1 - self.s.index = pd.MultiIndex.from_product((range(10), range(10), range(10), range(10))) - self.ss = self.s.to_sparse() - - def time_sparse_series_to_coo(self): - self.ss.to_coo(row_levels=[0, 1], column_levels=[2, 3], sort_labels=True) +class FromCoo(object): -class sparse_arithmetic_int(object): goal_time = 0.2 def setup(self): - np.random.seed(1) - self.a_10percent = self.make_sparse_array(length=1000000, dense_size=100000, fill_value=np.nan) - self.b_10percent = self.make_sparse_array(length=1000000, dense_size=100000, fill_value=np.nan) - - self.a_10percent_zero = self.make_sparse_array(length=1000000, dense_size=100000, fill_value=0) - self.b_10percent_zero = self.make_sparse_array(length=1000000, dense_size=100000, fill_value=0) - - self.a_1percent = self.make_sparse_array(length=1000000, dense_size=10000, fill_value=np.nan) - self.b_1percent = self.make_sparse_array(length=1000000, dense_size=10000, fill_value=np.nan) - - def make_sparse_array(self, length, dense_size, fill_value): - arr = np.array([fill_value] * length, dtype=np.float64) - indexer = np.unique(np.random.randint(0, length, dense_size)) - arr[indexer] = np.random.randint(0, 100, len(indexer)) - return pd.SparseArray(arr, fill_value=fill_value) - - def time_sparse_make_union(self): - self.a_10percent.sp_index.make_union(self.b_10percent.sp_index) + self.matrix = scipy.sparse.coo_matrix(([3.0, 1.0, 2.0], + ([1, 0, 0], [0, 2, 3])), + shape=(100, 100)) - def time_sparse_intersect(self): - self.a_10percent.sp_index.intersect(self.b_10percent.sp_index) - - def time_sparse_addition_10percent(self): - self.a_10percent + self.b_10percent 
+ def time_sparse_series_from_coo(self): + SparseSeries.from_coo(self.matrix) - def time_sparse_addition_10percent_zero(self): - self.a_10percent_zero + self.b_10percent_zero - def time_sparse_addition_1percent(self): - self.a_1percent + self.b_1percent +class ToCoo(object): - def time_sparse_division_10percent(self): - self.a_10percent / self.b_10percent + goal_time = 0.2 - def time_sparse_division_10percent_zero(self): - self.a_10percent_zero / self.b_10percent_zero + def setup(self): + s = Series([np.nan] * 10000) + s[0] = 3.0 + s[100] = -1.0 + s[999] = 12.1 + s.index = MultiIndex.from_product([range(10)] * 4) + self.ss = s.to_sparse() - def time_sparse_division_1percent(self): - self.a_1percent / self.b_1percent + def time_sparse_series_to_coo(self): + self.ss.to_coo(row_levels=[0, 1], + column_levels=[2, 3], + sort_labels=True) +class Arithmetic(object): -class sparse_arithmetic_block(object): goal_time = 0.2 + params = ([0.1, 0.01], [0, np.nan]) + param_names = ['dense_proportion', 'fill_value'] - def setup(self): - np.random.seed(1) - self.a = self.make_sparse_array(length=1000000, num_blocks=1000, - block_size=10, fill_value=np.nan) - self.b = self.make_sparse_array(length=1000000, num_blocks=1000, - block_size=10, fill_value=np.nan) - - self.a_zero = self.make_sparse_array(length=1000000, num_blocks=1000, - block_size=10, fill_value=0) - self.b_zero = self.make_sparse_array(length=1000000, num_blocks=1000, - block_size=10, fill_value=np.nan) + def setup(self, dense_proportion, fill_value): + N = 10**6 + arr1 = make_array(N, dense_proportion, fill_value, np.int64) + self.array1 = SparseArray(arr1, fill_value=fill_value) + arr2 = make_array(N, dense_proportion, fill_value, np.int64) + self.array2 = SparseArray(arr2, fill_value=fill_value) - def make_sparse_array(self, length, num_blocks, block_size, fill_value): - a = np.array([fill_value] * length) - for block in range(num_blocks): - i = np.random.randint(0, length) - a[i:i + block_size] = 
np.random.randint(0, 100, len(a[i:i + block_size])) - return pd.SparseArray(a, fill_value=fill_value) + def time_make_union(self, dense_proportion, fill_value): + self.array1.sp_index.make_union(self.array2.sp_index) - def time_sparse_make_union(self): - self.a.sp_index.make_union(self.b.sp_index) + def time_intersect(self, dense_proportion, fill_value): + self.array1.sp_index.intersect(self.array2.sp_index) - def time_sparse_intersect(self): - self.a.sp_index.intersect(self.b.sp_index) + def time_add(self, dense_proportion, fill_value): + self.array1 + self.array2 - def time_sparse_addition(self): - self.a + self.b + def time_divide(self, dense_proportion, fill_value): + self.array1 / self.array2 - def time_sparse_addition_zero(self): - self.a_zero + self.b_zero - def time_sparse_division(self): - self.a / self.b +class ArithmeticBlock(object): - def time_sparse_division_zero(self): - self.a_zero / self.b_zero + goal_time = 0.2 + params = [np.nan, 0] + param_names = ['fill_value'] + + def setup(self, fill_value): + N = 10**6 + self.arr1 = self.make_block_array(length=N, num_blocks=1000, + block_size=10, fill_value=fill_value) + self.arr2 = self.make_block_array(length=N, num_blocks=1000, + block_size=10, fill_value=fill_value) + + def make_block_array(self, length, num_blocks, block_size, fill_value): + arr = np.full(length, fill_value) + indicies = np.random.choice(np.arange(0, length, block_size), + num_blocks, + replace=False) + for ind in indicies: + arr[ind:ind + block_size] = np.random.randint(0, 100, block_size) + return SparseArray(arr, fill_value=fill_value) + + def time_make_union(self, fill_value): + self.arr1.sp_index.make_union(self.arr2.sp_index) + + def time_intersect(self, fill_value): + self.arr2.sp_index.intersect(self.arr2.sp_index) + + def time_addition(self, fill_value): + self.arr1 + self.arr2 + + def time_division(self, fill_value): + self.arr1 / self.arr2
Simplified and created a top level function that made sparse arrays (which was repeated throughout the file). Otherwise, simplified benchmarks with `param` where possible. ``` $ asv dev -b ^sparse · Discovering benchmarks · Running 15 total benchmarks (1 commits * 1 environments * 15 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 6.67%] ··· Running sparse.Arithmetic.time_add ok [ 6.67%] ···· ================== ======== ======== -- fill_value ------------------ ----------------- dense_proportion 0 nan ================== ======== ======== 0.1 85.6ms 68.0ms 0.01 8.46ms 67.5ms ================== ======== ======== [ 13.33%] ··· Running sparse.Arithmetic.time_divide ok [ 13.33%] ···· ================== ======== ======== -- fill_value ------------------ ----------------- dense_proportion 0 nan ================== ======== ======== 0.1 82.7ms 67.6ms 0.01 8.94ms 67.6ms ================== ======== ======== [ 20.00%] ··· Running sparse.Arithmetic.time_intersect ok [ 20.00%] ···· ================== ======== ======= -- fill_value ------------------ ---------------- dense_proportion 0 nan ================== ======== ======= 0.1 5.72ms 344ms 0.01 356μs 343ms ================== ======== ======= [ 26.67%] ··· Running sparse.Arithmetic.time_make_union ok [ 26.67%] ···· ================== ======== ======= -- fill_value ------------------ ---------------- dense_proportion 0 nan ================== ======== ======= 0.1 79.4ms 715ms 0.01 8.63ms 742ms ================== ======== ======= [ 33.33%] ··· Running sparse.ArithmeticBlock.time_addition ok [ 33.33%] ···· ============ ======== fill_value ------------ -------- nan 8.20ms 0 8.03ms ============ ======== [ 40.00%] ··· Running sparse.ArithmeticBlock.time_division ok [ 40.00%] ···· ============ ======== fill_value ------------ -------- nan 8.39ms 0 8.04ms ============ ======== [ 46.67%] ··· Running 
sparse.ArithmeticBlock.time_intersect ok [ 46.67%] ···· ============ ======== fill_value ------------ -------- nan 3.51ms 0 3.69ms ============ ======== [ 53.33%] ··· Running sparse.ArithmeticBlock.time_make_union ok [ 53.33%] ···· ============ ======== fill_value ------------ -------- nan 7.86ms 0 8.19ms ============ ======== [ 60.00%] ··· Running sparse.FromCoo.time_sparse_series_from_coo 3.30ms [ 66.67%] ··· Running ...se.SparseArrayConstructor.time_sparse_array ok [ 66.67%] ···· ================== ============ ======================== ======== dense_proportion fill_value dtype ------------------ ------------ ------------------------ -------- 0.1 0 <type 'numpy.int64'> 46.7ms 0.1 0 <type 'numpy.float64'> 47.1ms 0.1 0 <type 'object'> 104ms 0.1 nan <type 'numpy.int64'> 364ms 0.1 nan <type 'numpy.float64'> 47.7ms 0.1 nan <type 'object'> 116ms 0.01 0 <type 'numpy.int64'> 8.80ms 0.01 0 <type 'numpy.float64'> 8.71ms 0.01 0 <type 'object'> 67.5ms 0.01 nan <type 'numpy.int64'> 380ms 0.01 nan <type 'numpy.float64'> 9.57ms 0.01 nan <type 'object'> 64.7ms ================== ============ ======================== ======== [ 73.33%] ··· Running ...SparseDataFrameConstructor.time_constructor 6.39s [ 80.00%] ··· Running ...e.SparseDataFrameConstructor.time_from_dict 245ms [ 86.67%] ··· Running ....SparseDataFrameConstructor.time_from_scipy 563ms [ 93.33%] ··· Running ...se.SparseSeriesToFrame.time_series_to_frame 280ms [100.00%] ··· Running sparse.ToCoo.time_sparse_series_to_coo 43.9ms ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19047
2018-01-03T04:54:05Z
2018-01-03T11:16:35Z
2018-01-03T11:16:35Z
2020-02-19T18:46:25Z
CLN: ASV series_methods
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py index 0ce003d1a9277..478aba278029c 100644 --- a/asv_bench/benchmarks/series_methods.py +++ b/asv_bench/benchmarks/series_methods.py @@ -1,185 +1,119 @@ -from .pandas_vb_common import * +from datetime import datetime +import numpy as np +import pandas.util.testing as tm +from pandas import Series, date_range, NaT -class series_constructor_no_data_datetime_index(object): - goal_time = 0.2 - - def setup(self): - self.dr = pd.date_range( - start=datetime(2015,10,26), - end=datetime(2016,1,1), - freq='50s' - ) # ~100k long - - def time_series_constructor_no_data_datetime_index(self): - Series(data=None, index=self.dr) - - -class series_constructor_dict_data_datetime_index(object): - goal_time = 0.2 - - def setup(self): - self.dr = pd.date_range( - start=datetime(2015, 10, 26), - end=datetime(2016, 1, 1), - freq='50s' - ) # ~100k long - self.data = {d: v for d, v in zip(self.dr, range(len(self.dr)))} +from .pandas_vb_common import setup # noqa - def time_series_constructor_no_data_datetime_index(self): - Series(data=self.data, index=self.dr) +class SeriesConstructor(object): -class series_isin_int64(object): goal_time = 0.2 + params = [None, 'dict'] + param_names = ['data'] - def setup(self): - self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64') - self.s4 = Series(np.random.randint(1, 100, 10000000)).astype('int64') - self.values = [1, 2] + def setup(self, data): + self.idx = date_range(start=datetime(2015, 10, 26), + end=datetime(2016, 1, 1), + freq='50s') + dict_data = dict(zip(self.idx, range(len(self.idx)))) + self.data = None if data is None else dict_data - def time_series_isin_int64(self): - self.s3.isin(self.values) + def time_constructor(self, data): + Series(data=self.data, index=self.idx) - def time_series_isin_int64_large(self): - self.s4.isin(self.values) +class IsIn(object): -class series_isin_object(object): goal_time = 0.2 + params = ['int64', 
'object'] + param_names = ['dtype'] - def setup(self): - self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64') + def setup(self, dtype): + self.s = Series(np.random.randint(1, 10, 100000)).astype(dtype) self.values = [1, 2] - self.s4 = self.s3.astype('object') - def time_series_isin_object(self): - self.s4.isin(self.values) + def time_isin(self, dtypes): + self.s.isin(self.values) -class series_nlargest1(object): - goal_time = 0.2 - - def setup(self): - self.s1 = Series(np.random.randn(10000)) - self.s2 = Series(np.random.randint(1, 10, 10000)) - self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64') - self.values = [1, 2] - self.s4 = self.s3.astype('object') - - def time_series_nlargest1(self): - self.s1.nlargest(3, keep='last') - self.s1.nlargest(3, keep='first') - +class NSort(object): -class series_nlargest2(object): goal_time = 0.2 + params = ['last', 'first'] + param_names = ['keep'] - def setup(self): - self.s1 = Series(np.random.randn(10000)) - self.s2 = Series(np.random.randint(1, 10, 10000)) - self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64') - self.values = [1, 2] - self.s4 = self.s3.astype('object') - - def time_series_nlargest2(self): - self.s2.nlargest(3, keep='last') - self.s2.nlargest(3, keep='first') + def setup(self, keep): + self.s = Series(np.random.randint(1, 10, 100000)) + def time_nlargest(self, keep): + self.s.nlargest(3, keep=keep) -class series_nsmallest2(object): - goal_time = 0.2 + def time_nsmallest(self, keep): + self.s.nsmallest(3, keep=keep) - def setup(self): - self.s1 = Series(np.random.randn(10000)) - self.s2 = Series(np.random.randint(1, 10, 10000)) - self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64') - self.values = [1, 2] - self.s4 = self.s3.astype('object') - def time_series_nsmallest2(self): - self.s2.nsmallest(3, keep='last') - self.s2.nsmallest(3, keep='first') +class Dropna(object): - -class series_dropna_int64(object): goal_time = 0.2 - - def setup(self): - self.s 
= Series(np.random.randint(1, 10, 1000000)) - - def time_series_dropna_int64(self): + params = ['int', 'datetime'] + param_names = ['dtype'] + + def setup(self, dtype): + N = 10**6 + data = {'int': np.random.randint(1, 10, N), + 'datetime': date_range('2000-01-01', freq='S', periods=N)} + self.s = Series(data[dtype]) + if dtype == 'datetime': + self.s[np.random.randint(1, N, 100)] = NaT + + def time_dropna(self, dtype): self.s.dropna() -class series_dropna_datetime(object): - goal_time = 0.2 - - def setup(self): - self.s = Series(pd.date_range('2000-01-01', freq='S', periods=1000000)) - self.s[np.random.randint(1, 1000000, 100)] = pd.NaT - - def time_series_dropna_datetime(self): - self.s.dropna() - +class Map(object): -class series_map_dict(object): goal_time = 0.2 + params = ['dict', 'Series'] + param_names = 'mapper' - def setup(self): + def setup(self, mapper): map_size = 1000 + map_data = Series(map_size - np.arange(map_size)) + self.map_data = map_data if mapper == 'Series' else map_data.to_dict() self.s = Series(np.random.randint(0, map_size, 10000)) - self.map_dict = {i: map_size - i for i in range(map_size)} - def time_series_map_dict(self): - self.s.map(self.map_dict) + def time_map(self, mapper): + self.s.map(self.map_data) -class series_map_series(object): - goal_time = 0.2 +class Clip(object): - def setup(self): - map_size = 1000 - self.s = Series(np.random.randint(0, map_size, 10000)) - self.map_series = Series(map_size - np.arange(map_size)) - - def time_series_map_series(self): - self.s.map(self.map_series) - - -class series_clip(object): goal_time = 0.2 def setup(self): - self.s = pd.Series(np.random.randn(50)) + self.s = Series(np.random.randn(50)) - def time_series_dropna_datetime(self): + def time_clip(self): self.s.clip(0, 1) -class series_value_counts(object): - goal_time = 0.2 +class ValueCounts(object): - def setup(self): - self.s = Series(np.random.randint(0, 1000, size=100000)) - self.s2 = self.s.astype(float) + goal_time = 0.2 + params = 
['int', 'float', 'object'] + param_names = ['dtype'] - self.K = 1000 - self.N = 100000 - self.uniques = tm.makeStringIndex(self.K).values - self.s3 = Series(np.tile(self.uniques, (self.N // self.K))) + def setup(self, dtype): + self.s = Series(np.random.randint(0, 1000, size=100000)).astype(dtype) - def time_value_counts_int64(self): + def time_value_counts(self, dtype): self.s.value_counts() - def time_value_counts_float64(self): - self.s2.value_counts() - - def time_value_counts_strings(self): - self.s.value_counts() +class Dir(object): -class series_dir(object): goal_time = 0.2 def setup(self):
Utilized `param` and cleaned up some benchmarks. ``` asv dev -b ^series_methods · Discovering benchmarks · Running 9 total benchmarks (1 commits * 1 environments * 9 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 11.11%] ··· Running series_methods.Clip.time_clip 290μs [ 22.22%] ··· Running series_methods.Dir.time_dir_strings 30.5ms [ 33.33%] ··· Running series_methods.Dropna.time_dropna ok [ 33.33%] ···· ========== ======== dtype ---------- -------- int 4.20ms datetime 19.4ms ========== ======== [ 44.44%] ··· Running series_methods.IsIn.time_isin ok [ 44.44%] ···· ======== ======== dtype -------- -------- int64 2.63ms object 5.03ms ======== ======== [ 55.56%] ··· Running series_methods.Map.time_map ok [ 55.56%] ···· ======== ======== m -------- -------- dict 3.54ms Series 2.31ms ======== ======== [ 66.67%] ··· Running series_methods.NSort.time_nlargest ok [ 66.67%] ···· ======= ======== keep ------- -------- last 5.20ms first 5.53ms ======= ======== [ 77.78%] ··· Running series_methods.NSort.time_nsmallest ok [ 77.78%] ···· ======= ======== keep ------- -------- last 4.19ms first 5.00ms ======= ======== [ 88.89%] ··· Running ..._methods.SeriesConstructor.time_constructor ok [ 88.89%] ···· ====== ======= data ------ ------- None 726μs dict 1.56s ====== ======= [100.00%] ··· Running series_methods.ValueCounts.time_value_counts ok [100.00%] ···· ======== ======== dtype -------- -------- int 4.63ms float 7.00ms object 24.7ms ======== ======== ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19046
2018-01-03T04:45:04Z
2018-01-03T11:17:30Z
2018-01-03T11:17:30Z
2018-01-03T17:40:46Z
TST: Add tests for Categorical.is_dtype_equal against Series
diff --git a/pandas/tests/categorical/test_dtypes.py b/pandas/tests/categorical/test_dtypes.py index bad2c27026b31..8973d1196f6a9 100644 --- a/pandas/tests/categorical/test_dtypes.py +++ b/pandas/tests/categorical/test_dtypes.py @@ -6,7 +6,7 @@ import pandas.util.testing as tm from pandas.core.dtypes.dtypes import CategoricalDtype -from pandas import Categorical, Index, CategoricalIndex +from pandas import Categorical, Index, CategoricalIndex, Series class TestCategoricalDtypes(object): @@ -30,6 +30,17 @@ def test_is_equal_dtype(self): CategoricalIndex(c1, categories=list('cab')))) assert not c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)) + # GH 16659 + s1 = Series(c1) + s2 = Series(c2) + s3 = Series(c3) + assert c1.is_dtype_equal(s1) + assert c2.is_dtype_equal(s2) + assert c3.is_dtype_equal(s3) + assert c1.is_dtype_equal(s2) + assert not c1.is_dtype_equal(s3) + assert not c1.is_dtype_equal(s1.astype(object)) + def test_set_dtype_same(self): c = Categorical(['a', 'b', 'c']) result = c._set_dtype(CategoricalDtype(['a', 'b', 'c']))
- [X] closes #16659 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [ ] whatsnew entry Issue is already fixed on 0.22.0 (so really probably fixed in 0.21.x), just adding tests to ensure that there's not a regression. Didn't add a whatsnew entry since this behavior already exists in a released version, but can add one if need be.
https://api.github.com/repos/pandas-dev/pandas/pulls/19045
2018-01-03T01:49:19Z
2018-01-03T11:20:21Z
2018-01-03T11:20:20Z
2018-01-03T15:21:00Z
Tests for TDI issues already fixed
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index a62a737fbba31..9f05f9e14125f 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -312,7 +312,11 @@ Conversion - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) - Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) - Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) - +- Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) +- Bug in :class:`Series`` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` had results cast to ``dtype='int64'`` (:issue:`17250`) +- Bug in :class:`TimedeltaIndex` where division by a ``Series`` would return a ``TimedeltaIndex`` instead of a ``Series`` (issue:`19042`) +- Bug in :class:`Series` with ``dtype='timedelta64[ns]`` where addition or subtraction of ``TimedeltaIndex`` could return a ``Series`` with an incorrect name (issue:`19043`) +- Indexing ^^^^^^^^ diff --git a/pandas/core/indexes/timedeltas.py b/pandas/core/indexes/timedeltas.py index d28a09225e8b8..39ca4f5c8fbdf 100644 --- a/pandas/core/indexes/timedeltas.py +++ b/pandas/core/indexes/timedeltas.py @@ -371,6 +371,9 @@ def _add_delta(self, delta): return result def _evaluate_with_timedelta_like(self, other, op, opstr): + if isinstance(other, ABCSeries): + # GH#19042 + return NotImplemented # allow division by a timedelta if opstr in ['__div__', '__truediv__', '__floordiv__']: diff --git a/pandas/core/ops.py b/pandas/core/ops.py index 554f0cb3803e9..99f7e7309d463 100644 --- a/pandas/core/ops.py +++ b/pandas/core/ops.py @@ -597,7 +597,15 @@ def _align_method_SERIES(left, 
right, align_asobject=False): def _construct_result(left, result, index, name, dtype): - return left._constructor(result, index=index, name=name, dtype=dtype) + """ + If the raw op result has a non-None name (e.g. it is an Index object) and + the name argument is None, then passing name to the constructor will + not be enough; we still need to override the name attribute. + """ + out = left._constructor(result, index=index, dtype=dtype) + + out.name = name + return out def _construct_divmod_result(left, result, index, name, dtype): @@ -687,21 +695,10 @@ def wrapper(left, right, name=name, na_op=na_op): not isinstance(lvalues, ABCDatetimeIndex)): lvalues = lvalues.values - if isinstance(right, (ABCSeries, pd.Index)): - # `left` is always a Series object - res_name = _maybe_match_name(left, right) - else: - res_name = left.name - result = wrap_results(safe_na_op(lvalues, rvalues)) res_name = _get_series_op_result_name(left, right) - return construct_result( - left, - result, - index=left.index, - name=res_name, - dtype=dtype, - ) + return construct_result(left, result, + index=left.index, name=res_name, dtype=dtype) return wrapper diff --git a/pandas/tests/series/test_operators.py b/pandas/tests/series/test_operators.py index 2350477c2302a..dbfeb9715c59e 100644 --- a/pandas/tests/series/test_operators.py +++ b/pandas/tests/series/test_operators.py @@ -1045,6 +1045,74 @@ def test_timedelta_floordiv(self, scalar_td): expected = Series([0, 0, np.nan]) tm.assert_series_equal(result, expected) + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_td64_series_with_tdi(self, names): + # GH#17250 make sure result dtype is correct + # GH#19043 make sure names are propogated correctly + tdi = pd.TimedeltaIndex(['0 days', '1 day'], name=names[0]) + ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1]) + expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], + 
name=names[2]) + + result = tdi + ser + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + result = ser + tdi + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)], + name=names[2]) + + result = tdi - ser + tm.assert_series_equal(result, expected) + assert result.dtype == 'timedelta64[ns]' + + result = ser - tdi + tm.assert_series_equal(result, -expected) + assert result.dtype == 'timedelta64[ns]' + + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_tdi_mul_int_series(self, names): + # GH#19042 + tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], + name=names[0]) + ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1]) + + expected = Series(['0days', '1day', '4days', '9days', '16days'], + dtype='timedelta64[ns]', + name=names[2]) + + result = ser * tdi + tm.assert_series_equal(result, expected) + + # The direct operation tdi * ser still needs to be fixed. + result = ser.__rmul__(tdi) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize('names', [(None, None, None), + ('Egon', 'Venkman', None), + ('NCC1701D', 'NCC1701D', 'NCC1701D')]) + def test_float_series_rdiv_tdi(self, names): + # GH#19042 + # TODO: the direct operation TimedeltaIndex / Series still + # needs to be fixed. + tdi = pd.TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'], + name=names[0]) + ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) + + expected = Series([tdi[n] / ser[n] for n in range(len(ser))], + dtype='timedelta64[ns]', + name=names[2]) + + result = ser.__rdiv__(tdi) + tm.assert_series_equal(result, expected) + class TestDatetimeSeriesArithmetic(object): @pytest.mark.parametrize(
Fix a couple others caused by failing to return NotImplemented or to override names correctly. closes #17250 xref #19042 closes #19043 - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19044
2018-01-03T01:04:07Z
2018-01-05T19:16:23Z
2018-01-05T19:16:23Z
2018-01-05T19:20:34Z
read_json support for orient="table"
diff --git a/doc/source/io.rst b/doc/source/io.rst index 2ef7e6d3b64f4..3e1619d6e1578 100644 --- a/doc/source/io.rst +++ b/doc/source/io.rst @@ -1648,7 +1648,7 @@ with optional parameters: DataFrame - default is ``columns`` - - allowed values are {``split``, ``records``, ``index``, ``columns``, ``values``} + - allowed values are {``split``, ``records``, ``index``, ``columns``, ``values``, ``table``} The format of the JSON string @@ -1732,6 +1732,9 @@ values, index and columns. Name is also included for ``Series``: dfjo.to_json(orient="split") sjo.to_json(orient="split") +**Table oriented** serializes to the JSON `Table Schema`_, allowing for the +preservation of metadata including but not limited to dtypes and index names. + .. note:: Any orient option that encodes to a JSON object will not preserve the ordering of @@ -1833,7 +1836,7 @@ is ``None``. To explicitly force ``Series`` parsing, pass ``typ=series`` DataFrame - default is ``columns`` - - allowed values are {``split``, ``records``, ``index``, ``columns``, ``values``} + - allowed values are {``split``, ``records``, ``index``, ``columns``, ``values``, ``table``} The format of the JSON string @@ -1846,6 +1849,8 @@ is ``None``. To explicitly force ``Series`` parsing, pass ``typ=series`` ``index``; dict like {index -> {column -> value}} ``columns``; dict like {column -> {index -> value}} ``values``; just the values array + ``table``; adhering to the JSON `Table Schema`_ + - ``dtype`` : if True, infer dtypes, if a dict of column to dtype, then use those, if False, then don't infer dtypes at all, default is True, apply only to the data - ``convert_axes`` : boolean, try to convert the axes to the proper dtypes, default is True @@ -2202,7 +2207,39 @@ A few notes on the generated table schema: then ``level_<i>`` is used. -_Table Schema: http://specs.frictionlessdata.io/json-table-schema/ +.. versionadded:: 0.23.0 + +``read_json`` also accepts ``orient='table'`` as an argument. 
This allows for +the preserveration of metadata such as dtypes and index names in a +round-trippable manner. + + .. ipython:: python + + df = pd.DataFrame({'foo': [1, 2, 3, 4], + 'bar': ['a', 'b', 'c', 'd'], + 'baz': pd.date_range('2018-01-01', freq='d', periods=4), + 'qux': pd.Categorical(['a', 'b', 'c', 'c']) + }, index=pd.Index(range(4), name='idx')) + df + df.dtypes + + df.to_json('test.json', orient='table') + new_df = pd.read_json('test.json', orient='table') + new_df + new_df.dtypes + +Please note that the string `index` is not supported with the round trip +format, as it is used by default in ``write_json`` to indicate a missing index +name. + +.. ipython:: python + + df.index.name = 'index' + df.to_json('test.json', orient='table') + new_df = pd.read_json('test.json', orient='table') + print(new_df.index.name) + +.. _Table Schema: http://specs.frictionlessdata.io/json-table-schema/ HTML ---- diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index 77de1851490b2..4edf8f8a62f61 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -145,6 +145,37 @@ Current Behavior s.rank(na_option='top') +.. _whatsnew_0230.enhancements.round-trippable_json: + +JSON read/write round-trippable with ``orient='table'`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A ``DataFrame`` can now be written to and subsequently read back via JSON while preserving metadata through usage of the ``orient='table'`` argument (see :issue:`18912` and :issue:`9146`). Previously, none of the available ``orient`` values guaranteed the preservation of dtypes and index names, amongst other metadata. + +.. 
ipython:: python + + df = pd.DataFrame({'foo': [1, 2, 3, 4], + 'bar': ['a', 'b', 'c', 'd'], + 'baz': pd.date_range('2018-01-01', freq='d', periods=4), + 'qux': pd.Categorical(['a', 'b', 'c', 'c']) + }, index=pd.Index(range(4), name='idx')) + df + df.dtypes + df.to_json('test.json', orient='table') + new_df = pd.read_json('test.json', orient='table') + new_df + new_df.dtypes + +Please note that the string `index` is not supported with the round trip format, as it is used by default in ``write_json`` to indicate a missing index name. + +.. ipython:: python + + df.index.name = 'index' + df.to_json('test.json', orient='table') + new_df = pd.read_json('test.json', orient='table') + new_df + print(new_df.index.name) + .. _whatsnew_0230.enhancements.other: Other Enhancements diff --git a/pandas/io/json/json.py b/pandas/io/json/json.py index 72ec5c59c90af..d1c83ad57f59d 100644 --- a/pandas/io/json/json.py +++ b/pandas/io/json/json.py @@ -16,7 +16,7 @@ from pandas.core.reshape.concat import concat from pandas.io.formats.printing import pprint_thing from .normalize import _convert_to_line_delimits -from .table_schema import build_table_schema +from .table_schema import build_table_schema, parse_table_schema from pandas.core.dtypes.common import is_period_dtype loads = json.loads @@ -261,13 +261,16 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, * when ``typ == 'frame'``, - allowed orients are ``{'split','records','index', - 'columns','values'}`` + 'columns','values', 'table'}`` - default is ``'columns'`` - The DataFrame index must be unique for orients ``'index'`` and ``'columns'``. - The DataFrame columns must be unique for orients ``'index'``, ``'columns'``, and ``'records'``. + .. 
versionadded:: 0.23.0 + 'table' as an allowed value for the ``orient`` argument + typ : type of object to recover (series or frame), default 'frame' dtype : boolean or dict, default True If True, infer dtypes, if a dict of column to dtype, then use those, @@ -336,6 +339,15 @@ def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True, ------- result : Series or DataFrame, depending on the value of `typ`. + Notes + ----- + Specific to ``orient='table'``, if a ``DataFrame`` with a literal ``Index`` + name of `index` gets written with ``write_json``, the subsequent read + operation will incorrectly set the ``Index`` name to ``None``. This is + because `index` is also used by ``write_json`` to denote a missing + ``Index`` name, and the subsequent ``read_json`` operation cannot + distinguish between the two. + See Also -------- DataFrame.to_json @@ -839,6 +851,9 @@ def _parse_no_numpy(self): elif orient == "index": self.obj = DataFrame( loads(json, precise_float=self.precise_float), dtype=None).T + elif orient == 'table': + self.obj = parse_table_schema(json, + precise_float=self.precise_float) else: self.obj = DataFrame( loads(json, precise_float=self.precise_float), dtype=None) diff --git a/pandas/io/json/table_schema.py b/pandas/io/json/table_schema.py index 9cec5b3d6ba49..8da36b64b0914 100644 --- a/pandas/io/json/table_schema.py +++ b/pandas/io/json/table_schema.py @@ -3,6 +3,9 @@ http://specs.frictionlessdata.io/json-table-schema/ """ +import pandas._libs.json as json +from pandas import DataFrame +from pandas.api.types import CategoricalDtype from pandas.core.common import _all_not_none from pandas.core.dtypes.common import ( is_integer_dtype, is_timedelta64_dtype, is_numeric_dtype, @@ -10,6 +13,8 @@ is_categorical_dtype, is_period_dtype, is_string_dtype ) +loads = json.loads + def as_json_table_type(x): """ @@ -75,7 +80,7 @@ def set_default_names(data): return data -def make_field(arr, dtype=None): +def convert_pandas_type_to_json_field(arr, 
dtype=None): dtype = dtype or arr.dtype if arr.name is None: name = 'values' @@ -103,6 +108,69 @@ def make_field(arr, dtype=None): return field +def convert_json_field_to_pandas_type(field): + """ + Converts a JSON field descriptor into its corresponding NumPy / pandas type + + Parameters + ---------- + field + A JSON field descriptor + + Returns + ------- + dtype + + Raises + ----- + ValueError + If the type of the provided field is unknown or currently unsupported + + Examples + -------- + >>> convert_json_field_to_pandas_type({'name': 'an_int', + 'type': 'integer'}) + 'int64' + >>> convert_json_field_to_pandas_type({'name': 'a_categorical', + 'type': 'any', + 'contraints': {'enum': [ + 'a', 'b', 'c']}, + 'ordered': True}) + 'CategoricalDtype(categories=['a', 'b', 'c'], ordered=True)' + >>> convert_json_field_to_pandas_type({'name': 'a_datetime', + 'type': 'datetime'}) + 'datetime64[ns]' + >>> convert_json_field_to_pandas_type({'name': 'a_datetime_with_tz', + 'type': 'datetime', + 'tz': 'US/Central'}) + 'datetime64[ns, US/Central]' + """ + typ = field['type'] + if typ == 'string': + return 'object' + elif typ == 'integer': + return 'int64' + elif typ == 'number': + return 'float64' + elif typ == 'boolean': + return 'bool' + elif typ == 'duration': + return 'timedelta64' + elif typ == 'datetime': + if field.get('tz'): + return 'datetime64[ns, {tz}]'.format(tz=field['tz']) + else: + return 'datetime64[ns]' + elif typ == 'any': + if 'constraints' in field and 'ordered' in field: + return CategoricalDtype(categories=field['constraints']['enum'], + ordered=field['ordered']) + else: + return 'object' + + raise ValueError("Unsupported or invalid field type: {}".format(typ)) + + def build_table_schema(data, index=True, primary_key=None, version=True): """ Create a Table schema from ``data``. 
@@ -158,15 +226,15 @@ def build_table_schema(data, index=True, primary_key=None, version=True): if index: if data.index.nlevels > 1: for level in data.index.levels: - fields.append(make_field(level)) + fields.append(convert_pandas_type_to_json_field(level)) else: - fields.append(make_field(data.index)) + fields.append(convert_pandas_type_to_json_field(data.index)) if data.ndim > 1: for column, s in data.iteritems(): - fields.append(make_field(s)) + fields.append(convert_pandas_type_to_json_field(s)) else: - fields.append(make_field(data)) + fields.append(convert_pandas_type_to_json_field(data)) schema['fields'] = fields if index and data.index.is_unique and primary_key is None: @@ -180,3 +248,65 @@ def build_table_schema(data, index=True, primary_key=None, version=True): if version: schema['pandas_version'] = '0.20.0' return schema + + +def parse_table_schema(json, precise_float): + """ + Builds a DataFrame from a given schema + + Parameters + ---------- + json : + A JSON table schema + precise_float : boolean + Flag controlling precision when decoding string to double values, as + dictated by ``read_json`` + + Returns + ------- + df : DataFrame + + Raises + ------ + NotImplementedError + If the JSON table schema contains either timezone or timedelta data + + Notes + ----- + Because ``write_json`` uses the string `index` to denote a name-less + ``Index``, this function sets the name of the returned ``DataFrame`` to + ``None`` when said string is encountered. Therefore, intentional usage + of `index` as the ``Index`` name is not supported. 
+ + See also + -------- + build_table_schema : inverse function + pandas.read_json + """ + table = loads(json, precise_float=precise_float) + col_order = [field['name'] for field in table['schema']['fields']] + df = DataFrame(table['data'])[col_order] + + dtypes = {field['name']: convert_json_field_to_pandas_type(field) + for field in table['schema']['fields']} + + # Cannot directly use as_type with timezone data on object; raise for now + if any(str(x).startswith('datetime64[ns, ') for x in dtypes.values()): + raise NotImplementedError('table="orient" can not yet read timezone ' + 'data') + + # No ISO constructor for Timedelta as of yet, so need to raise + if 'timedelta64' in dtypes.values(): + raise NotImplementedError('table="orient" can not yet read ' + 'ISO-formatted Timedelta data') + + df = df.astype(dtypes) + + df = df.set_index(table['schema']['primaryKey']) + if len(df.index.names) == 1 and df.index.name == 'index': + df.index.name = None + else: + if all(x.startswith('level_') for x in df.index.names): + df.index.names = [None] * len(df.index.names) + + return df diff --git a/pandas/tests/io/json/test_json_table_schema.py b/pandas/tests/io/json/test_json_table_schema.py index dab56e264b955..76748f30e639b 100644 --- a/pandas/tests/io/json/test_json_table_schema.py +++ b/pandas/tests/io/json/test_json_table_schema.py @@ -12,8 +12,10 @@ from pandas.io.json.table_schema import ( as_json_table_type, build_table_schema, - make_field, + convert_pandas_type_to_json_field, + convert_json_field_to_pandas_type, set_default_names) +import pandas.util.testing as tm class TestBuildSchema(object): @@ -334,62 +336,89 @@ def test_date_format_raises(self): self.df.to_json(orient='table', date_format='iso') self.df.to_json(orient='table') - def test_make_field_int(self): + def test_convert_pandas_type_to_json_field_int(self): data = [1, 2, 3] kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')] for kind in kinds: - result = make_field(kind) + result = 
convert_pandas_type_to_json_field(kind) expected = {"name": "name", "type": 'integer'} assert result == expected - def test_make_field_float(self): + def test_convert_pandas_type_to_json_field_float(self): data = [1., 2., 3.] kinds = [pd.Series(data, name='name'), pd.Index(data, name='name')] for kind in kinds: - result = make_field(kind) + result = convert_pandas_type_to_json_field(kind) expected = {"name": "name", "type": 'number'} assert result == expected - def test_make_field_datetime(self): + def test_convert_pandas_type_to_json_field_datetime(self): data = [1., 2., 3.] kinds = [pd.Series(pd.to_datetime(data), name='values'), pd.to_datetime(data)] for kind in kinds: - result = make_field(kind) + result = convert_pandas_type_to_json_field(kind) expected = {"name": "values", "type": 'datetime'} assert result == expected kinds = [pd.Series(pd.to_datetime(data, utc=True), name='values'), pd.to_datetime(data, utc=True)] for kind in kinds: - result = make_field(kind) + result = convert_pandas_type_to_json_field(kind) expected = {"name": "values", "type": 'datetime', "tz": "UTC"} assert result == expected arr = pd.period_range('2016', freq='A-DEC', periods=4) - result = make_field(arr) + result = convert_pandas_type_to_json_field(arr) expected = {"name": "values", "type": 'datetime', "freq": "A-DEC"} assert result == expected - def test_make_field_categorical(self): + def test_convert_pandas_type_to_json_field_categorical(self): data = ['a', 'b', 'c'] ordereds = [True, False] for ordered in ordereds: arr = pd.Series(pd.Categorical(data, ordered=ordered), name='cats') - result = make_field(arr) + result = convert_pandas_type_to_json_field(arr) expected = {"name": "cats", "type": "any", "constraints": {"enum": data}, "ordered": ordered} assert result == expected arr = pd.CategoricalIndex(data, ordered=ordered, name='cats') - result = make_field(arr) + result = convert_pandas_type_to_json_field(arr) expected = {"name": "cats", "type": "any", "constraints": {"enum": 
data}, "ordered": ordered} assert result == expected + @pytest.mark.parametrize("inp,exp", [ + ({'type': 'integer'}, 'int64'), + ({'type': 'number'}, 'float64'), + ({'type': 'boolean'}, 'bool'), + ({'type': 'duration'}, 'timedelta64'), + ({'type': 'datetime'}, 'datetime64[ns]'), + ({'type': 'datetime', 'tz': 'US/Hawaii'}, 'datetime64[ns, US/Hawaii]'), + ({'type': 'any'}, 'object'), + ({'type': 'any', 'constraints': {'enum': ['a', 'b', 'c']}, + 'ordered': False}, CategoricalDtype(categories=['a', 'b', 'c'], + ordered=False)), + ({'type': 'any', 'constraints': {'enum': ['a', 'b', 'c']}, + 'ordered': True}, CategoricalDtype(categories=['a', 'b', 'c'], + ordered=True)), + ({'type': 'string'}, 'object')]) + def test_convert_json_field_to_pandas_type(self, inp, exp): + field = {'name': 'foo'} + field.update(inp) + assert convert_json_field_to_pandas_type(field) == exp + + @pytest.mark.parametrize("inp", ["geopoint", "geojson", "fake_type"]) + def test_convert_json_field_to_pandas_type_raises(self, inp): + field = {'type': inp} + with tm.assert_raises_regex(ValueError, "Unsupported or invalid field " + "type: {}".format(inp)): + convert_json_field_to_pandas_type(field) + def test_categorical(self): s = pd.Series(pd.Categorical(['a', 'b', 'a'])) s.index.name = 'idx' @@ -471,3 +500,70 @@ def test_mi_falsey_name(self): ('a', 'b')])) result = [x['name'] for x in build_table_schema(df)['fields']] assert result == ['level_0', 'level_1', 0, 1, 2, 3] + + +class TestTableOrientReader(object): + + @pytest.mark.parametrize("index_nm", [ + None, "idx", pytest.param("index", marks=pytest.mark.xfail)]) + @pytest.mark.parametrize("vals", [ + {'ints': [1, 2, 3, 4]}, + {'objects': ['a', 'b', 'c', 'd']}, + {'date_ranges': pd.date_range('2016-01-01', freq='d', periods=4)}, + {'categoricals': pd.Series(pd.Categorical(['a', 'b', 'c', 'c']))}, + {'ordered_cats': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'], + ordered=True))}, + pytest.param({'floats': [1., 2., 3., 4.]}, 
marks=pytest.mark.xfail), + {'floats': [1.1, 2.2, 3.3, 4.4]}, + {'bools': [True, False, False, True]}]) + def test_read_json_table_orient(self, index_nm, vals): + df = DataFrame(vals, index=pd.Index(range(4), name=index_nm)) + out = df.to_json(orient="table") + result = pd.read_json(out, orient="table") + tm.assert_frame_equal(df, result) + + @pytest.mark.parametrize("index_nm", [ + None, "idx", pytest.param("index", marks=pytest.mark.xfail)]) + @pytest.mark.parametrize("vals", [ + {'timedeltas': pd.timedelta_range('1H', periods=4, freq='T')}, + {'timezones': pd.date_range('2016-01-01', freq='d', periods=4, + tz='US/Central')}]) + def test_read_json_table_orient_raises(self, index_nm, vals): + df = DataFrame(vals, index=pd.Index(range(4), name=index_nm)) + out = df.to_json(orient="table") + with tm.assert_raises_regex(NotImplementedError, 'can not yet read '): + pd.read_json(out, orient="table") + + def test_comprehensive(self): + df = DataFrame( + {'A': [1, 2, 3, 4], + 'B': ['a', 'b', 'c', 'c'], + 'C': pd.date_range('2016-01-01', freq='d', periods=4), + # 'D': pd.timedelta_range('1H', periods=4, freq='T'), + 'E': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'])), + 'F': pd.Series(pd.Categorical(['a', 'b', 'c', 'c'], + ordered=True)), + 'G': [1.1, 2.2, 3.3, 4.4], + # 'H': pd.date_range('2016-01-01', freq='d', periods=4, + # tz='US/Central'), + 'I': [True, False, False, True], + }, + index=pd.Index(range(4), name='idx')) + + out = df.to_json(orient="table") + result = pd.read_json(out, orient="table") + tm.assert_frame_equal(df, result) + + @pytest.mark.parametrize("index_names", [[None, None], ['foo', 'bar']]) + def test_multiindex(self, index_names): + # GH 18912 + df = pd.DataFrame( + [["Arr", "alpha", [1, 2, 3, 4]], + ["Bee", "Beta", [10, 20, 30, 40]]], + index=[["A", "B"], ["Null", "Eins"]], + columns=["Aussprache", "Griechisch", "Args"] + ) + df.index.names = index_names + out = df.to_json(orient="table") + result = pd.read_json(out, orient="table") + 
tm.assert_frame_equal(df, result)
- [X] closes #18912 and closes #9146 - [X] tests added / passed - [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry This is a starting point for ``read_json`` supporting ``orient='table'`` for basic types. I'm explicitly raising in the instances that don't work, namely timezone aware datetimes and``Timedelta``. I marked floats with no decimal values as an xfail (these are being converted to ``int64`` by the ``_try_convert_data`` method). ``Timedelta`` would be best fixed in a separate change providing a constructor equivalent to the ``isoformat`` method (see #19040). The new tests could also all be parametrized, but I've kept as is before going too far with this change in case of feedback
https://api.github.com/repos/pandas-dev/pandas/pulls/19039
2018-01-02T21:13:19Z
2018-01-06T17:33:07Z
2018-01-06T17:33:07Z
2018-01-06T21:38:48Z
Fix bugs in FY5253Quarter and LastWeekOfMonth
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index ea4245cb3281e..924c91ef09c97 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -307,6 +307,7 @@ Conversion - Bug in :class:`FY5253` where ``datetime`` addition and subtraction incremented incorrectly for dates on the year-end but not normalized to midnight (:issue:`18854`) - Bug in :class:`DatetimeIndex` where adding or subtracting an array-like of ``DateOffset`` objects either raised (``np.array``, ``pd.Index``) or broadcast incorrectly (``pd.Series``) (:issue:`18849`) - Bug in :class:`Series` floor-division where operating on a scalar ``timedelta`` raises an exception (:issue:`18846`) +- Bug in :class:`FY5253Quarter`, :class:`LastWeekOfMonth` where rollback and rollforward behavior was inconsistent with addition and subtraction behavior (:issue:`18854`) - Bug in :class:`Index` constructor with ``dtype=CategoricalDtype(...)`` where ``categories`` and ``ordered`` are not maintained (issue:`19032`) diff --git a/pandas/tests/tseries/offsets/test_fiscal.py b/pandas/tests/tseries/offsets/test_fiscal.py index 09206439e9996..f71480e1f83a5 100644 --- a/pandas/tests/tseries/offsets/test_fiscal.py +++ b/pandas/tests/tseries/offsets/test_fiscal.py @@ -633,3 +633,25 @@ def test_fy5253_nearest_onoffset(): fast = offset.onOffset(ts) slow = (ts + offset) - offset == ts assert fast == slow + + +def test_fy5253qtr_onoffset_nearest(): + # GH#19036 + ts = Timestamp('1985-09-02 23:57:46.232550356-0300', + tz='Atlantic/Bermuda') + offset = FY5253Quarter(n=3, qtr_with_extra_week=1, startingMonth=2, + variation="nearest", weekday=0) + fast = offset.onOffset(ts) + slow = (ts + offset) - offset == ts + assert fast == slow + + +def test_fy5253qtr_onoffset_last(): + # GH#19036 + offset = FY5253Quarter(n=-2, qtr_with_extra_week=1, + startingMonth=7, variation="last", weekday=2) + ts = Timestamp('2011-01-26 19:03:40.331096129+0200', + tz='Africa/Windhoek') + slow = (ts + 
offset) - offset == ts + fast = offset.onOffset(ts) + assert fast == slow diff --git a/pandas/tests/tseries/offsets/test_offsets.py b/pandas/tests/tseries/offsets/test_offsets.py index 7c7e5c4a5a35c..1a032182319f2 100644 --- a/pandas/tests/tseries/offsets/test_offsets.py +++ b/pandas/tests/tseries/offsets/test_offsets.py @@ -3153,3 +3153,21 @@ def test_weekofmonth_onoffset(): fast = offset.onOffset(ts) slow = (ts + offset) - offset == ts assert fast == slow + + +def test_last_week_of_month_on_offset(): + # GH#19036, GH#18977 _adjust_dst was incorrect for LastWeekOfMonth + offset = LastWeekOfMonth(n=4, weekday=6) + ts = Timestamp('1917-05-27 20:55:27.084284178+0200', + tz='Europe/Warsaw') + slow = (ts + offset) - offset == ts + fast = offset.onOffset(ts) + assert fast == slow + + # negative n + offset = LastWeekOfMonth(n=-4, weekday=5) + ts = Timestamp('2005-08-27 05:01:42.799392561-0500', + tz='America/Rainy_River') + slow = (ts + offset) - offset == ts + fast = offset.onOffset(ts) + assert fast == slow diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py index 4f3c24ba534ff..7c5fe2f0314e4 100644 --- a/pandas/tseries/offsets.py +++ b/pandas/tseries/offsets.py @@ -1468,6 +1468,7 @@ class LastWeekOfMonth(_WeekOfMonthMixin, DateOffset): """ _prefix = 'LWOM' + _adjust_dst = True def __init__(self, n=1, normalize=False, weekday=None): self.n = self._validate_n(n) @@ -1727,8 +1728,7 @@ class FY5253(DateOffset): such as retail, manufacturing and parking industry. For more information see: - http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar - + http://en.wikipedia.org/wiki/4-4-5_calendar The year may either: - end on the last X day of the Y month. @@ -1922,7 +1922,7 @@ class FY5253Quarter(DateOffset): such as retail, manufacturing and parking industry. For more information see: - http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar + http://en.wikipedia.org/wiki/4-4-5_calendar The year may either: - end on the last X day of the Y month. 
@@ -1982,46 +1982,77 @@ def _offset(self): def isAnchored(self): return self.n == 1 and self._offset.isAnchored() + def _rollback_to_year(self, other): + """roll `other` back to the most recent date that was on a fiscal year + end. Return the date of that year-end, the number of full quarters + elapsed between that year-end and other, and the remaining Timedelta + since the most recent quarter-end. + + Parameters + ---------- + other : datetime or Timestamp + + Returns + ------- + tuple of + prev_year_end : Timestamp giving most recent fiscal year end + num_qtrs : int + tdelta : Timedelta + """ + num_qtrs = 0 + + norm = Timestamp(other).tz_localize(None) + start = self._offset.rollback(norm) + # Note: start <= norm and self._offset.onOffset(start) + + if start < norm: + # roll adjustment + qtr_lens = self.get_weeks(norm) + + # check thet qtr_lens is consistent with self._offset addition + end = shift_day(start, days=7 * sum(qtr_lens)) + assert self._offset.onOffset(end), (start, end, qtr_lens) + + tdelta = norm - start + for qlen in qtr_lens: + if qlen * 7 <= tdelta.days: + num_qtrs += 1 + tdelta -= Timedelta(days=qlen * 7) + else: + break + else: + tdelta = Timedelta(0) + + # Note: we always have tdelta.value >= 0 + return start, num_qtrs, tdelta + @apply_wraps def apply(self, other): - base = other + # Note: self.n == 0 is not allowed. n = self.n - if n > 0: - while n > 0: - if not self._offset.onOffset(other): - qtr_lens = self.get_weeks(other) - start = other - self._offset - else: - start = other - qtr_lens = self.get_weeks(other + self._offset) + prev_year_end, num_qtrs, tdelta = self._rollback_to_year(other) + res = prev_year_end + n += num_qtrs + if self.n <= 0 and tdelta.value > 0: + n += 1 - for weeks in qtr_lens: - start += timedelta(weeks=weeks) - if start > other: - other = start - n -= 1 - break + # Possible speedup by handling years first. 
+ years = n // 4 + if years: + res += self._offset * years + n -= years * 4 - else: - n = -n - while n > 0: - if not self._offset.onOffset(other): - qtr_lens = self.get_weeks(other) - end = other + self._offset - else: - end = other - qtr_lens = self.get_weeks(other) - - for weeks in reversed(qtr_lens): - end -= timedelta(weeks=weeks) - if end < other: - other = end - n -= 1 - break - other = datetime(other.year, other.month, other.day, - base.hour, base.minute, base.second, base.microsecond) - return other + # Add an extra day to make *sure* we are getting the quarter lengths + # for the upcoming year, not the previous year + qtr_lens = self.get_weeks(res + Timedelta(days=1)) + + # Note: we always have 0 <= n < 4 + weeks = sum(qtr_lens[:n]) + if weeks: + res = shift_day(res, days=weeks * 7) + + return res def get_weeks(self, dt): ret = [13] * 4 @@ -2034,16 +2065,15 @@ def get_weeks(self, dt): return ret def year_has_extra_week(self, dt): - if self._offset.onOffset(dt): - prev_year_end = dt - self._offset - next_year_end = dt - else: - next_year_end = dt + self._offset - prev_year_end = dt - self._offset - - week_in_year = (next_year_end - prev_year_end).days / 7 + # Avoid round-down errors --> normalize to get + # e.g. '370D' instead of '360D23H' + norm = Timestamp(dt).normalize().tz_localize(None) - return week_in_year == 53 + next_year_end = self._offset.rollforward(norm) + prev_year_end = norm - self._offset + weeks_in_year = (next_year_end - prev_year_end).days / 7 + assert weeks_in_year in [52, 53], weeks_in_year + return weeks_in_year == 53 def onOffset(self, dt): if self.normalize and not _is_normalized(dt): @@ -2056,8 +2086,8 @@ def onOffset(self, dt): qtr_lens = self.get_weeks(dt) current = next_year_end - for qtr_len in qtr_lens[0:4]: - current += timedelta(weeks=qtr_len) + for qtr_len in qtr_lens: + current = shift_day(current, days=qtr_len * 7) if dt == current: return True return False
Similar to [need to lookup reference], this fixes bugs in `LastWeekOfMonth` and `FY5253Quarter` to ensure that the fast implementation `offset.onOffset(ts)` matches the slow (base class) implementation `(ts + offset) - offset == ts`. I haven't yet looked at BusinessHour and CustomBusinessHour, but other than that I think these are the last classes that have this category of bug. - [ ] closes #xxxx - [x] tests added / passed - [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff` - [x] whatsnew entry
https://api.github.com/repos/pandas-dev/pandas/pulls/19036
2018-01-02T18:13:40Z
2018-01-04T14:15:07Z
2018-01-04T14:15:07Z
2018-01-23T04:40:41Z
ENH: Add matmul to DataFrame, Series
diff --git a/doc/source/whatsnew/v0.23.0.txt b/doc/source/whatsnew/v0.23.0.txt index af5371b06192f..ce63cb2473bc4 100644 --- a/doc/source/whatsnew/v0.23.0.txt +++ b/doc/source/whatsnew/v0.23.0.txt @@ -403,6 +403,7 @@ Other Enhancements ``SQLAlchemy`` dialects supporting multivalue inserts include: ``mysql``, ``postgresql``, ``sqlite`` and any dialect with ``supports_multivalues_insert``. (:issue:`14315`, :issue:`8953`) - :func:`read_html` now accepts a ``displayed_only`` keyword argument to controls whether or not hidden elements are parsed (``True`` by default) (:issue:`20027`) - zip compression is supported via ``compression=zip`` in :func:`DataFrame.to_pickle`, :func:`Series.to_pickle`, :func:`DataFrame.to_csv`, :func:`Series.to_csv`, :func:`DataFrame.to_json`, :func:`Series.to_json`. (:issue:`17778`) +- :class:`DataFrame` and :class:`Series` now support matrix multiplication (```@```) operator (:issue:`10259`) for Python>=3.5 .. _whatsnew_0230.api_breaking: diff --git a/pandas/core/frame.py b/pandas/core/frame.py index ace975385ce32..9626079660771 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -863,7 +863,8 @@ def __len__(self): def dot(self, other): """ - Matrix multiplication with DataFrame or Series objects + Matrix multiplication with DataFrame or Series objects. Can also be + called using `self @ other` in Python >= 3.5. 
Parameters ---------- @@ -905,6 +906,14 @@ def dot(self, other): else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other)) + def __matmul__(self, other): + """ Matrix multiplication using binary `@` operator in Python>=3.5 """ + return self.dot(other) + + def __rmatmul__(self, other): + """ Matrix multiplication using binary `@` operator in Python>=3.5 """ + return self.T.dot(np.transpose(other)).T + # ---------------------------------------------------------------------- # IO methods (to / from other formats) diff --git a/pandas/core/series.py b/pandas/core/series.py index 1b07f24e148e3..f3630dc43fbd1 100644 --- a/pandas/core/series.py +++ b/pandas/core/series.py @@ -1994,7 +1994,7 @@ def autocorr(self, lag=1): def dot(self, other): """ Matrix multiplication with DataFrame or inner-product with Series - objects + objects. Can also be called using `self @ other` in Python >= 3.5. Parameters ---------- @@ -2033,6 +2033,14 @@ def dot(self, other): else: # pragma: no cover raise TypeError('unsupported type: %s' % type(other)) + def __matmul__(self, other): + """ Matrix multiplication using binary `@` operator in Python>=3.5 """ + return self.dot(other) + + def __rmatmul__(self, other): + """ Matrix multiplication using binary `@` operator in Python>=3.5 """ + return self.dot(other) + @Substitution(klass='Series') @Appender(base._shared_docs['searchsorted']) @deprecate_kwarg(old_arg_name='v', new_arg_name='value') diff --git a/pandas/tests/frame/test_analytics.py b/pandas/tests/frame/test_analytics.py index 8efa140237614..7949636fcafbb 100644 --- a/pandas/tests/frame/test_analytics.py +++ b/pandas/tests/frame/test_analytics.py @@ -5,6 +5,7 @@ import warnings from datetime import timedelta from distutils.version import LooseVersion +import operator import sys import pytest @@ -13,7 +14,7 @@ from numpy.random import randn import numpy as np -from pandas.compat import lrange, product +from pandas.compat import lrange, product, PY35 from pandas import 
(compat, isna, notna, DataFrame, Series, MultiIndex, date_range, Timestamp, Categorical, _np_version_under1p15) @@ -2091,7 +2092,6 @@ def test_clip_with_na_args(self): self.frame) # Matrix-like - def test_dot(self): a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], columns=['p', 'q', 'r', 's']) @@ -2144,6 +2144,63 @@ def test_dot(self): with tm.assert_raises_regex(ValueError, 'aligned'): df.dot(df2) + @pytest.mark.skipif(not PY35, + reason='matmul supported for Python>=3.5') + def test_matmul(self): + # matmul test is for GH #10259 + a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'], + columns=['p', 'q', 'r', 's']) + b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'], + columns=['one', 'two']) + + # DataFrame @ DataFrame + result = operator.matmul(a, b) + expected = DataFrame(np.dot(a.values, b.values), + index=['a', 'b', 'c'], + columns=['one', 'two']) + tm.assert_frame_equal(result, expected) + + # DataFrame @ Series + result = operator.matmul(a, b.one) + expected = Series(np.dot(a.values, b.one.values), + index=['a', 'b', 'c']) + tm.assert_series_equal(result, expected) + + # np.array @ DataFrame + result = operator.matmul(a.values, b) + expected = np.dot(a.values, b.values) + tm.assert_almost_equal(result, expected) + + # nested list @ DataFrame (__rmatmul__) + result = operator.matmul(a.values.tolist(), b) + expected = DataFrame(np.dot(a.values, b.values), + index=['a', 'b', 'c'], + columns=['one', 'two']) + tm.assert_almost_equal(result.values, expected.values) + + # mixed dtype DataFrame @ DataFrame + a['q'] = a.q.round().astype(int) + result = operator.matmul(a, b) + expected = DataFrame(np.dot(a.values, b.values), + index=['a', 'b', 'c'], + columns=['one', 'two']) + tm.assert_frame_equal(result, expected) + + # different dtypes DataFrame @ DataFrame + a = a.astype(int) + result = operator.matmul(a, b) + expected = DataFrame(np.dot(a.values, b.values), + index=['a', 'b', 'c'], + columns=['one', 'two']) + 
tm.assert_frame_equal(result, expected) + + # unaligned + df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4)) + df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3]) + + with tm.assert_raises_regex(ValueError, 'aligned'): + operator.matmul(df, df2) + @pytest.fixture def df_duplicates(): diff --git a/pandas/tests/series/test_analytics.py b/pandas/tests/series/test_analytics.py index 0e6e44e839464..f93aaf2115601 100644 --- a/pandas/tests/series/test_analytics.py +++ b/pandas/tests/series/test_analytics.py @@ -3,7 +3,7 @@ from itertools import product from distutils.version import LooseVersion - +import operator import pytest from numpy import nan @@ -18,7 +18,7 @@ from pandas.core.indexes.timedeltas import Timedelta import pandas.core.nanops as nanops -from pandas.compat import lrange, range +from pandas.compat import lrange, range, PY35 from pandas import compat from pandas.util.testing import (assert_series_equal, assert_almost_equal, assert_frame_equal, assert_index_equal) @@ -921,6 +921,52 @@ def test_dot(self): pytest.raises(Exception, a.dot, a.values[:3]) pytest.raises(ValueError, a.dot, b.T) + @pytest.mark.skipif(not PY35, + reason='matmul supported for Python>=3.5') + def test_matmul(self): + # matmul test is for GH #10259 + a = Series(np.random.randn(4), index=['p', 'q', 'r', 's']) + b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'], + columns=['p', 'q', 'r', 's']).T + + # Series @ DataFrame + result = operator.matmul(a, b) + expected = Series(np.dot(a.values, b.values), index=['1', '2', '3']) + assert_series_equal(result, expected) + + # DataFrame @ Series + result = operator.matmul(b.T, a) + expected = Series(np.dot(b.T.values, a.T.values), + index=['1', '2', '3']) + assert_series_equal(result, expected) + + # Series @ Series + result = operator.matmul(a, a) + expected = np.dot(a.values, a.values) + assert_almost_equal(result, expected) + + # np.array @ Series (__rmatmul__) + result = operator.matmul(a.values, a) + expected 
= np.dot(a.values, a.values) + assert_almost_equal(result, expected) + + # mixed dtype DataFrame @ Series + a['p'] = int(a.p) + result = operator.matmul(b.T, a) + expected = Series(np.dot(b.T.values, a.T.values), + index=['1', '2', '3']) + assert_series_equal(result, expected) + + # different dtypes DataFrame @ Series + a = a.astype(int) + result = operator.matmul(b.T, a) + expected = Series(np.dot(b.T.values, a.T.values), + index=['1', '2', '3']) + assert_series_equal(result, expected) + + pytest.raises(Exception, a.dot, a.values[:3]) + pytest.raises(ValueError, a.dot, b.T) + def test_value_counts_nunique(self): # basics.rst doc example
Fixes #10259. Not sure about the exact set of types that should be supported, for now I just copied the classes that are referenced in `dot`.
https://api.github.com/repos/pandas-dev/pandas/pulls/19035
2018-01-02T18:06:28Z
2018-03-30T21:56:37Z
2018-03-30T21:56:37Z
2018-03-30T21:57:22Z
CLN: ASV indexing
diff --git a/asv_bench/benchmarks/index_object.py b/asv_bench/benchmarks/index_object.py index d73b216478ad5..970760373632a 100644 --- a/asv_bench/benchmarks/index_object.py +++ b/asv_bench/benchmarks/index_object.py @@ -1,7 +1,7 @@ import numpy as np import pandas.util.testing as tm -from pandas import (Series, date_range, DatetimeIndex, Index, MultiIndex, - RangeIndex) +from pandas import (Series, date_range, DatetimeIndex, Index, RangeIndex, + Float64Index) from .pandas_vb_common import setup # noqa @@ -84,66 +84,6 @@ def time_modulo(self, dtype): self.index % 2 -class Duplicated(object): - - goal_time = 0.2 - - def setup(self): - n, k = 200, 5000 - levels = [np.arange(n), - tm.makeStringIndex(n).values, - 1000 + np.arange(n)] - labels = [np.random.choice(n, (k * n)) for lev in levels] - self.mi = MultiIndex(levels=levels, labels=labels) - - def time_duplicated(self): - self.mi.duplicated() - - -class Sortlevel(object): - - goal_time = 0.2 - - def setup(self): - n = 1182720 - low, high = -4096, 4096 - arrs = [np.repeat(np.random.randint(low, high, (n // k)), k) - for k in [11, 7, 5, 3, 1]] - self.mi_int = MultiIndex.from_arrays(arrs)[np.random.permutation(n)] - - a = np.repeat(np.arange(100), 1000) - b = np.tile(np.arange(1000), 100) - self.mi = MultiIndex.from_arrays([a, b]) - self.mi = self.mi.take(np.random.permutation(np.arange(100000))) - - def time_sortlevel_int64(self): - self.mi_int.sortlevel() - - def time_sortlevel_zero(self): - self.mi.sortlevel(0) - - def time_sortlevel_one(self): - self.mi.sortlevel(1) - - -class MultiIndexValues(object): - - goal_time = 0.2 - - def setup_cache(self): - - level1 = range(1000) - level2 = date_range(start='1/1/2012', periods=100) - mi = MultiIndex.from_product([level1, level2]) - return mi - - def time_datetime_level_values_copy(self, mi): - mi.copy().values - - def time_datetime_level_values_sliced(self, mi): - mi[:10].values - - class Range(object): goal_time = 0.2 @@ -222,3 +162,16 @@ def time_slice(self, dtype): 
def time_slice_step(self, dtype): self.idx[::2] + + +class Float64IndexMethod(object): + # GH 13166 + goal_time = 0.2 + + def setup(self): + N = 100000 + a = np.arange(N) + self.ind = Float64Index(a * 4.8000000418824129e-08) + + def time_get_loc(self): + self.ind.get_loc(0) diff --git a/asv_bench/benchmarks/indexing.py b/asv_bench/benchmarks/indexing.py index 5b12f6ea89614..b35f00db2b054 100644 --- a/asv_bench/benchmarks/indexing.py +++ b/asv_bench/benchmarks/indexing.py @@ -1,284 +1,233 @@ -from .pandas_vb_common import * +import numpy as np +import pandas.util.testing as tm +from pandas import (Series, DataFrame, MultiIndex, Int64Index, Float64Index, + IntervalIndex, IndexSlice, concat, date_range) +from .pandas_vb_common import setup, Panel # noqa -class Int64Indexing(object): +class NumericSeriesIndexing(object): + goal_time = 0.2 + params = [Int64Index, Float64Index] + param = ['index'] - def setup(self): - self.s = Series(np.random.rand(1000000)) + def setup(self, index): + N = 10**6 + idx = index(range(N)) + self.data = Series(np.random.rand(N), index=idx) + self.array = np.arange(10000) + self.array_list = self.array.tolist() - def time_getitem_scalar(self): - self.s[800000] + def time_getitem_scalar(self, index): + self.data[800000] - def time_getitem_slice(self): - self.s[:800000] + def time_getitem_slice(self, index): + self.data[:800000] - def time_getitem_list_like(self): - self.s[[800000]] + def time_getitem_list_like(self, index): + self.data[[800000]] - def time_getitem_array(self): - self.s[np.arange(10000)] + def time_getitem_array(self, index): + self.data[self.array] - def time_getitem_lists(self): - self.s[np.arange(10000).tolist()] + def time_getitem_lists(self, index): + self.data[self.array_list] - def time_iloc_array(self): - self.s.iloc[np.arange(10000)] + def time_iloc_array(self, index): + self.data.iloc[self.array] - def time_iloc_list_like(self): - self.s.iloc[[800000]] + def time_iloc_list_like(self, index): + self.data.iloc[[800000]] 
- def time_iloc_scalar(self): - self.s.iloc[800000] + def time_iloc_scalar(self, index): + self.data.iloc[800000] - def time_iloc_slice(self): - self.s.iloc[:800000] + def time_iloc_slice(self, index): + self.data.iloc[:800000] - def time_ix_array(self): - self.s.ix[np.arange(10000)] + def time_ix_array(self, index): + self.data.ix[self.array] - def time_ix_list_like(self): - self.s.ix[[800000]] + def time_ix_list_like(self, index): + self.data.ix[[800000]] - def time_ix_scalar(self): - self.s.ix[800000] + def time_ix_scalar(self, index): + self.data.ix[800000] - def time_ix_slice(self): - self.s.ix[:800000] + def time_ix_slice(self, index): + self.data.ix[:800000] - def time_loc_array(self): - self.s.loc[np.arange(10000)] + def time_loc_array(self, index): + self.data.loc[self.array] - def time_loc_list_like(self): - self.s.loc[[800000]] + def time_loc_list_like(self, index): + self.data.loc[[800000]] - def time_loc_scalar(self): - self.s.loc[800000] + def time_loc_scalar(self, index): + self.data.loc[800000] - def time_loc_slice(self): - self.s.loc[:800000] + def time_loc_slice(self, index): + self.data.loc[:800000] -class StringIndexing(object): - goal_time = 0.2 +class NonNumericSeriesIndexing(object): - def setup(self): - self.index = tm.makeStringIndex(1000000) - self.s = Series(np.random.rand(1000000), index=self.index) - self.lbl = self.s.index[800000] - - def time_getitem_label_slice(self): + goal_time = 0.2 + params = ['string', 'datetime'] + param_names = ['index'] + + def setup(self, index): + N = 10**5 + indexes = {'string': tm.makeStringIndex(N), + 'datetime': date_range('1900', periods=N, freq='s')} + index = indexes[index] + self.s = Series(np.random.rand(N), index=index) + self.lbl = index[80000] + + def time_getitem_label_slice(self, index): self.s[:self.lbl] - def time_getitem_pos_slice(self): - self.s[:800000] + def time_getitem_pos_slice(self, index): + self.s[:80000] - def time_get_value(self): + def time_get_value(self, index): 
self.s.get_value(self.lbl) + def time_getitem_scalar(self, index): + self.s[self.lbl] -class DatetimeIndexing(object): - goal_time = 0.2 - def setup(self): - tm.N = 1000 - self.ts = tm.makeTimeSeries() - self.dt = self.ts.index[500] +class DataFrameStringIndexing(object): - def time_getitem_scalar(self): - self.ts[self.dt] - - -class DataFrameIndexing(object): goal_time = 0.2 def setup(self): - self.index = tm.makeStringIndex(1000) - self.columns = tm.makeStringIndex(30) - self.df = DataFrame(np.random.randn(1000, 30), index=self.index, - columns=self.columns) - self.idx = self.index[100] - self.col = self.columns[10] - - self.df2 = DataFrame(np.random.randn(10000, 4), - columns=['A', 'B', 'C', 'D']) - self.indexer = (self.df2['B'] > 0) - self.obj_indexer = self.indexer.astype('O') - - # duptes - self.idx_dupe = (np.array(range(30)) * 99) - self.df3 = DataFrame({'A': ([0.1] * 1000), 'B': ([1] * 1000),}) - self.df3 = concat([self.df3, (2 * self.df3), (3 * self.df3)]) - - self.df_big = DataFrame(dict(A=(['foo'] * 1000000))) + index = tm.makeStringIndex(1000) + columns = tm.makeStringIndex(30) + self.df = DataFrame(np.random.randn(1000, 30), index=index, + columns=columns) + self.idx_scalar = index[100] + self.col_scalar = columns[10] + self.bool_indexer = self.df[self.col_scalar] > 0 + self.bool_obj_indexer = self.bool_indexer.astype(object) def time_get_value(self): - self.df.get_value(self.idx, self.col) + self.df.get_value(self.idx_scalar, self.col_scalar) - def time_get_value_ix(self): - self.df.ix[(self.idx, self.col)] + def time_ix(self): + self.df.ix[self.idx_scalar, self.col_scalar] + + def time_loc(self): + self.df.loc[self.idx_scalar, self.col_scalar] def time_getitem_scalar(self): - self.df[self.col][self.idx] + self.df[self.col_scalar][self.idx_scalar] def time_boolean_rows(self): - self.df2[self.indexer] + self.df[self.bool_indexer] def time_boolean_rows_object(self): - self.df2[self.obj_indexer] + self.df[self.bool_obj_indexer] + + +class 
DataFrameNumericIndexing(object): + + goal_time = 0.2 + + def setup(self): + self.idx_dupe = np.array(range(30)) * 99 + self.df = DataFrame(np.random.randn(10000, 5)) + self.df_dup = concat([self.df, 2 * self.df, 3 * self.df]) + self.bool_indexer = [True] * 5000 + [False] * 5000 def time_iloc_dups(self): - self.df3.iloc[self.idx_dupe] + self.df_dup.iloc[self.idx_dupe] def time_loc_dups(self): - self.df3.loc[self.idx_dupe] + self.df_dup.loc[self.idx_dupe] - def time_iloc_big(self): - self.df_big.iloc[:100, 0] + def time_iloc(self): + self.df.iloc[:100, 0] + def time_loc(self): + self.df.loc[:100, 0] -class IndexingMethods(object): - # GH 13166 - goal_time = 0.2 + def time_bool_indexer(self): + self.df[self.bool_indexer] - def setup(self): - a = np.arange(100000) - self.ind = pd.Float64Index(a * 4.8000000418824129e-08) - self.s = Series(np.random.rand(100000)) - self.ts = Series(np.random.rand(100000), - index=date_range('2011-01-01', freq='S', periods=100000)) - self.indexer = ([True, False, True, True, False] * 20000) +class Take(object): - def time_get_loc_float(self): - self.ind.get_loc(0) + goal_time = 0.2 + params = ['int', 'datetime'] + param_names = ['index'] - def time_take_dtindex(self): - self.ts.take(self.indexer) + def setup(self, index): + N = 100000 + indexes = {'int': Int64Index(np.arange(N)), + 'datetime': date_range('2011-01-01', freq='S', periods=N)} + index = indexes[index] + self.s = Series(np.random.rand(N), index=index) + self.indexer = [True, False, True, True, False] * 20000 - def time_take_intindex(self): + def time_take(self, index): self.s.take(self.indexer) class MultiIndexing(object): + goal_time = 0.2 def setup(self): - self.mi = MultiIndex.from_tuples([(x, y) for x in range(1000) for y in range(1000)]) - self.s = Series(np.random.randn(1000000), index=self.mi) + mi = MultiIndex.from_product([range(1000), range(1000)]) + self.s = Series(np.random.randn(1000000), index=mi) self.df = DataFrame(self.s) - # slicers - np.random.seed(1234) - 
self.idx = pd.IndexSlice - self.n = 100000 - self.mdt = pandas.DataFrame() - self.mdt['A'] = np.random.choice(range(10000, 45000, 1000), self.n) - self.mdt['B'] = np.random.choice(range(10, 400), self.n) - self.mdt['C'] = np.random.choice(range(1, 150), self.n) - self.mdt['D'] = np.random.choice(range(10000, 45000), self.n) - self.mdt['x'] = np.random.choice(range(400), self.n) - self.mdt['y'] = np.random.choice(range(25), self.n) - self.test_A = 25000 - self.test_B = 25 - self.test_C = 40 - self.test_D = 35000 - self.eps_A = 5000 - self.eps_B = 5 - self.eps_C = 5 - self.eps_D = 5000 - self.mdt2 = self.mdt.set_index(['A', 'B', 'C', 'D']).sortlevel() - self.miint = MultiIndex.from_product( - [np.arange(1000), - np.arange(1000)], names=['one', 'two']) - - import string - - self.mi_large = MultiIndex.from_product( - [np.arange(1000), np.arange(20), list(string.ascii_letters)], - names=['one', 'two', 'three']) - self.mi_med = MultiIndex.from_product( - [np.arange(1000), np.arange(10), list('A')], - names=['one', 'two', 'three']) - self.mi_small = MultiIndex.from_product( - [np.arange(100), list('A'), list('A')], - names=['one', 'two', 'three']) - - rng = np.random.RandomState(4) - size = 1 << 16 - self.mi_unused_levels = pd.MultiIndex.from_arrays([ - rng.randint(0, 1 << 13, size), - rng.randint(0, 1 << 10, size)])[rng.rand(size) < 0.1] - - def time_series_xs_mi_ix(self): + n = 100000 + self.mdt = DataFrame({'A': np.random.choice(range(10000, 45000, 1000), + n), + 'B': np.random.choice(range(10, 400), n), + 'C': np.random.choice(range(1, 150), n), + 'D': np.random.choice(range(10000, 45000), n), + 'x': np.random.choice(range(400), n), + 'y': np.random.choice(range(25), n)}) + self.idx = IndexSlice[20000:30000, 20:30, 35:45, 30000:40000] + self.mdt = self.mdt.set_index(['A', 'B', 'C', 'D']).sort_index() + + def time_series_ix(self): self.s.ix[999] - def time_frame_xs_mi_ix(self): + def time_frame_ix(self): self.df.ix[999] - def time_multiindex_slicers(self): - 
self.mdt2.loc[self.idx[ - (self.test_A - self.eps_A):(self.test_A + self.eps_A), - (self.test_B - self.eps_B):(self.test_B + self.eps_B), - (self.test_C - self.eps_C):(self.test_C + self.eps_C), - (self.test_D - self.eps_D):(self.test_D + self.eps_D)], :] - - def time_multiindex_get_indexer(self): - self.miint.get_indexer( - np.array([(0, 10), (0, 11), (0, 12), - (0, 13), (0, 14), (0, 15), - (0, 16), (0, 17), (0, 18), - (0, 19)], dtype=object)) - - def time_multiindex_large_get_loc(self): - self.mi_large.get_loc((999, 19, 'Z')) - - def time_multiindex_large_get_loc_warm(self): - for _ in range(1000): - self.mi_large.get_loc((999, 19, 'Z')) - - def time_multiindex_med_get_loc(self): - self.mi_med.get_loc((999, 9, 'A')) - - def time_multiindex_med_get_loc_warm(self): - for _ in range(1000): - self.mi_med.get_loc((999, 9, 'A')) - - def time_multiindex_string_get_loc(self): - self.mi_small.get_loc((99, 'A', 'A')) - - def time_multiindex_small_get_loc_warm(self): - for _ in range(1000): - self.mi_small.get_loc((99, 'A', 'A')) - - def time_is_monotonic(self): - self.miint.is_monotonic - - def time_remove_unused_levels(self): - self.mi_unused_levels.remove_unused_levels() + def time_index_slice(self): + self.mdt.loc[self.idx, :] class IntervalIndexing(object): + goal_time = 0.2 - def setup(self): - self.monotonic = Series(np.arange(1000000), - index=IntervalIndex.from_breaks(np.arange(1000001))) + def setup_cache(self): + idx = IntervalIndex.from_breaks(np.arange(1000001)) + monotonic = Series(np.arange(1000000), index=idx) + return monotonic - def time_getitem_scalar(self): - self.monotonic[80000] + def time_getitem_scalar(self, monotonic): + monotonic[80000] - def time_loc_scalar(self): - self.monotonic.loc[80000] + def time_loc_scalar(self, monotonic): + monotonic.loc[80000] - def time_getitem_list(self): - self.monotonic[80000:] + def time_getitem_list(self, monotonic): + monotonic[80000:] - def time_loc_list(self): - self.monotonic.loc[80000:] + def 
time_loc_list(self, monotonic): + monotonic.loc[80000:] class PanelIndexing(object): + goal_time = 0.2 def setup(self): @@ -289,35 +238,22 @@ def time_subset(self): self.p.ix[(self.inds, self.inds, self.inds)] -class IndexerLookup(object): - goal_time = 0.2 - - def setup(self): - self.s = Series(range(10)) - - def time_lookup_iloc(self): - self.s.iloc - - def time_lookup_ix(self): - self.s.ix - - def time_lookup_loc(self): - self.s.loc +class MethodLookup(object): + goal_time = 0.2 -class BooleanRowSelect(object): + def setup_cache(self): + s = Series() + return s - goal_time = 0.2 + def time_lookup_iloc(self, s): + s.iloc - def setup(self): - N = 10000 - np.random.seed(1234) - self.df = DataFrame(np.random.randn(N, 100)) - self.bool_arr = np.zeros(N, dtype=bool) - self.bool_arr[:1000] = True + def time_lookup_ix(self, s): + s.ix - def time_frame_boolean_row_select(self): - self.df[self.bool_arr] + def time_lookup_loc(self, s): + s.loc class GetItemSingleColumn(object): @@ -325,15 +261,14 @@ class GetItemSingleColumn(object): goal_time = 0.2 def setup(self): - np.random.seed(1234) - self.df2 = DataFrame(np.random.randn(3000, 1), columns=['A']) - self.df3 = DataFrame(np.random.randn(3000, 1)) + self.df_string_col = DataFrame(np.random.randn(3000, 1), columns=['A']) + self.df_int_col = DataFrame(np.random.randn(3000, 1)) def time_frame_getitem_single_column_label(self): - self.df2['A'] + self.df_string_col['A'] def time_frame_getitem_single_column_int(self): - self.df3[0] + self.df_int_col[0] class AssignTimeseriesIndex(object): @@ -342,8 +277,7 @@ class AssignTimeseriesIndex(object): def setup(self): N = 100000 - np.random.seed(1234) - dx = date_range('1/1/2000', periods=N, freq='H') + idx = date_range('1/1/2000', periods=N, freq='H') self.df = DataFrame(np.random.randn(N, 1), columns=['A'], index=idx) def time_frame_assign_timeseries_index(self): @@ -356,7 +290,7 @@ class InsertColumns(object): def setup(self): self.N = 10**3 - self.df = DataFrame(index=range(N)) + 
self.df = DataFrame(index=range(self.N)) def time_insert(self): np.random.seed(1234) diff --git a/asv_bench/benchmarks/io/csv.py b/asv_bench/benchmarks/io/csv.py index bc4599436111f..3b7fdc6e2d78c 100644 --- a/asv_bench/benchmarks/io/csv.py +++ b/asv_bench/benchmarks/io/csv.py @@ -132,7 +132,7 @@ def setup(self, compression, engine): # The Python 2 C parser can't read bz2 from open files. raise NotImplementedError try: - import s3fs + import s3fs # noqa except ImportError: # Skip these benchmarks if `boto` is not installed. raise NotImplementedError diff --git a/asv_bench/benchmarks/multiindex_object.py b/asv_bench/benchmarks/multiindex_object.py new file mode 100644 index 0000000000000..0c92214795557 --- /dev/null +++ b/asv_bench/benchmarks/multiindex_object.py @@ -0,0 +1,140 @@ +import string + +import numpy as np +import pandas.util.testing as tm +from pandas import date_range, MultiIndex + +from .pandas_vb_common import setup # noqa + + +class GetLoc(object): + + goal_time = 0.2 + + def setup(self): + self.mi_large = MultiIndex.from_product( + [np.arange(1000), np.arange(20), list(string.ascii_letters)], + names=['one', 'two', 'three']) + self.mi_med = MultiIndex.from_product( + [np.arange(1000), np.arange(10), list('A')], + names=['one', 'two', 'three']) + self.mi_small = MultiIndex.from_product( + [np.arange(100), list('A'), list('A')], + names=['one', 'two', 'three']) + + def time_large_get_loc(self): + self.mi_large.get_loc((999, 19, 'Z')) + + def time_large_get_loc_warm(self): + for _ in range(1000): + self.mi_large.get_loc((999, 19, 'Z')) + + def time_med_get_loc(self): + self.mi_med.get_loc((999, 9, 'A')) + + def time_med_get_loc_warm(self): + for _ in range(1000): + self.mi_med.get_loc((999, 9, 'A')) + + def time_string_get_loc(self): + self.mi_small.get_loc((99, 'A', 'A')) + + def time_small_get_loc_warm(self): + for _ in range(1000): + self.mi_small.get_loc((99, 'A', 'A')) + + +class Duplicates(object): + + goal_time = 0.2 + + def setup(self): + size 
= 65536 + arrays = [np.random.randint(0, 8192, size), + np.random.randint(0, 1024, size)] + mask = np.random.rand(size) < 0.1 + self.mi_unused_levels = MultiIndex.from_arrays(arrays) + self.mi_unused_levels = self.mi_unused_levels[mask] + + def time_remove_unused_levels(self): + self.mi_unused_levels.remove_unused_levels() + + +class Integer(object): + + goal_time = 0.2 + + def setup(self): + self.mi_int = MultiIndex.from_product([np.arange(1000), + np.arange(1000)], + names=['one', 'two']) + self.obj_index = np.array([(0, 10), (0, 11), (0, 12), + (0, 13), (0, 14), (0, 15), + (0, 16), (0, 17), (0, 18), + (0, 19)], dtype=object) + + def time_get_indexer(self): + self.mi_int.get_indexer(self.obj_index) + + def time_is_monotonic(self): + self.mi_int.is_monotonic + + +class Duplicated(object): + + goal_time = 0.2 + + def setup(self): + n, k = 200, 5000 + levels = [np.arange(n), + tm.makeStringIndex(n).values, + 1000 + np.arange(n)] + labels = [np.random.choice(n, (k * n)) for lev in levels] + self.mi = MultiIndex(levels=levels, labels=labels) + + def time_duplicated(self): + self.mi.duplicated() + + +class Sortlevel(object): + + goal_time = 0.2 + + def setup(self): + n = 1182720 + low, high = -4096, 4096 + arrs = [np.repeat(np.random.randint(low, high, (n // k)), k) + for k in [11, 7, 5, 3, 1]] + self.mi_int = MultiIndex.from_arrays(arrs)[np.random.permutation(n)] + + a = np.repeat(np.arange(100), 1000) + b = np.tile(np.arange(1000), 100) + self.mi = MultiIndex.from_arrays([a, b]) + self.mi = self.mi.take(np.random.permutation(np.arange(100000))) + + def time_sortlevel_int64(self): + self.mi_int.sortlevel() + + def time_sortlevel_zero(self): + self.mi.sortlevel(0) + + def time_sortlevel_one(self): + self.mi.sortlevel(1) + + +class Values(object): + + goal_time = 0.2 + + def setup_cache(self): + + level1 = range(1000) + level2 = date_range(start='1/1/2012', periods=100) + mi = MultiIndex.from_product([level1, level2]) + return mi + + def 
time_datetime_level_values_copy(self, mi): + mi.copy().values + + def time_datetime_level_values_sliced(self, mi): + mi[:10].values diff --git a/ci/lint.sh b/ci/lint.sh index 5380c91831cec..b4eafcaf28e39 100755 --- a/ci/lint.sh +++ b/ci/lint.sh @@ -24,7 +24,7 @@ if [ "$LINT" ]; then echo "Linting setup.py DONE" echo "Linting asv_bench/benchmarks/" - flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ips]*.py --ignore=F811 + flake8 asv_bench/benchmarks/ --exclude=asv_bench/benchmarks/[ps]*.py --ignore=F811 if [ $? -ne "0" ]; then RET=1 fi
I moved some benchmarks to `index_object.py` that were testing method of (mostly) MultiIndexes. Otherwise mostly cleanup and now linting files that start with i. ``` $ asv dev -b ^indexing · Discovering benchmarks · Running 49 total benchmarks (1 commits * 1 environments * 49 benchmarks) [ 0.00%] ·· Building for existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ·· Benchmarking existing-py_home_matt_anaconda_envs_pandas_dev_bin_python [ 0.00%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/indexing.py:211 [ 2.04%] ··· Running indexing.IntervalIndexing.time_getitem_list 240μs [ 4.08%] ··· Running indexing.IntervalIndexing.time_getitem_scalar 136μs [ 6.12%] ··· Running indexing.IntervalIndexing.time_loc_list 208μs [ 8.16%] ··· Running indexing.IntervalIndexing.time_loc_scalar 222μs [ 8.16%] ··· Setting up /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/indexing.py:245 [ 10.20%] ··· Running indexing.MethodLookup.time_lookup_iloc 10.4μs [ 12.24%] ··· Running indexing.MethodLookup.time_lookup_ix 10.3μs [ 14.29%] ··· Running indexing.MethodLookup.time_lookup_loc 10.1μs [ 16.33%] ··· Running ...iesIndex.time_frame_assign_timeseries_index 6.08ms [ 18.37%] ··· Running ....DataFrameNumericIndexing.time_bool_indexer 1.44ms [ 20.41%] ··· Running indexing.DataFrameNumericIndexing.time_iloc 448μs [ 22.45%] ··· Running ...ing.DataFrameNumericIndexing.time_iloc_dups 550μs [ 24.49%] ··· Running indexing.DataFrameNumericIndexing.time_loc 827μs [ 26.53%] ··· Running ...xing.DataFrameNumericIndexing.time_loc_dups 6.57ms [ 28.57%] ··· Running ...g.DataFrameStringIndexing.time_boolean_rows 680μs [ 30.61%] ··· Running ...rameStringIndexing.time_boolean_rows_object 671μs [ 32.65%] ··· Running ...xing.DataFrameStringIndexing.time_get_value 215μs [ 32.65%] ····· /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/indexing.py:115: FutureWarning: get_value is deprecated and will be removed in a future release. 
Please use .at[] or .iat[] accessors instead self.df.get_value(self.idx_scalar, self.col_scalar) [ 34.69%] ··· Running ...DataFrameStringIndexing.time_getitem_scalar 238μs [ 36.73%] ··· Running indexing.DataFrameStringIndexing.time_ix 327μs [ 38.78%] ··· Running indexing.DataFrameStringIndexing.time_loc 265μs [ 40.82%] ··· Running ...Column.time_frame_getitem_single_column_int 167μs [ 42.86%] ··· Running ...lumn.time_frame_getitem_single_column_label 158μs [ 44.90%] ··· Running ...xing.InsertColumns.time_assign_with_setitem 55.0ms [ 46.94%] ··· Running indexing.InsertColumns.time_insert 103ms [ 48.98%] ··· Running indexing.MultiIndexing.time_frame_ix 17.5ms [ 51.02%] ··· Running indexing.MultiIndexing.time_index_slice 11.7ms [ 53.06%] ··· Running indexing.MultiIndexing.time_series_ix 17.2ms [ 55.10%] ··· Running ...ing.NonNumericSeriesIndexing.time_get_value ok [ 55.10%] ···· ========== ======== index ---------- -------- string 23.4ms datetime 4.74ms ========== ======== [ 55.10%] ····· For parameters: 'string' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/indexing.py:94: FutureWarning: get_value is deprecated and will be removed in a future release. Please use .at[] or .iat[] accessors instead self.s.get_value(self.lbl) For parameters: 'datetime' /home/matt/Projects/pandas-mroeschke/asv_bench/benchmarks/indexing.py:94: FutureWarning: get_value is deprecated and will be removed in a future release. 
Please use .at[] or .iat[] accessors instead self.s.get_value(self.lbl) [ 57.14%] ··· Running ...ericSeriesIndexing.time_getitem_label_slice ok [ 57.14%] ···· ========== ======== index ---------- -------- string 26.0ms datetime 5.32ms ========== ======== [ 59.18%] ··· Running ...umericSeriesIndexing.time_getitem_pos_slice ok [ 59.18%] ···· ========== ======== index ---------- -------- string 2.89ms datetime 474μs ========== ======== [ 61.22%] ··· Running ...onNumericSeriesIndexing.time_getitem_scalar ok [ 61.22%] ···· ========== ======== index ---------- -------- string 24.6ms datetime 4.69ms ========== ======== [ 63.27%] ··· Running ...ng.NumericSeriesIndexing.time_getitem_array ok [ 63.27%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 58.4ms pandas.core.indexes.numeric.Float64Index 269ms ========================================== ======== [ 65.31%] ··· Running ...umericSeriesIndexing.time_getitem_list_like ok [ 65.31%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 56.7ms pandas.core.indexes.numeric.Float64Index 265ms ========================================== ======== [ 67.35%] ··· Running ...ng.NumericSeriesIndexing.time_getitem_lists ok [ 67.35%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 64.9ms pandas.core.indexes.numeric.Float64Index 269ms ========================================== ======== [ 69.39%] ··· Running ...g.NumericSeriesIndexing.time_getitem_scalar ok [ 69.39%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 2.68ms pandas.core.indexes.numeric.Float64Index 3.40ms ========================================== ======== [ 
71.43%] ··· Running ...ng.NumericSeriesIndexing.time_getitem_slice ok [ 71.43%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 284μs pandas.core.indexes.numeric.Float64Index 3.61ms ========================================== ======== [ 73.47%] ··· Running indexing.NumericSeriesIndexing.time_iloc_array ok [ 73.47%] ···· ========================================== ======= param1 ------------------------------------------ ------- pandas.core.indexes.numeric.Int64Index 363μs pandas.core.indexes.numeric.Float64Index 328μs ========================================== ======= [ 75.51%] ··· Running ...g.NumericSeriesIndexing.time_iloc_list_like ok [ 75.51%] ···· ========================================== ======= param1 ------------------------------------------ ------- pandas.core.indexes.numeric.Int64Index 231μs pandas.core.indexes.numeric.Float64Index 236μs ========================================== ======= [ 77.55%] ··· Running ...xing.NumericSeriesIndexing.time_iloc_scalar ok [ 77.55%] ···· ========================================== ======= param1 ------------------------------------------ ------- pandas.core.indexes.numeric.Int64Index 132μs pandas.core.indexes.numeric.Float64Index 134μs ========================================== ======= [ 79.59%] ··· Running indexing.NumericSeriesIndexing.time_iloc_slice ok [ 79.59%] ···· ========================================== ======= param1 ------------------------------------------ ------- pandas.core.indexes.numeric.Int64Index 220μs pandas.core.indexes.numeric.Float64Index 221μs ========================================== ======= [ 81.63%] ··· Running indexing.NumericSeriesIndexing.time_ix_array ok [ 81.63%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 58.6ms pandas.core.indexes.numeric.Float64Index 266ms 
========================================== ======== [ 83.67%] ··· Running ...ing.NumericSeriesIndexing.time_ix_list_like ok [ 83.67%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 57.0ms pandas.core.indexes.numeric.Float64Index 264ms ========================================== ======== [ 85.71%] ··· Running indexing.NumericSeriesIndexing.time_ix_scalar ok [ 85.71%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 3.46ms pandas.core.indexes.numeric.Float64Index 3.60ms ========================================== ======== [ 87.76%] ··· Running indexing.NumericSeriesIndexing.time_ix_slice ok [ 87.76%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 3.33ms pandas.core.indexes.numeric.Float64Index 3.74ms ========================================== ======== [ 89.80%] ··· Running indexing.NumericSeriesIndexing.time_loc_array ok [ 89.80%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 58.2ms pandas.core.indexes.numeric.Float64Index 266ms ========================================== ======== [ 91.84%] ··· Running ...ng.NumericSeriesIndexing.time_loc_list_like ok [ 91.84%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 57.4ms pandas.core.indexes.numeric.Float64Index 266ms ========================================== ======== [ 93.88%] ··· Running indexing.NumericSeriesIndexing.time_loc_scalar ok [ 93.88%] ···· ========================================== ======== param1 ------------------------------------------ -------- 
pandas.core.indexes.numeric.Int64Index 58.6ms pandas.core.indexes.numeric.Float64Index 112ms ========================================== ======== [ 95.92%] ··· Running indexing.NumericSeriesIndexing.time_loc_slice ok [ 95.92%] ···· ========================================== ======== param1 ------------------------------------------ -------- pandas.core.indexes.numeric.Int64Index 2.82ms pandas.core.indexes.numeric.Float64Index 3.63ms ========================================== ======== [ 97.96%] ··· Running indexing.PanelIndexing.time_subset 5.09ms [100.00%] ··· Running indexing.Take.time_take ok [100.00%] ···· ========== ======== index ---------- -------- int 11.1ms datetime 11.0ms ========== ======== ```
https://api.github.com/repos/pandas-dev/pandas/pulls/19031
2018-01-02T04:32:09Z
2018-01-03T11:41:34Z
2018-01-03T11:41:34Z
2018-01-03T17:40:01Z